diff --git a/INSTALL.txt b/INSTALL.txt
new file mode 100644
index 00000000000..dfe10cfe3ac
--- /dev/null
+++ b/INSTALL.txt
@@ -0,0 +1,60 @@
+This document describes how to set up and configure a single-server CloudStack installation so that you can quickly start playing with the CloudStack platform. The easiest way to deploy CloudStack is from RPM packages; building CloudStack from source is for advanced users. This guide covers building CloudStack from source and installing it directly from the build, and is suitable for you if you want to develop CloudStack.
+
+This procedure has been tested on Fedora 14.
+
+Step 1: Install the tools and dependencies:
+On Fedora the package names are ant, ant-devel, openjdk, and openjdk-devel.
+
+Tools:
+yum install ant ant-devel openjdk openjdk-devel mysql mysql-server tomcat
+
+Dependencies:
+yum install jakarta-commons-collections jakarta-commons-dbcp.noarch apache-commons-logging.noarch jakarta-commons-pool jakarta-commons-httpclient.noarch ws-commons-util.noarch glibc-devel gcc python MySQL-python openssh-clients
+
+Step 2: Configuration
+
+Start the MySQL service:
+
+# service mysqld start
+
+Step 3: Get the source
+
+$ git clone https://github.com/CloudStack/CloudStack.git
+
+For subsequent pulls, do:
+$ git pull
+
+Step 4: Building, testing, and deploying CloudStack using Ant:
+
+Ant is a Java-based build tool designed to be cross-platform, easy to use, extensible, and scalable. Ant is controlled by a text file that describes how to perform all the stages of building, testing, and deploying a project. These files are called build files, and every project that uses Ant must have at least one, named build.xml. You can see build.xml in your CloudStack source; a minimal, generic sketch of such a build file is shown at the end of this document.
+
+To build CloudStack, type:
+$ ant clean-all build-all
+
+To deploy the management server, type:
+$ ant deploy-server
+
+To deploy the database, type:
+$ ant deploydb
+
+To run the management server, type:
+$ ant debug
+
+If all of the above steps succeed, your single-server CloudStack installation is done and your Cloud.com Management Server is running.
+
+Open your browser and type the below URL in the address bar:
+
+http://localhost:8080/client/
+
+OR
+
+http://management-server-ip-address:8080/client
+
+You will see the CloudStack Management Console login page in your web browser. Log in with the default username and password and leave the domain blank.
+
+The default credentials are “admin” for the user and “password” for the password. The domain field should be left blank. A blank
+domain field defaults to the ROOT domain.
+
+NOTE: This document describes a very basic CloudStack installation. If you are new to CloudStack and want to feel the power of CloudStack quickly on an RPM-based distro, this document gives you clear steps to get it done. Since I am new to CloudStack, I am writing this documentation while learning from the community. I will keep updating this guide with new information to make it a more valuable resource.
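+
+As a rough illustration of the build.xml format mentioned in Step 4, here is a minimal, generic Ant build file. It is only a sketch: the project, target, and directory names below are made up for illustration and are not taken from CloudStack's actual build.xml, which defines its own targets such as clean-all, build-all, deploy-server, deploydb, and debug.
+
+  <project name="example" default="build" basedir=".">
+      <!-- delete previous build output -->
+      <target name="clean">
+          <delete dir="build"/>
+      </target>
+      <!-- compile all Java sources under src/ into build/classes -->
+      <target name="build" depends="clean">
+          <mkdir dir="build/classes"/>
+          <javac srcdir="src" destdir="build/classes"/>
+      </target>
+  </project>
+
+Running "$ ant" with no arguments in the directory containing such a file executes its default target ("build" above), while "$ ant clean" runs a specific target by name; the CloudStack targets listed in Step 4 are invoked the same way.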
+ + \ No newline at end of file diff --git a/agent-simulator/.classpath b/agent-simulator/.classpath index 264339135e1..7ae03655472 100644 --- a/agent-simulator/.classpath +++ b/agent-simulator/.classpath @@ -8,17 +8,12 @@ - - - - - diff --git a/agent-simulator/scripts/guava/setup.py b/agent-simulator/scripts/guava/setup.py new file mode 100644 index 00000000000..e8daf82cede --- /dev/null +++ b/agent-simulator/scripts/guava/setup.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python + +''' +############################################################ +# guava uses nfs storage, before setting up make sure +# * optionally turn off stats collectors +# * expunge.delay and expunge.interval are 60s +############################################################ +''' + +from optparse import OptionParser +from configGenerator import * +import random + + +def getGlobalSettings(): + global_settings = {'expunge.delay': '60', + 'expunge.interval': '60', + 'expunge.workers': '3', + 'workers': '10', + 'use.user.concentrated.pod.allocation': 'true', + 'vm.allocation.algorithm': 'random', + 'vm.op.wait.interval': '5', + 'guest.domain.suffix': 'guava.simulator', + 'instance.name': 'TEST', + 'direct.agent.load.size': '1000', + 'default.page.size': '10000', + 'linkLocalIp.nums': '10', + 'check.pod.cidrs': 'false', + } + for k, v in global_settings.iteritems(): + cfg = configuration() + cfg.name = k + cfg.value = v + yield cfg + + +def describeGuavaResources(dbnode='localhost', mshost='localhost'): + zs = cloudstackConfiguration() + numberofpods = 1 + + clustersPerPod = 100 + hostsPerCluster = 10 + + z = zone() + z.dns1 = '4.2.2.2' + z.dns2 = '192.168.110.254' + z.internaldns1 = '10.91.28.6' + z.internaldns2 = '192.168.110.254' + z.name = 'Guava' + z.networktype = 'Advanced' + z.guestcidraddress = '10.1.1.0/24' + z.vlan='100-3000' + + p = pod() + p.name = 'POD1' + p.gateway = '172.1.2.1' + p.startip = '172.1.2.2' + p.endip = '172.1.255.252' + p.netmask = '255.255.0.0' + + v = iprange() + v.vlan = 'untagged' + v.startip = '172.2.1.2' + v.endip = '172.2.255.252' + v.gateway = '172.2.1.1' + v.netmask = '255.255.0.0' + + curhost = 1 + for i in range(1, clustersPerPod + 1): + c = cluster() + c.clustername = 'POD1-CLUSTER' + str(i) + c.hypervisor = 'Simulator' + c.clustertype = 'CloudManaged' + + for j in range(1, hostsPerCluster + 1): + h = host() + h.username = 'root' + h.password = 'password' + h.url = 'http://sim/test-%d'%(curhost) + c.hosts.append(h) + curhost = curhost + 1 + + ps = primaryStorage() + ps.name = 'spool'+str(i) + ps.url = 'nfs://172.16.24.32/export/path/'+str(i) + c.primaryStorages.append(ps) + p.clusters.append(c) + + + secondary = secondaryStorage() + secondary.url = 'nfs://172.16.25.32/secondary/path' + + z.pods.append(p) + z.ipranges.append(v) + z.secondaryStorages.append(secondary) + zs.zones.append(z) + + '''Add mgt server''' + mgt = managementServer() + mgt.mgtSvrIp = mshost + zs.mgtSvr.append(mgt) + + '''Add a database''' + db = dbServer() + db.dbSvr = opts.dbnode + zs.dbSvr = db + + '''Add some configuration''' + [zs.globalConfig.append(cfg) for cfg in getGlobalSettings()] + + ''''add loggers''' + testClientLogger = logger() + testClientLogger.name = 'TestClient' + testClientLogger.file = '/var/log/testclient.log' + + testCaseLogger = logger() + testCaseLogger.name = 'TestCase' + testCaseLogger.file = '/var/log/testcase.log' + + zs.logger.append(testClientLogger) + zs.logger.append(testCaseLogger) + return zs + + +if __name__ == '__main__': + parser = OptionParser() + parser.add_option('-o', 
'--output', action='store', default='./guavaCfg', dest='output', help='the path where the json config file generated') + parser.add_option('-d', '--dbnode', dest='dbnode', help='hostname/ip of the database node', action='store') + parser.add_option('-m', '--mshost', dest='mshost', help='hostname/ip of management server', action='store') + + (opts, args) = parser.parse_args() + cfg = describeGuavaResources(opts.dbnode, opts.mshost) + generate_setup_config(cfg, opts.output) diff --git a/agent-simulator/scripts/guava/tests/testProvision.py b/agent-simulator/scripts/guava/tests/testProvision.py new file mode 100644 index 00000000000..99c39d9c18d --- /dev/null +++ b/agent-simulator/scripts/guava/tests/testProvision.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python +try: + import unittest2 as unittest +except ImportError: + import unittest + +import random +import hashlib +from cloudstackTestCase import * + +class Provision(cloudstackTestCase): + ''' + ''' + def setUp(self): + pass + + + def tearDown(self): + pass + + + def test_createAccounts(self, numberOfAccounts=5): + ''' + Create a bunch of user accounts + ''' + mdf = hashlib.md5() + mdf.update('password') + mdf_pass = mdf.hexdigest() + api = self.testClient.getApiClient() + for i in range(1, numberOfAccounts + 1): + acct = createAccount.createAccountCmd() + acct.accounttype = 0 + acct.firstname = 'user' + str(i) + acct.lastname = 'user' + str(i) + acct.password = mdf_pass + acct.username = 'user' + str(i) + acct.email = 'user@example.com' + acct.account = 'user' + str(i) + acct.domainid = 1 + acctResponse = api.createAccount(acct) + self.debug("successfully created account: %s, user: %s, id: %s"%(acctResponse.account, acctResponse.username, acctResponse.id)) + + + def deployCmd(self, account): + deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd() + deployVmCmd.zoneid = 1 + deployVmCmd.hypervisor='Simulator' + deployVmCmd.account=account + deployVmCmd.domainid=1 + deployVmCmd.templateid=10 + deployVmCmd.serviceofferingid=7 + return deployVmCmd + + + def listVmsInAccountCmd(self, acct): + api = self.testClient.getApiClient() + listVmCmd = listVirtualMachines.listVirtualMachinesCmd() + listVmCmd.account = acct + listVmCmd.zoneid = 1 + listVmCmd.domainid = 1 + listVmResponse = api.listVirtualMachines(listVmCmd) + self.debug(listVmResponse) + return listVmResponse + + + def destroyVmCmd(self, key): + api = self.testClient.getApiClient() + destroyVmCmd = destroyVirtualMachine.destroyVirtualMachineCmd() + destroyVmCmd.id = key + api.destroyVirtualMachine(destroyVmCmd) + + + def test_stressDeploy(self): + ''' + Deploy 20 Vms in each account + ''' + api = self.testClient.getApiClient() + for acct in range(1, 5): + [api.deployVirtualMachine(self.deployCmd('user'+str(acct))) for x in range(0,20)] + + def test_stressDestroy(self): + ''' + Cleanup all Vms in every account + ''' + api = self.testClient.getApiClient() + for acct in range(1, 6): + for vm in self.listVmsInAccountCmd('user'+str(acct)): + self.destroyVmCmd(vm.id) + + def test_combineStress(self): + for i in range(0, 5): + self.test_stressDestroy() + self.test_stressDeploy() + + def deployN(self,nargs=300,batchsize=0): + ''' + Deploy Nargs number of VMs concurrently in batches of size {batchsize}. 
+ When batchsize is 0 all Vms are deployed in one batch + VMs will be deployed in 5:2:6 ratio + ''' + cmds = [] + + if batchsize == 0: + self.testClient.submitCmdsAndWait(cmds) + else: + while len(z) > 0: + try: + newbatch = [cmds.pop() for b in range(batchsize)] #pop batchsize items + self.testClient.submitCmdsAndWait(newbatch) + except IndexError: + break + diff --git a/agent-simulator/scripts/kumquat/setup.py b/agent-simulator/scripts/kumquat/setup.py new file mode 100644 index 00000000000..035c70865a6 --- /dev/null +++ b/agent-simulator/scripts/kumquat/setup.py @@ -0,0 +1,181 @@ +#!/usr/bin/env python + +''' +############################################################ +# Kumquat uses nfs storage, before setting up make sure +# * optionally turn off stats collectors +# * expunge.delay and expunge.interval are 60s +############################################################ +''' + +from optparse import OptionParser +from configGenerator import * +import random + + +def getGlobalSettings(): + global_settings = {'expunge.delay': '60', + 'expunge.interval': '60', + 'capacity.skipcounting.hours': '2', + 'cpu.overprovisioning.factor': '1.5', + 'expunge.workers': '3', + 'workers': '10', + 'use.user.concentrated.pod.allocation': 'true', + 'vm.allocation.algorithm': 'random', + 'vm.op.wait.interval': '5', + 'guest.domain.suffix': 'kumquat.simulator', + 'instance.name': 'KIM', + 'direct.agent.load.size': '16', + 'default.page.size': '500', + 'linkLocalIp.nums': '10', + 'check.pod.cidrs': 'false', + 'max.account.public.ips': '10000', + 'max.account.snapshots': '10000', + 'max.account.templates': '10000', + 'max.account.user.vms': '10000', + 'max.account.volumes': '10000', + } + for k, v in global_settings.iteritems(): + cfg = configuration() + cfg.name = k + cfg.value = v + yield cfg + + +def podIpRangeGenerator(): + x=1 + y=2 + while 1: + if y == 255: + x=x+1 + if x == 255: + x=1 + break + + y=1 + + y=y+1 + #pod mangement network + yield ('172.'+str(x)+'.'+str(y)+'.129', '172.'+str(x)+'.'+str(y)+'.130', '172.'+str(x)+'.'+str(y)+'.189') + + +def vlanIpRangeGenerator(): + x=1 + y=2 + while 1: + if y == 255: + x=x+1 + if x==255: + x=1 + break + + y=1 + + y=y+1 + #vlan ip range + yield ('172.'+str(x)+'.'+str(y)+'.129', '172.'+str(x)+'.'+str(y)+'.190', '172.'+str(x)+'.'+str(y)+'.249') + + +def describeKumquatResources(dbnode='localhost', mshost='localhost'): + zs = cloudstackConfiguration() + numberofpods = 15 + + clustersPerPod = 2 + hostsPerCluster = 8 + + curpod = 0 + curhost = 0 + + z = zone() + z.dns1 = '4.2.2.2' + z.dns2 = '192.168.110.254' + z.internaldns1 = '10.91.28.6' + z.internaldns2 = '192.168.110.254' + z.name = 'Kumquat' + z.networktype = 'Advanced' + z.guestcidraddress = '10.1.1.0/24' + z.vlan='100-3000' + + for podRange,vlanRange in zip(podIpRangeGenerator(), vlanIpRangeGenerator()): + p = pod() + curpod=curpod+1 + p.name = 'POD'+str(curpod) + p.gateway=podRange[0] + p.startip=podRange[1] + p.endip=podRange[2] + p.netmask='255.255.255.128' + + for i in range(1,clustersPerPod+1): + c = cluster() + c.clustername = 'POD'+str(curpod)+'-CLUSTER'+str(i) + c.hypervisor = 'Simulator' + c.clustertype = 'CloudManaged' + + ps = primaryStorage() + ps.name = 'spool'+str(i) + ps.url = 'nfs://172.16.24.32/export/path/'+str(curpod)+'/'+str(i) + c.primaryStorages.append(ps) + + for i in range(1, hostsPerCluster + 1): + h = host() + h.username = 'root' + h.password = 'password' + h.url = "http://sim/test-%d"%(curhost) + c.hosts.append(h) + curhost=curhost+1 + + p.clusters.append(c) + + 
z.pods.append(p) + if curpod == numberofpods: + break + + v = iprange() + v.vlan = 'untagged' + v.gateway='172.2.1.1' + v.startip='172.2.1.2' + v.endip='172.2.255.252' + v.netmask="255.255.0.0" + z.ipranges.append(v) + + secondary = secondaryStorage() + secondary.url = 'nfs://172.16.25.32/secondary/path' + z.secondaryStorages.append(secondary) + zs.zones.append(z) + + '''Add mgt server''' + mgt = managementServer() + mgt.mgtSvrIp = mshost + zs.mgtSvr.append(mgt) + + '''Add a database''' + db = dbServer() + db.dbSvr = opts.dbnode + zs.dbSvr = db + + '''Add some configuration''' + [zs.globalConfig.append(cfg) for cfg in getGlobalSettings()] + + ''''add loggers''' + testClientLogger = logger() + testClientLogger.name = 'TestClient' + testClientLogger.file = '/var/log/testclient.log' + + testCaseLogger = logger() + testCaseLogger.name = 'TestCase' + testCaseLogger.file = '/var/log/testcase.log' + + zs.logger.append(testClientLogger) + zs.logger.append(testCaseLogger) + return zs + + +if __name__ == '__main__': + parser = OptionParser() + parser.add_option('-o', '--output', action='store', default='./KumquatCfg', dest='output', help='the path where the json config file generated') + parser.add_option('-d', '--dbnode', dest='dbnode', help='hostname/ip of the database node', action='store') + parser.add_option('-m', '--mshost', dest='mshost', help='hostname/ip of management server', action='store') + + (opts, args) = parser.parse_args() + cfg = describeKumquatResources(opts.dbnode, opts.mshost) + generate_setup_config(cfg, opts.output) diff --git a/agent-simulator/scripts/kumquat/tests/testProvision.py b/agent-simulator/scripts/kumquat/tests/testProvision.py new file mode 100644 index 00000000000..253ef34becd --- /dev/null +++ b/agent-simulator/scripts/kumquat/tests/testProvision.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +try: + import unittest2 as unittest +except ImportError: + import unittest + +import random +import hashlib +from cloudstackTestCase import * + +class Provision(cloudstackTestCase): + ''' + ''' + + so = '10' #default + + def setUp(self): + pass + + + def tearDown(self): + pass + + @unittest.skip("already done") + def test_createAccounts(self, numberOfAccounts=850): + ''' + Create a bunch of user accounts + ''' + mdf = hashlib.md5() + mdf.update('password') + mdf_pass = mdf.hexdigest() + api = self.testClient.getApiClient() + for i in range(1, numberOfAccounts + 1): + acct = createAccount.createAccountCmd() + acct.accounttype = 0 + acct.firstname = 'user' + str(i) + acct.lastname = 'user' + str(i) + acct.password = mdf_pass + acct.username = 'user' + str(i) + acct.email = 'user@example.com' + acct.account = 'user' + str(i) + acct.domainid = 1 + acctResponse = api.createAccount(acct) + self.debug(acctResponse) + + + def test_setupServiceOffering(self): + socreate = createServiceOffering.createServiceOfferingCmd() + socreate.cpunumber = 1 + socreate.cpuspeed = 100 + socreate.displaytext = 'Sample SO' + socreate.memory = 128 + socreate.name = 'Sample SO' + api = self.testClient.getApiClient() + soresponse = api.createServiceOffering(socreate) + self.so = soresponse.id + + def deployCmd(self, acct): + deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd() + deployVmCmd.zoneid = 1 + deployVmCmd.hypervisor='Simulator' + deployVmCmd.account=acct + deployVmCmd.domainid=1 + deployVmCmd.templateid=2 + deployVmCmd.serviceofferingid=self.so + return deployVmCmd + + + def test_stressDeploy(self): + ''' + Deploy 5 Vms in each account + ''' + api = self.testClient.getApiClient() + for 
acct in range(122, 850): + [api.deployVirtualMachine(self.deployCmd('user'+str(acct))) for x in range(0, 5)] + + + def deployN(self,nargs=300,batchsize=0): + ''' + Deploy Nargs number of VMs concurrently in batches of size {batchsize}. + When batchsize is 0 all Vms are deployed in one batch + VMs will be deployed in 5:2:6 ratio + ''' + cmds = [] + + if batchsize == 0: + self.testClient.submitCmdsAndWait(cmds) + else: + while len(z) > 0: + try: + newbatch = [cmds.pop() for b in range(batchsize)] #pop batchsize items + self.testClient.submitCmdsAndWait(newbatch) + except IndexError: + break diff --git a/agent-simulator/scripts/zucchini/setup.py b/agent-simulator/scripts/zucchini/setup.py index e69207f937a..65d48cec39e 100644 --- a/agent-simulator/scripts/zucchini/setup.py +++ b/agent-simulator/scripts/zucchini/setup.py @@ -44,18 +44,18 @@ def getGlobalSettings(): 'use.user.concentrated.pod.allocation':'false', 'vm.allocation.algorithm':'firstfit', 'capacity.check.period':'0', - 'host.stats.interval':'-1', - 'vm.stats.interval':'-1', - 'storage.stats.interval':'-1', - 'router.stats.interval':'-1', +# 'host.stats.interval':'-1', +# 'vm.stats.interval':'-1', +# 'storage.stats.interval':'-1', +# 'router.stats.interval':'-1', 'vm.op.wait.interval':'5', 'xen.public.network.device':'10.10.10.10', #only a dummy for the simulator - 'guest.domain.suffix':'zcloud.simulator', + 'guest.domain.suffix':'zucchini.simulator', 'instance.name':'ZIM', 'direct.agent.load.size':'1000', 'default.page.size':'10000', 'linkLocalIp.nums':'4', - 'systemvm.use.local.storage':'true', + 'system.vm.use.local.storage':'true', 'use.local.storage':'true', 'check.pod.cidrs':'false', } @@ -201,7 +201,8 @@ if __name__=="__main__": (opts, args) = parser.parse_args() mandatories = ['mshost', 'dbnode', 'agents'] for m in mandatories: - if not opts.__dict__[m]: - print "mandatory option missing" - cfg = describeZucchiniResources(int(opts.agents), opts.dbnode, opts.mshost, opts.randomize) - generate_setup_config(cfg, opts.output) + if not opts.__dict__[m]: + print "mandatory option missing" + + cfg = describeZucchiniResources(int(opts.agents), opts.dbnode, opts.mshost, opts.randomize) + generate_setup_config(cfg, opts.output) diff --git a/agent-simulator/scripts/zucchini/tests/testListVm.py b/agent-simulator/scripts/zucchini/tests/testListVm.py new file mode 100644 index 00000000000..12fdb869eb6 --- /dev/null +++ b/agent-simulator/scripts/zucchini/tests/testListVm.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +''' +List Virtual Machine tests +''' +try: + import unittest2 as unittest +except ImportError: + import unittest + +import timeit +import random +from cloudstackAPI import * +from cloudstackTestCase import * + +class ListVmTests(cloudstackTestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + def test_listAllVm(self): + numVms = 0 + def time_listAllVm(): + api = self.testClient.getApiClient() + listVmCmd = listVirtualMachines.listVirtualMachinesCmd() + listVmCmd.account = 'admin' + listVmCmd.zoneid = 1 + listVmCmd.domainid = 1 + numVms = len(api.listVirtualMachines(listVmCmd)) + + t = timeit.Timer(time_listAllVm) + l = t.repeat(5, 5) + self.debug("Number of VMs: " + str(len(numVms)) + ", time for last 5 listVM calls : " + str(l)) diff --git a/agent/.classpath b/agent/.classpath index a533b6c64e4..536dd38714f 100644 --- a/agent/.classpath +++ b/agent/.classpath @@ -3,10 +3,10 @@ + - diff --git a/agent/.project b/agent/.project index 8b1ee7d35a6..166ff6cc1f3 100644 --- a/agent/.project +++ b/agent/.project @@ -5,6 
+5,11 @@ + + org.python.pydev.PyDevBuilder + + + org.eclipse.jdt.core.javabuilder @@ -13,5 +18,6 @@ org.eclipse.jdt.core.javanature + org.python.pydev.pythonNature diff --git a/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java b/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java index 6229e184b0a..d4eb27762a1 100644 --- a/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java +++ b/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java @@ -39,6 +39,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; @@ -386,6 +387,31 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv protected String getDefaultStorageScriptsDir() { return "scripts/storage/qcow2"; } + + private void saveProperties(Map params) throws ConfigurationException { + final File file = PropertiesUtil.findConfigFile("agent.properties"); + if (file == null) { + throw new ConfigurationException("Unable to find agent.properties."); + } + + s_logger.info("agent.properties found at " + file.getAbsolutePath()); + + try { + Properties _properties = new Properties(); + _properties.load(new FileInputStream(file)); + Set names = _properties.stringPropertyNames(); + for (String key : params.keySet()) { + if (!names.contains(key)) { + _properties.setProperty(key, (String)params.get(key)); + } + } + _properties.store(new FileOutputStream(file), ""); + } catch (final FileNotFoundException ex) { + throw new CloudRuntimeException("Cannot find the file: " + file.getAbsolutePath(), ex); + } catch (final IOException ex) { + throw new CloudRuntimeException("IOException in reading " + file.getAbsolutePath(), ex); + } + } @Override public boolean configure(String name, Map params) @@ -560,8 +586,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - - _localStoragePath = (String)params.get("local.storage.path"); if (_localStoragePath == null) { _localStoragePath = "/var/lib/libvirt/images/"; @@ -569,7 +593,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv _localStorageUUID = (String)params.get("local.storage.uuid"); if (_localStorageUUID == null) { - throw new ConfigurationException("Can't find local.storage.uuid"); + _localStorageUUID = UUID.randomUUID().toString(); + params.put("local.storage.uuid", _localStorageUUID); } value = (String)params.get("scripts.timeout"); @@ -663,6 +688,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv _storageResource = new LibvirtStorageResource(this, _storage, _createvmPath, _timeout, _mountPoint, _monitor); + saveProperties(params); return true; } @@ -903,7 +929,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } else if (cmd instanceof CopyVolumeCommand) { return execute((CopyVolumeCommand)cmd); } else { - s_logger.warn("Unsupported command "); + s_logger.warn("Unsupported command :"+cmd.toString()); return Answer.createUnsupportedCommandAnswer(cmd); } } catch (final IllegalArgumentException e) { @@ -1596,7 +1622,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv cmd.stringifyRules(), vif, brname); if (!result) { - s_logger.warn("Failed to program network rules for vm " + cmd.getVmName()); + s_logger.warn("Failed to program Ingress 
network rules for vm " + cmd.getVmName()); return new SecurityIngressRuleAnswer(cmd, false, "programming network rules failed"); } else { s_logger.debug("Programmed network rules for vm " + cmd.getVmName() + " guestIp=" + cmd.getGuestIp() + ", numrules=" + cmd.getRuleSet().length); @@ -1624,7 +1650,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv cmd.stringifyRules(), vif, brname); if (!result) { - s_logger.warn("Failed to program network rules for vm " + cmd.getVmName()); + s_logger.warn("Failed to program Egress network rules for vm " + cmd.getVmName()); return new SecurityEgressRuleAnswer(cmd, false, "programming network rules failed"); } else { s_logger.debug("Programmed network rules for vm " + cmd.getVmName() + " guestIp=" + cmd.getGuestIp() + ", numrules=" + cmd.getRuleSet().length); @@ -3490,7 +3516,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv cmd.add("--vmid", vmId); cmd.add("--vmip", guestIP); /* type of the rule : ingress or egress */ - cmd.add("--type", type); + cmd.add("--ruletype", type); cmd.add("--sig", sig); cmd.add("--seq", seq); cmd.add("--vmmac", mac); diff --git a/api/src/com/cloud/agent/api/BackupSnapshotCommand.java b/api/src/com/cloud/agent/api/BackupSnapshotCommand.java index afff7368c70..3d3591f628c 100644 --- a/api/src/com/cloud/agent/api/BackupSnapshotCommand.java +++ b/api/src/com/cloud/agent/api/BackupSnapshotCommand.java @@ -64,7 +64,8 @@ public class BackupSnapshotCommand extends SnapshotCommand { String prevSnapshotUuid, String prevBackupUuid, boolean isVolumeInactive, - String vmName) + String vmName, + int wait) { super(primaryStoragePoolNameLabel, secondaryStoragePoolURL, snapshotUuid, snapshotName, dcId, accountId, volumeId); this.snapshotId = snapshotId; @@ -74,6 +75,7 @@ public class BackupSnapshotCommand extends SnapshotCommand { this.vmName = vmName; this.pool = new StorageFilerTO(pool); setVolumePath(volumePath); + setWait(wait); } public String getPrevSnapshotUuid() { diff --git a/api/src/com/cloud/agent/api/CheckHealthCommand.java b/api/src/com/cloud/agent/api/CheckHealthCommand.java index f78d7cb49aa..937775e603f 100644 --- a/api/src/com/cloud/agent/api/CheckHealthCommand.java +++ b/api/src/com/cloud/agent/api/CheckHealthCommand.java @@ -20,8 +20,11 @@ package com.cloud.agent.api; public class CheckHealthCommand extends Command { - public CheckHealthCommand() {} - + public CheckHealthCommand() { + setWait(50); + } + + @Override public boolean executeInSequence() { return false; diff --git a/api/src/com/cloud/agent/api/CheckOnHostCommand.java b/api/src/com/cloud/agent/api/CheckOnHostCommand.java index 2bbb564acd4..5bb5d2a203d 100644 --- a/api/src/com/cloud/agent/api/CheckOnHostCommand.java +++ b/api/src/com/cloud/agent/api/CheckOnHostCommand.java @@ -29,6 +29,7 @@ public class CheckOnHostCommand extends Command { public CheckOnHostCommand(Host host) { this.host = new HostTO(host); + setWait(20); } public HostTO getHost() { diff --git a/api/src/com/cloud/agent/api/CheckVirtualMachineCommand.java b/api/src/com/cloud/agent/api/CheckVirtualMachineCommand.java index 3eea2979b4b..346328cc69f 100644 --- a/api/src/com/cloud/agent/api/CheckVirtualMachineCommand.java +++ b/api/src/com/cloud/agent/api/CheckVirtualMachineCommand.java @@ -27,6 +27,7 @@ public class CheckVirtualMachineCommand extends Command { public CheckVirtualMachineCommand(String vmName) { this.vmName = vmName; + setWait(20); } public String getVmName() { diff --git a/api/src/com/cloud/agent/api/Command.java 
b/api/src/com/cloud/agent/api/Command.java index 120ed6c7cb6..2263d90fd39 100755 --- a/api/src/com/cloud/agent/api/Command.java +++ b/api/src/com/cloud/agent/api/Command.java @@ -28,13 +28,23 @@ import com.cloud.agent.api.LogLevel.Log4jLevel; * all of the methods that needs to be implemented by the children classes. * */ -public abstract class Command { +public abstract class Command { // allow command to carry over hypervisor or other environment related context info @LogLevel(Log4jLevel.Trace) protected Map contextMap = new HashMap(); + private int wait; //in second protected Command() { + this.wait = 0; + } + + public int getWait() { + return wait; + } + + public void setWait(int wait) { + this.wait = wait; } @Override diff --git a/api/src/com/cloud/agent/api/CreatePrivateTemplateFromSnapshotCommand.java b/api/src/com/cloud/agent/api/CreatePrivateTemplateFromSnapshotCommand.java index 13581099af9..eb3cf8dcae0 100644 --- a/api/src/com/cloud/agent/api/CreatePrivateTemplateFromSnapshotCommand.java +++ b/api/src/com/cloud/agent/api/CreatePrivateTemplateFromSnapshotCommand.java @@ -52,12 +52,14 @@ public class CreatePrivateTemplateFromSnapshotCommand extends SnapshotCommand { String backedUpSnapshotName, String origTemplateInstallPath, Long newTemplateId, - String templateName) + String templateName, + int wait) { super(primaryStoragePoolNameLabel, secondaryStoragePoolURL, backedUpSnapshotUuid, backedUpSnapshotName, dcId, accountId, volumeId); this.origTemplateInstallPath = origTemplateInstallPath; this.newTemplateId = newTemplateId; this.templateName = templateName; + setWait(wait); } /** diff --git a/api/src/com/cloud/agent/api/CreatePrivateTemplateFromVolumeCommand.java b/api/src/com/cloud/agent/api/CreatePrivateTemplateFromVolumeCommand.java index 60f34e285b6..20e26607bbf 100644 --- a/api/src/com/cloud/agent/api/CreatePrivateTemplateFromVolumeCommand.java +++ b/api/src/com/cloud/agent/api/CreatePrivateTemplateFromVolumeCommand.java @@ -31,14 +31,15 @@ public class CreatePrivateTemplateFromVolumeCommand extends SnapshotCommand { public CreatePrivateTemplateFromVolumeCommand() {} - public CreatePrivateTemplateFromVolumeCommand(String secondaryStorageURL, long templateId, long accountId, String userSpecifiedName, String uniqueName, String volumePath, String vmName) { + public CreatePrivateTemplateFromVolumeCommand(String secondaryStorageURL, long templateId, long accountId, String userSpecifiedName, String uniqueName, String volumePath, String vmName, int wait) { _secondaryStorageURL = secondaryStorageURL; _templateId = templateId; _accountId = accountId; _userSpecifiedName = userSpecifiedName; _uniqueName = uniqueName; _volumePath = volumePath; - _vmName = vmName; + _vmName = vmName; + setWait(wait); } @Override diff --git a/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java b/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java index b6a2197d8d1..a9c81486f2a 100644 --- a/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java +++ b/api/src/com/cloud/agent/api/CreateVolumeFromSnapshotCommand.java @@ -50,9 +50,10 @@ public class CreateVolumeFromSnapshotCommand extends SnapshotCommand { Long accountId, Long volumeId, String backedUpSnapshotUuid, - String backedUpSnapshotName) + String backedUpSnapshotName, + int wait) { super(primaryStoragePoolNameLabel, secondaryStoragePoolURL, backedUpSnapshotUuid, backedUpSnapshotName, dcId, accountId, volumeId); + setWait(wait); } - } \ No newline at end of file diff --git 
a/api/src/com/cloud/agent/api/NetworkUsageAnswer.java b/api/src/com/cloud/agent/api/NetworkUsageAnswer.java index 6138f7a1314..973bef08946 100644 --- a/api/src/com/cloud/agent/api/NetworkUsageAnswer.java +++ b/api/src/com/cloud/agent/api/NetworkUsageAnswer.java @@ -19,9 +19,10 @@ package com.cloud.agent.api; import com.cloud.agent.api.LogLevel.Log4jLevel; -@LogLevel(Log4jLevel.Trace) +@LogLevel(Log4jLevel.Debug) public class NetworkUsageAnswer extends Answer { - Long bytesSent; + String routerName; + Long bytesSent; Long bytesReceived; protected NetworkUsageAnswer() { @@ -31,6 +32,7 @@ public class NetworkUsageAnswer extends Answer { super(cmd, true, details); this.bytesReceived = bytesReceived; this.bytesSent = bytesSent; + routerName = cmd.getDomRName(); } @@ -49,4 +51,8 @@ public class NetworkUsageAnswer extends Answer { public Long getBytesSent() { return bytesSent; } + + public String getRouterName() { + return routerName; + } } diff --git a/api/src/com/cloud/agent/api/PingTestCommand.java b/api/src/com/cloud/agent/api/PingTestCommand.java index 98970315b07..308f47700fc 100644 --- a/api/src/com/cloud/agent/api/PingTestCommand.java +++ b/api/src/com/cloud/agent/api/PingTestCommand.java @@ -27,12 +27,14 @@ public class PingTestCommand extends Command { public PingTestCommand() {} public PingTestCommand(String computingHostIp) { - _computingHostIp = computingHostIp; + _computingHostIp = computingHostIp; + setWait(20); } public PingTestCommand(String routerIp, String privateIp) { _routerIp = routerIp; - _privateIp = privateIp; + _privateIp = privateIp; + setWait(20); } public String getComputingHostIp() { diff --git a/api/src/com/cloud/agent/api/StartupRoutingCommand.java b/api/src/com/cloud/agent/api/StartupRoutingCommand.java index 616b5c7f10f..baecda31bef 100755 --- a/api/src/com/cloud/agent/api/StartupRoutingCommand.java +++ b/api/src/com/cloud/agent/api/StartupRoutingCommand.java @@ -52,6 +52,7 @@ public class StartupRoutingCommand extends StartupCommand { String pool; HypervisorType hypervisorType; Map hostDetails; //stuff like host os, cpu capabilities + String hypervisorVersion; public StartupRoutingCommand() { super(Host.Type.Routing); @@ -59,7 +60,7 @@ public class StartupRoutingCommand extends StartupCommand { getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), RouterPrivateIpStrategy.DcGlobal.toString()); } - + public StartupRoutingCommand(int cpus, long speed, long memory, @@ -71,7 +72,7 @@ public class StartupRoutingCommand extends StartupCommand { this(cpus, speed, memory, dom0MinMemory, caps, hypervisorType, vms); getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStrategy.toString()); } - + public StartupRoutingCommand(int cpus, long speed, long memory, @@ -82,50 +83,55 @@ public class StartupRoutingCommand extends StartupCommand { this(cpus, speed, memory, dom0MinMemory, caps, hypervisorType, new HashMap(), new HashMap()); getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStrategy.toString()); } - + public StartupRoutingCommand(int cpus, - long speed, - long memory, - long dom0MinMemory, - final String caps, - final HypervisorType hypervisorType, - final Map hostDetails, - Map vms) { - super(Host.Type.Routing); - this.cpus = cpus; - this.speed = speed; - this.memory = memory; - this.dom0MinMemory = dom0MinMemory; - this.vms = vms; - this.hypervisorType = hypervisorType; - this.hostDetails = hostDetails; - this.caps = caps; - this.poolSync = false; + long speed, + long memory, + long dom0MinMemory, + 
final String caps, + final HypervisorType hypervisorType, + final Map hostDetails, + Map vms) { + super(Host.Type.Routing); + this.cpus = cpus; + this.speed = speed; + this.memory = memory; + this.dom0MinMemory = dom0MinMemory; + this.vms = vms; + this.hypervisorType = hypervisorType; + this.hostDetails = hostDetails; + this.caps = caps; + this.poolSync = false; } - + public StartupRoutingCommand(int cpus2, long speed2, long memory2, - long dom0MinMemory2, String caps2, HypervisorType hypervisorType2, - Map vms2) { - this(cpus2, speed2, memory2, dom0MinMemory2, caps2, hypervisorType2, new HashMap(), vms2); - } - - public void setChanges(Map vms) { + long dom0MinMemory2, String caps2, HypervisorType hypervisorType2, + Map vms2) { + this(cpus2, speed2, memory2, dom0MinMemory2, caps2, hypervisorType2, new HashMap(), vms2); + } + + public StartupRoutingCommand(int cpus, long speed, long memory, long dom0MinMemory, final String caps, final HypervisorType hypervisorType, final Map hostDetails, Map vms, String hypervisorVersion) { + this(cpus, speed, memory, dom0MinMemory, caps, hypervisorType, hostDetails, vms); + this.hypervisorVersion = hypervisorVersion; + } + + public void setChanges(Map vms) { this.vms = vms; } - - public void setStateChanges(Map vms) { - for( String vm_name : vms.keySet() ) { - if( this.vms == null ) { - this.vms = new HashMap(); - } - this.vms.put(vm_name, new VmState(vms.get(vm_name), null)); - } - } + + public void setStateChanges(Map vms) { + for( String vm_name : vms.keySet() ) { + if( this.vms == null ) { + this.vms = new HashMap(); + } + this.vms.put(vm_name, new VmState(vms.get(vm_name), null)); + } + } public int getCpus() { return cpus; } - + public String getCapabilities() { return caps; } @@ -145,36 +151,36 @@ getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStr public Map getVmStates() { return vms; } - + public void setSpeed(long speed) { this.speed = speed; } - + public void setCpus(int cpus) { this.cpus = cpus; } - + public void setMemory(long memory) { this.memory = memory; } - + public void setDom0MinMemory(long dom0MinMemory) { this.dom0MinMemory = dom0MinMemory; } - + public void setCaps(String caps) { this.caps = caps; } - + public String getPool() { - return pool; - } - - public void setPool(String pool) { - this.pool = pool; + return pool; } - public boolean isPoolSync() { + public void setPool(String pool) { + this.pool = pool; + } + + public boolean isPoolSync() { return poolSync; } @@ -183,19 +189,27 @@ getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStr } public HypervisorType getHypervisorType() { - return hypervisorType; - } + return hypervisorType; + } - public void setHypervisorType(HypervisorType hypervisorType) { - this.hypervisorType = hypervisorType; - } + public void setHypervisorType(HypervisorType hypervisorType) { + this.hypervisorType = hypervisorType; + } - public Map getHostDetails() { - return hostDetails; - } + public Map getHostDetails() { + return hostDetails; + } - public void setHostDetails(Map hostDetails) { - this.hostDetails = hostDetails; - } + public void setHostDetails(Map hostDetails) { + this.hostDetails = hostDetails; + } + + public String getHypervisorVersion() { + return hypervisorVersion; + } + + public void setHypervisorVersion(String hypervisorVersion) { + this.hypervisorVersion = hypervisorVersion; + } } diff --git a/api/src/com/cloud/agent/api/storage/CopyVolumeCommand.java b/api/src/com/cloud/agent/api/storage/CopyVolumeCommand.java index 
3e367230795..cbe5ff03d11 100644 --- a/api/src/com/cloud/agent/api/storage/CopyVolumeCommand.java +++ b/api/src/com/cloud/agent/api/storage/CopyVolumeCommand.java @@ -34,15 +34,16 @@ public class CopyVolumeCommand extends Command { public CopyVolumeCommand() { } - public CopyVolumeCommand(long volumeId, String volumePath, StoragePool pool, String secondaryStorageURL, boolean toSecondaryStorage) { + public CopyVolumeCommand(long volumeId, String volumePath, StoragePool pool, String secondaryStorageURL, boolean toSecondaryStorage, int wait) { this.volumeId = volumeId; this.volumePath = volumePath; this.pool = new StorageFilerTO(pool); this.secondaryStorageURL = secondaryStorageURL; this.toSecondaryStorage = toSecondaryStorage; + setWait(wait); } - @Override + @Override public boolean executeInSequence() { return true; } diff --git a/api/src/com/cloud/agent/api/storage/PrimaryStorageDownloadCommand.java b/api/src/com/cloud/agent/api/storage/PrimaryStorageDownloadCommand.java index 4f514a249a6..58de20e196d 100644 --- a/api/src/com/cloud/agent/api/storage/PrimaryStorageDownloadCommand.java +++ b/api/src/com/cloud/agent/api/storage/PrimaryStorageDownloadCommand.java @@ -32,15 +32,16 @@ public class PrimaryStorageDownloadCommand extends AbstractDownloadCommand { String secondaryStorageUrl; String primaryStorageUrl; - protected PrimaryStorageDownloadCommand() { + protected PrimaryStorageDownloadCommand() { } - public PrimaryStorageDownloadCommand(String name, String url, ImageFormat format, long accountId, long poolId, String poolUuid) { + public PrimaryStorageDownloadCommand(String name, String url, ImageFormat format, long accountId, long poolId, String poolUuid, int wait) { super(name, url, format, accountId); this.poolId = poolId; this.poolUuid = poolUuid; + setWait(wait); } - + public String getPoolUuid() { return poolUuid; } diff --git a/api/src/com/cloud/api/ApiConstants.java b/api/src/com/cloud/api/ApiConstants.java index c57da2e5851..f8705536314 100755 --- a/api/src/com/cloud/api/ApiConstants.java +++ b/api/src/com/cloud/api/ApiConstants.java @@ -257,4 +257,7 @@ public class ApiConstants { public static final String KEYBOARD="keyboard"; public static final String OPEN_FIREWALL="openfirewall"; public static final String TEMPLATE_TAG = "templatetag"; + public static final String HYPERVISOR_VERSION = "hypervisorversion"; + public static final String MAX_GUESTS_LIMIT = "maxguestslimit"; + } diff --git a/api/src/com/cloud/api/ResponseGenerator.java b/api/src/com/cloud/api/ResponseGenerator.java index 51ed0e9b55b..37ec55bb2c3 100755 --- a/api/src/com/cloud/api/ResponseGenerator.java +++ b/api/src/com/cloud/api/ResponseGenerator.java @@ -35,6 +35,7 @@ import com.cloud.api.response.ExtractResponse; import com.cloud.api.response.FirewallResponse; import com.cloud.api.response.FirewallRuleResponse; import com.cloud.api.response.HostResponse; +import com.cloud.api.response.HypervisorCapabilitiesResponse; import com.cloud.api.response.IPAddressResponse; import com.cloud.api.response.InstanceGroupResponse; import com.cloud.api.response.IpForwardingRuleResponse; @@ -72,6 +73,7 @@ import com.cloud.dc.Vlan; import com.cloud.domain.Domain; import com.cloud.event.Event; import com.cloud.host.Host; +import com.cloud.hypervisor.HypervisorCapabilities; import com.cloud.network.IpAddress; import com.cloud.network.Network; import com.cloud.network.RemoteAccessVpn; @@ -81,8 +83,8 @@ import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.LoadBalancer; import 
com.cloud.network.rules.PortForwardingRule; import com.cloud.network.rules.StaticNatRule; -import com.cloud.network.security.IngressRule; import com.cloud.network.security.EgressRule; +import com.cloud.network.security.IngressRule; import com.cloud.network.security.SecurityGroup; import com.cloud.network.security.SecurityGroupRules; import com.cloud.offering.DiskOffering; @@ -162,20 +164,20 @@ public interface ResponseGenerator { Account findAccountByNameDomain(String accountName, Long domainId); VirtualMachineTemplate findTemplateById(Long templateId); - + Host findHostById(Long hostId); - + List createTemplateResponses(long templateId, long zoneId, boolean readyOnly); - + VpnUsersResponse createVpnUserResponse(VpnUser user); RemoteAccessVpnResponse createRemoteAccessVpnResponse(RemoteAccessVpn vpn); List createTemplateResponses(long templateId, Long zoneId, boolean readyOnly); List createTemplateResponses(long templateId, Long snapshotId, Long volumeId, boolean readyOnly); - + ListResponse createSecurityGroupResponses(List networkGroups); - + SecurityGroupResponse createSecurityGroupResponseFromIngressRule(List ingressRules); SecurityGroupResponse createSecurityGroupResponseFromEgressRule(List egressRules); @@ -197,25 +199,27 @@ public interface ResponseGenerator { TemplatePermissionsResponse createTemplatePermissionsResponse(List accountNames, Long id, boolean isAdmin); AsyncJobResponse queryJobResult(QueryAsyncJobResultCmd cmd); - + NetworkOfferingResponse createNetworkOfferingResponse(NetworkOffering offering); - + NetworkResponse createNetworkResponse(Network network); - UserResponse createUserResponse(User user); + UserResponse createUserResponse(User user); - AccountResponse createUserAccountResponse(UserAccount user); - - Long getSecurityGroupId(String groupName, long accountId); + AccountResponse createUserAccountResponse(UserAccount user); + + Long getSecurityGroupId(String groupName, long accountId); List createIsoResponses(long isoId, Long zoneId, boolean readyOnly); ProjectResponse createProjectResponse(Project project); - + List createIsoResponses(VirtualMachineTemplate iso, long zoneId, boolean readyOnly); - List createTemplateResponses(long templateId, Long vmId); - + List createTemplateResponses(long templateId, Long vmId); + FirewallResponse createFirewallResponse(FirewallRule fwRule); + HypervisorCapabilitiesResponse createHypervisorCapabilitiesResponse(HypervisorCapabilities hpvCapabilities); + } diff --git a/api/src/com/cloud/api/commands/AssociateIPAddrCmd.java b/api/src/com/cloud/api/commands/AssociateIPAddrCmd.java index 3b1e99b0c95..3352df62cf8 100644 --- a/api/src/com/cloud/api/commands/AssociateIPAddrCmd.java +++ b/api/src/com/cloud/api/commands/AssociateIPAddrCmd.java @@ -98,7 +98,8 @@ public class AssociateIPAddrCmd extends BaseAsyncCreateCmd { if (zone.getNetworkType() == NetworkType.Advanced) { List networks = _networkService.getVirtualNetworksOwnedByAccountInZone(getAccountName(), getDomainId(), getZoneId()); if (networks.size() == 0) { - throw new InvalidParameterValueException("Account name=" + getAccountName() + " domainId=" + getDomainId() + " doesn't have virtual networks in zone " + getZoneId()); + String domain = _accountService.getDomain(getDomainId()).getName(); + throw new InvalidParameterValueException("Account name=" + getAccountName() + " domain=" + domain + " doesn't have virtual networks in zone=" + zone.getName()); } assert (networks.size() <= 1) : "Too many virtual networks. 
This logic should be obsolete"; return networks.get(0).getId(); diff --git a/api/src/com/cloud/api/commands/ListHypervisorCapabilitiesCmd.java b/api/src/com/cloud/api/commands/ListHypervisorCapabilitiesCmd.java new file mode 100644 index 00000000000..d857dd31f1b --- /dev/null +++ b/api/src/com/cloud/api/commands/ListHypervisorCapabilitiesCmd.java @@ -0,0 +1,91 @@ +/** + * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ +package com.cloud.api.commands; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.log4j.Logger; + +import com.cloud.api.ApiConstants; +import com.cloud.api.BaseListCmd; +import com.cloud.api.Implementation; +import com.cloud.api.Parameter; +import com.cloud.api.response.HypervisorCapabilitiesResponse; +import com.cloud.api.response.ListResponse; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorCapabilities; + +@Implementation(description="Lists all hypervisor capabilities.", responseObject=HypervisorCapabilitiesResponse.class) +public class ListHypervisorCapabilitiesCmd extends BaseListCmd { + public static final Logger s_logger = Logger.getLogger(ListHypervisorCapabilitiesCmd.class.getName()); + + private static final String s_name = "listhypervisorcapabilitiesresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ID, type=CommandType.LONG, description="ID of the hypervisor capability") + private Long id; + + @Parameter(name=ApiConstants.HYPERVISOR, type=CommandType.STRING, description="the hypervisor for which to restrict the search") + private String hypervisor; + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public HypervisorType getHypervisor() { + if(hypervisor != null){ + return HypervisorType.getType(hypervisor); + }else{ + return null; + } + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public void execute(){ + List hpvCapabilities = _mgr.listHypervisorCapabilities(getId(), getHypervisor(), this.getStartIndex(), this.getPageSizeVal()); + ListResponse response = new ListResponse(); + List hpvCapabilitiesResponses = new ArrayList(); + for (HypervisorCapabilities capability : hpvCapabilities) { + HypervisorCapabilitiesResponse hpvCapabilityResponse = _responseGenerator.createHypervisorCapabilitiesResponse(capability); + hpvCapabilityResponse.setObjectName("hypervisorCapabilities"); + 
hpvCapabilitiesResponses.add(hpvCapabilityResponse); + } + + response.setResponses(hpvCapabilitiesResponses); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } +} diff --git a/api/src/com/cloud/api/commands/UpdateHypervisorCapabilitiesCmd.java b/api/src/com/cloud/api/commands/UpdateHypervisorCapabilitiesCmd.java new file mode 100644 index 00000000000..4497a44fc5c --- /dev/null +++ b/api/src/com/cloud/api/commands/UpdateHypervisorCapabilitiesCmd.java @@ -0,0 +1,95 @@ +/** + * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ +package com.cloud.api.commands; + +import org.apache.log4j.Logger; + +import com.cloud.api.ApiConstants; +import com.cloud.api.BaseCmd; +import com.cloud.api.Implementation; +import com.cloud.api.Parameter; +import com.cloud.api.ServerApiException; +import com.cloud.api.response.HypervisorCapabilitiesResponse; +import com.cloud.api.response.ServiceOfferingResponse; +import com.cloud.hypervisor.HypervisorCapabilities; +import com.cloud.user.Account; + + +@Implementation(description="Updates a hypervisor capabilities.", responseObject=ServiceOfferingResponse.class) +public class UpdateHypervisorCapabilitiesCmd extends BaseCmd { + public static final Logger s_logger = Logger.getLogger(UpdateHypervisorCapabilitiesCmd.class.getName()); + private static final String s_name = "updatehypervisorcapabilitiesresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ID, type=CommandType.LONG, description="ID of the hypervisor capability") + private Long id; + + @Parameter(name=ApiConstants.SECURITY_GROUP_EANBLED, type=CommandType.BOOLEAN, description="set true to enable security group for this hypervisor.") + private Boolean securityGroupEnabled; + + @Parameter(name=ApiConstants.MAX_GUESTS_LIMIT, type=CommandType.LONG, description="the max number of Guest VMs per host for this hypervisor.") + private Long maxGuestsLimit; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Boolean getSecurityGroupEnabled() { + return securityGroupEnabled; + } + + public Long getId() { + return id; + } + + public Long getMaxGuestsLimit() { + return maxGuestsLimit; + } + + + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + return Account.ACCOUNT_ID_SYSTEM; + } + + @Override + public void execute(){ + HypervisorCapabilities result = 
_mgr.updateHypervisorCapabilities(getId(), getMaxGuestsLimit(), getSecurityGroupEnabled()); + if (result != null){ + HypervisorCapabilitiesResponse response = _responseGenerator.createHypervisorCapabilitiesResponse(result); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "Failed to update hypervisor capabilities"); + } + } +} diff --git a/api/src/com/cloud/api/response/HostResponse.java b/api/src/com/cloud/api/response/HostResponse.java index cd8e7fb2126..5d6b06a350d 100755 --- a/api/src/com/cloud/api/response/HostResponse.java +++ b/api/src/com/cloud/api/response/HostResponse.java @@ -80,7 +80,7 @@ public class HostResponse extends BaseResponse { @SerializedName("cpuused") @Param(description="the amount of the host's CPU currently used") private String cpuUsed; - + @SerializedName("cpuwithoverprovisioning") @Param(description="the amount of the host's CPU after applying the cpu.overprovisioning.factor ") private String cpuWithOverprovisioning; @@ -122,7 +122,7 @@ public class HostResponse extends BaseResponse { @SerializedName("clustername") @Param(description="the cluster name of the host") private String clusterName; - + @SerializedName("clustertype") @Param(description="the cluster type of the cluster that host belongs to") private String clusterType; @@ -137,28 +137,31 @@ public class HostResponse extends BaseResponse { @SerializedName("events") @Param(description="events available for the host") private String events; - + @SerializedName(ApiConstants.JOB_ID) @Param(description="shows the current pending asynchronous job ID. This tag is not returned if no current pending jobs are acting on the host") private Long jobId; @SerializedName("jobstatus") @Param(description="shows the current pending asynchronous job status") private Integer jobStatus; - + @SerializedName("hosttags") @Param(description="comma-separated list of tags for the host") private String hostTags; - + @SerializedName("hasEnoughCapacity") @Param(description="true if this host has enough CPU and RAM capacity to migrate a VM to it, false otherwise") private Boolean hasEnoughCapacity; - + @SerializedName("allocationstate") @Param(description="the allocation state of the host") private String allocationState; + @SerializedName(ApiConstants.HYPERVISOR_VERSION) @Param(description="the hypervisor version") + private String hypervisorVersion; + @Override public Long getObjectId() { return getId(); } - + @Override public Long getJobId() { return jobId; @@ -168,7 +171,7 @@ public class HostResponse extends BaseResponse { public void setJobId(Long jobId) { this.jobId = jobId; } - + @Override public Integer getJobStatus() { return jobStatus; @@ -426,13 +429,13 @@ public class HostResponse extends BaseResponse { public void setClusterName(String clusterName) { this.clusterName = clusterName; } - + public String getClusterType() { - return clusterType; + return clusterType; } - + public void setClusterType(String clusterType) { - this.clusterType = clusterType; + this.clusterType = clusterType; } public Boolean isLocalStorageActive() { @@ -466,7 +469,7 @@ public class HostResponse extends BaseResponse { public void setEvents(String events) { this.events = events; } - + public String getHostTags() { return hostTags; } @@ -474,7 +477,7 @@ public class HostResponse extends BaseResponse { public void setHostTags(String hostTags) { this.hostTags = hostTags; } - + public Boolean hasEnoughCapacity() { return hasEnoughCapacity; } @@ -482,20 
+485,29 @@ public class HostResponse extends BaseResponse { public void setHasEnoughCapacity(Boolean hasEnoughCapacity) { this.hasEnoughCapacity = hasEnoughCapacity; } - + public String getAllocationState() { - return allocationState; + return allocationState; } - + public void setAllocationState(String allocationState) { - this.allocationState = allocationState; + this.allocationState = allocationState; } - + public String getCpuWithOverprovisioning() { return cpuWithOverprovisioning; } public void setCpuWithOverprovisioning(String cpuWithOverprovisioning) { this.cpuWithOverprovisioning = cpuWithOverprovisioning; - } + } + + public void setHypervisorVersion(String hypervisorVersion) { + this.hypervisorVersion = hypervisorVersion; + } + + public String getHypervisorVersion() { + return hypervisorVersion; + } + } diff --git a/api/src/com/cloud/api/response/HypervisorCapabilitiesResponse.java b/api/src/com/cloud/api/response/HypervisorCapabilitiesResponse.java new file mode 100644 index 00000000000..fe968369159 --- /dev/null +++ b/api/src/com/cloud/api/response/HypervisorCapabilitiesResponse.java @@ -0,0 +1,88 @@ +/** + * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ +package com.cloud.api.response; + +import com.cloud.api.ApiConstants; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class HypervisorCapabilitiesResponse extends BaseResponse { + @SerializedName(ApiConstants.ID) @Param(description="the ID of the hypervisor capabilities row") + private Long id; + + @SerializedName(ApiConstants.HYPERVISOR_VERSION) @Param(description="the hypervisor version") + private String hypervisorVersion; + + @SerializedName(ApiConstants.HYPERVISOR) @Param(description="the hypervisor type") + private HypervisorType hypervisor; + + @SerializedName(ApiConstants.MAX_GUESTS_LIMIT) @Param(description="the maximum number of guest vms recommended for this hypervisor") + private Long maxGuestsLimit; + + @SerializedName(ApiConstants.SECURITY_GROUP_EANBLED) @Param(description="true if security group is supported") + private boolean isSecurityGroupEnabled; + + + + @Override + public Long getObjectId() { + return getId(); + } + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + + public String getHypervisorVersion() { + return hypervisorVersion; + } + + public void setHypervisorVersion(String hypervisorVersion) { + this.hypervisorVersion = hypervisorVersion; + } + + public HypervisorType getHypervisor() { + return hypervisor; + } + + public void setHypervisor(HypervisorType hypervisor) { + this.hypervisor = hypervisor; + } + + public Long getMaxGuestsLimit() { + return maxGuestsLimit; + } + + public void setMaxGuestsLimit(Long maxGuestsLimit) { + this.maxGuestsLimit = maxGuestsLimit; + } + + public Boolean getIsSecurityGroupEnabled() { + return this.isSecurityGroupEnabled; + } + + public void setIsSecurityGroupEnabled(Boolean sgEnabled) { + this.isSecurityGroupEnabled = sgEnabled; + } +} diff --git a/api/src/com/cloud/host/Host.java b/api/src/com/cloud/host/Host.java index 280ad33d72b..7cd674c0aba 100755 --- a/api/src/com/cloud/host/Host.java +++ b/api/src/com/cloud/host/Host.java @@ -36,7 +36,7 @@ public interface Host { ExternalLoadBalancer(false), PxeServer(false), TrafficMonitor(false), - + ExternalDhcp(false), SecondaryStorageVM(true), LocalSecondaryStorage(false); @@ -44,11 +44,11 @@ public interface Host { private Type(boolean virtual) { _virtual = virtual; } - + public boolean isVirtual() { return _virtual; } - + public static String[] toStrings(Host.Type... types) { String[] strs = new String[types.length]; for (int i = 0; i < types.length; i++) { @@ -57,27 +57,27 @@ public interface Host { return strs; } } - + public enum HostAllocationState { Disabled, Enabled; } - + /** * @return id of the host. */ long getId(); - + /** * @return name of the machine. */ String getName(); - + /** * @return the type of host. */ Type getType(); - + /** * @return the date the host first registered */ @@ -87,32 +87,32 @@ public interface Host { * @return current state of this machine. */ Status getStatus(); - + /** * @return the ip address of the host. */ String getPrivateIpAddress(); - + /** * @return the ip address of the host attached to the storage network. */ String getStorageIpAddress(); - + /** * @return the mac address of the host. */ String getGuid(); - + /** * @return total amount of memory. */ Long getTotalMemory(); - + /** * @return # of cores in a machine. Note two cpus with two cores each returns 4. */ Integer getCpus(); - + /** * @return speed of each cpu in mhz. 
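Note on the response class just added: the new UpdateHypervisorCapabilitiesCmd above hands its result to _responseGenerator.createHypervisorCapabilitiesResponse(result), but that helper itself is not part of this excerpt. A minimal sketch of what it could look like, using only the HypervisorCapabilities interface and the HypervisorCapabilitiesResponse setters introduced by this patch (illustrative only, not the actual ApiResponseHelper code):

import com.cloud.api.response.HypervisorCapabilitiesResponse;
import com.cloud.hypervisor.HypervisorCapabilities;

public class HypervisorCapabilitiesResponseSketch {
    // Copies one capabilities row into the API response object added above.
    public static HypervisorCapabilitiesResponse createHypervisorCapabilitiesResponse(HypervisorCapabilities capabilities) {
        HypervisorCapabilitiesResponse response = new HypervisorCapabilitiesResponse();
        response.setId(capabilities.getId());
        response.setHypervisor(capabilities.getHypervisorType());
        response.setHypervisorVersion(capabilities.getHypervisorVersion());
        response.setMaxGuestsLimit(capabilities.getMaxGuestsLimit());
        response.setIsSecurityGroupEnabled(capabilities.isSecurityGroupEnabled());
        return response;
    }
}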
*/ @@ -122,32 +122,32 @@ public interface Host { * @return the proxy port that is being listened at the agent host */ Integer getProxyPort(); - + /** * @return the pod. */ Long getPodId(); - + /** * @return availability zone. */ long getDataCenterId(); - + /** * @return parent path. only used for storage server. */ String getParent(); - + /** * @return storage ip address. */ String getStorageIpAddressDeux(); - + /** * @return type of hypervisor */ HypervisorType getHypervisorType(); - + /** * @return disconnection date */ @@ -176,27 +176,29 @@ public interface Host { *@return removal date */ Date getRemoved(); - + Long getClusterId(); - + String getPublicIpAddress(); - + String getPublicNetmask(); - + String getPrivateNetmask(); - + String getStorageNetmask(); - + String getStorageMacAddress(); - + String getPublicMacAddress(); - + String getPrivateMacAddress(); - + String getStorageNetmaskDeux(); - + String getStorageMacAddressDeux(); - + HostAllocationState getHostAllocationState(); - + + String getHypervisorVersion(); + } diff --git a/api/src/com/cloud/host/Status.java b/api/src/com/cloud/host/Status.java index 7b932e4762d..c915eb6a0cb 100644 --- a/api/src/com/cloud/host/Status.java +++ b/api/src/com/cloud/host/Status.java @@ -180,15 +180,18 @@ public enum Status { s_fsm.addTransition(Status.Disconnected, Event.WaitedTooLong, Status.Alert); s_fsm.addTransition(Status.Disconnected, Event.Remove, Status.Removed); s_fsm.addTransition(Status.Disconnected, Event.HypervisorVersionChanged, Status.Disconnected); + s_fsm.addTransition(Status.Disconnected, Event.AgentDisconnected, Status.Disconnected); s_fsm.addTransition(Status.Down, Event.MaintenanceRequested, Status.PrepareForMaintenance); s_fsm.addTransition(Status.Down, Event.AgentConnected, Status.Connecting); s_fsm.addTransition(Status.Down, Event.Remove, Status.Removed); s_fsm.addTransition(Status.Down, Event.ManagementServerDown, Status.Down); + s_fsm.addTransition(Status.Down, Event.AgentDisconnected, Status.Down); s_fsm.addTransition(Status.Alert, Event.MaintenanceRequested, Status.PrepareForMaintenance); s_fsm.addTransition(Status.Alert, Event.AgentConnected, Status.Connecting); s_fsm.addTransition(Status.Alert, Event.Ping, Status.Up); s_fsm.addTransition(Status.Alert, Event.Remove, Status.Removed); s_fsm.addTransition(Status.Alert, Event.ManagementServerDown, Status.Alert); + s_fsm.addTransition(Status.Alert, Event.AgentDisconnected, Status.Alert); s_fsm.addTransition(Status.Rebalancing, Event.RebalanceFailed, Status.Disconnected); s_fsm.addTransition(Status.Rebalancing, Event.RebalanceCompleted, Status.Connecting); } diff --git a/api/src/com/cloud/hypervisor/Hypervisor.java b/api/src/com/cloud/hypervisor/Hypervisor.java index 15295727c4e..2f50e072512 100644 --- a/api/src/com/cloud/hypervisor/Hypervisor.java +++ b/api/src/com/cloud/hypervisor/Hypervisor.java @@ -20,50 +20,51 @@ package com.cloud.hypervisor; public class Hypervisor { public static enum HypervisorType { - None, //for storage hosts - Xen, - XenServer, - KVM, - VMware, - Hyperv, - VirtualBox, - Parralels, - BareMetal, - Simulator, - Ovm, - - Any; /*If you don't care about the hypervisor type*/ + None, //for storage hosts + Xen, + XenServer, + KVM, + VMware, + Hyperv, + VirtualBox, + Parralels, + BareMetal, + Simulator, + Ovm, - public static HypervisorType getType(String hypervisor) { - if (hypervisor == null) { - return HypervisorType.None; - } + Any; /*If you don't care about the hypervisor type*/ - if (hypervisor.equalsIgnoreCase("Xen")) { - return HypervisorType.Xen; 
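Worth calling out from the Status.java hunk above: the AgentDisconnected event now maps each of Disconnected, Down, and Alert back onto itself, so a late disconnect notification for a host already in one of those states becomes a harmless no-op instead of an illegal transition. A self-contained sketch of that transition-table idea follows; this is not CloudStack's own StateMachine class, and the class and method names here are illustrative only:

import java.util.EnumMap;
import java.util.Map;

public class HostStatusFsmSketch {
    enum State { Up, Connecting, Disconnected, Down, Alert, Removed }
    enum Event { AgentConnected, AgentDisconnected, Ping, Remove, ManagementServerDown }

    private final Map<State, Map<Event, State>> transitions = new EnumMap<State, Map<Event, State>>(State.class);

    void addTransition(State from, Event on, State to) {
        Map<Event, State> row = transitions.get(from);
        if (row == null) {
            row = new EnumMap<Event, State>(Event.class);
            transitions.put(from, row);
        }
        row.put(on, to);
    }

    State next(State current, Event event) {
        Map<Event, State> row = transitions.get(current);
        if (row == null || !row.containsKey(event)) {
            throw new IllegalStateException("No transition from " + current + " on " + event);
        }
        return row.get(event);
    }

    public static void main(String[] args) {
        HostStatusFsmSketch fsm = new HostStatusFsmSketch();
        // The new self-transitions: without them, a stray AgentDisconnected would throw here.
        fsm.addTransition(State.Disconnected, Event.AgentDisconnected, State.Disconnected);
        fsm.addTransition(State.Down, Event.AgentDisconnected, State.Down);
        fsm.addTransition(State.Alert, Event.AgentDisconnected, State.Alert);
        System.out.println(fsm.next(State.Down, Event.AgentDisconnected)); // prints Down
    }
}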
- } else if (hypervisor.equalsIgnoreCase("XenServer")) { - return HypervisorType.XenServer; - } else if (hypervisor.equalsIgnoreCase("KVM")) { - return HypervisorType.KVM; - } else if (hypervisor.equalsIgnoreCase("VMware")) { - return HypervisorType.VMware; - } else if (hypervisor.equalsIgnoreCase("Hyperv")) { - return HypervisorType.Hyperv; - } else if (hypervisor.equalsIgnoreCase("VirtualBox")) { - return HypervisorType.VirtualBox; - } else if (hypervisor.equalsIgnoreCase("Parralels")) { - return HypervisorType.Parralels; - }else if (hypervisor.equalsIgnoreCase("BareMetal")) { - return HypervisorType.BareMetal; - } else if (hypervisor.equalsIgnoreCase("Simulator")) { - return HypervisorType.Simulator; - } else if (hypervisor.equalsIgnoreCase("Ovm")) { - return HypervisorType.Ovm; - } else if (hypervisor.equalsIgnoreCase("Any")) { - return HypervisorType.Any; - } else { - return HypervisorType.None; - } - } + public static HypervisorType getType(String hypervisor) { + if (hypervisor == null) { + return HypervisorType.None; + } + + if (hypervisor.equalsIgnoreCase("Xen")) { + return HypervisorType.Xen; + } else if (hypervisor.equalsIgnoreCase("XenServer")) { + return HypervisorType.XenServer; + } else if (hypervisor.equalsIgnoreCase("KVM")) { + return HypervisorType.KVM; + } else if (hypervisor.equalsIgnoreCase("VMware")) { + return HypervisorType.VMware; + } else if (hypervisor.equalsIgnoreCase("Hyperv")) { + return HypervisorType.Hyperv; + } else if (hypervisor.equalsIgnoreCase("VirtualBox")) { + return HypervisorType.VirtualBox; + } else if (hypervisor.equalsIgnoreCase("Parralels")) { + return HypervisorType.Parralels; + }else if (hypervisor.equalsIgnoreCase("BareMetal")) { + return HypervisorType.BareMetal; + } else if (hypervisor.equalsIgnoreCase("Simulator")) { + return HypervisorType.Simulator; + } else if (hypervisor.equalsIgnoreCase("Ovm")) { + return HypervisorType.Ovm; + } else if (hypervisor.equalsIgnoreCase("Any")) { + return HypervisorType.Any; + } else { + return HypervisorType.None; + } + } } + } diff --git a/api/src/com/cloud/hypervisor/HypervisorCapabilities.java b/api/src/com/cloud/hypervisor/HypervisorCapabilities.java new file mode 100644 index 00000000000..6623740d2a7 --- /dev/null +++ b/api/src/com/cloud/hypervisor/HypervisorCapabilities.java @@ -0,0 +1,48 @@ +/** + * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ +package com.cloud.hypervisor; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; + + +/** + * HypervisorCapability represents one particular hypervisor version's capabilities. + */ +public interface HypervisorCapabilities { + /** + * @return id of the host. 
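The getType() change above is purely re-indentation, but its behavior is easy to lose in the diff noise: any case-insensitive match on an enum name wins, and null or unrecognized strings fall back to HypervisorType.None. A compact equivalent, shown only as a sketch and not as a change this patch makes:

import com.cloud.hypervisor.Hypervisor.HypervisorType;

public final class HypervisorTypeLookupSketch {
    // Same observable behavior as HypervisorType.getType() above:
    // case-insensitive match on the enum name, None for null or unknown input.
    public static HypervisorType fromString(String hypervisor) {
        if (hypervisor == null) {
            return HypervisorType.None;
        }
        for (HypervisorType type : HypervisorType.values()) {
            if (type.name().equalsIgnoreCase(hypervisor)) {
                return type;
            }
        }
        return HypervisorType.None;
    }
}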
+ */ + long getId(); + + /** + * @return type of hypervisor + */ + HypervisorType getHypervisorType(); + + + String getHypervisorVersion(); + + boolean isSecurityGroupEnabled(); + + /** + * @return the maxGuestslimit + */ + Long getMaxGuestsLimit(); + + +} diff --git a/api/src/com/cloud/network/Network.java b/api/src/com/cloud/network/Network.java index bf88bfe65f3..3495a3e5c23 100644 --- a/api/src/com/cloud/network/Network.java +++ b/api/src/com/cloud/network/Network.java @@ -91,6 +91,7 @@ public interface Network extends ControlledEntity { public static final Provider DhcpServer = new Provider("DhcpServer"); public static final Provider JuniperSRX = new Provider("JuniperSRX"); public static final Provider F5BigIp = new Provider("F5BigIp"); + public static final Provider NetscalerMPX = new Provider("NetscalerMPX"); public static final Provider ExternalDhcpServer = new Provider("ExternalDhcpServer"); public static final Provider ExternalGateWay = new Provider("ExternalGateWay"); public static final Provider ElasticLoadBalancerVm = new Provider("ElasticLoadBalancerVm"); diff --git a/api/src/com/cloud/server/ManagementService.java b/api/src/com/cloud/server/ManagementService.java index b0ec7b3e62c..123f56597a2 100755 --- a/api/src/com/cloud/server/ManagementService.java +++ b/api/src/com/cloud/server/ManagementService.java @@ -89,6 +89,8 @@ import com.cloud.exception.InternalErrorException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.Host; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorCapabilities; import com.cloud.network.IpAddress; import com.cloud.network.router.VirtualRouter; import com.cloud.offering.DiskOffering; @@ -307,7 +309,7 @@ public interface ManagementService { * @return List of capacities */ List listCapacityByType(ListCapacityByTypeCmd cmd); - + /** * List the permissions on a template. This will return a list of account names that have been granted permission to launch * instances from the template. @@ -508,5 +510,9 @@ public interface ManagementService { Pair, List> listHostsForMigrationOfVM(UserVm vm, Long startIndex, Long pageSize); String[] listEventTypes(); - + + List listHypervisorCapabilities(Long id, HypervisorType hypervisorType, Long startIndex, Long pageSizeVal); + + HypervisorCapabilities updateHypervisorCapabilities(Long id, Long maxGuestsLimit, Boolean securityGroupEnabled); + } diff --git a/api/src/com/cloud/storage/Snapshot.java b/api/src/com/cloud/storage/Snapshot.java index 453fa57abbc..d9c190e1d75 100644 --- a/api/src/com/cloud/storage/Snapshot.java +++ b/api/src/com/cloud/storage/Snapshot.java @@ -20,9 +20,10 @@ package com.cloud.storage; import java.util.Date; +import com.cloud.acl.ControlledEntity; import com.cloud.hypervisor.Hypervisor.HypervisorType; -public interface Snapshot { +public interface Snapshot extends ControlledEntity{ public enum Type { MANUAL, RECURRING, diff --git a/api/src/com/cloud/vm/VirtualMachine.java b/api/src/com/cloud/vm/VirtualMachine.java index a763acc966b..0920b0b1f7f 100755 --- a/api/src/com/cloud/vm/VirtualMachine.java +++ b/api/src/com/cloud/vm/VirtualMachine.java @@ -32,8 +32,6 @@ import com.cloud.utils.fsm.StateObject; */ public interface VirtualMachine extends RunningOn, ControlledEntity, StateObject { - public static final String PARAM_KEY_KEYBOARD = "keyboard"; - public enum State { Starting(true, "VM is being started. 
At this state, you should find host id filled which means it's being started on that host."), Running(false, "VM is running. host id has the host that it is running on."), diff --git a/build.xml b/build.xml index f1f4b7fb1ac..c6712e42be3 100755 --- a/build.xml +++ b/build.xml @@ -25,4 +25,5 @@ + diff --git a/build/build-cloud.xml b/build/build-cloud.xml index 1e3c64f10e5..489b4c48089 100755 --- a/build/build-cloud.xml +++ b/build/build-cloud.xml @@ -13,6 +13,7 @@ --> + @@ -62,6 +63,7 @@ + @@ -145,17 +147,27 @@ + + + + + + + + + + diff --git a/build/build-tests.xml b/build/build-tests.xml index 289394c936a..400465dde51 100755 --- a/build/build-tests.xml +++ b/build/build-tests.xml @@ -25,14 +25,14 @@ - + - + diff --git a/build/build-usage.xml b/build/build-usage.xml new file mode 100644 index 00000000000..eaba1d7f057 --- /dev/null +++ b/build/build-usage.xml @@ -0,0 +1,41 @@ + + + + + Cloud Stack Usage server build + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/build/developer.xml b/build/developer.xml index 2ace5c0a1c0..52c6866869c 100755 --- a/build/developer.xml +++ b/build/developer.xml @@ -20,6 +20,9 @@ + + + @@ -143,8 +146,16 @@ - - + + + + + + + + + + diff --git a/build/package.xml b/build/package.xml index 8f735c01324..175e47b61a4 100755 --- a/build/package.xml +++ b/build/package.xml @@ -189,12 +189,12 @@ - + - - + + diff --git a/client/.project b/client/.project index f3ae78416f1..d11f22afe31 100644 --- a/client/.project +++ b/client/.project @@ -5,6 +5,11 @@ + + org.python.pydev.PyDevBuilder + + + org.eclipse.jdt.core.javabuilder @@ -13,5 +18,6 @@ org.eclipse.jdt.core.javanature + org.python.pydev.pythonNature diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties index 21cd03bbb06..336c571d4c8 100644 --- a/client/WEB-INF/classes/resources/messages.properties +++ b/client/WEB-INF/classes/resources/messages.properties @@ -4,6 +4,8 @@ #Labels +label.ocfs2=OCFS2 + label.action.edit.host=Edit Host network.rate=Network Rate @@ -66,7 +68,6 @@ label.PING.CIFS.username=PING CIFS username label.PING.CIFS.password=PING CIFS password label.CPU.cap=CPU Cap -label.network.domain=Network Domain label.action.enable.zone=Enable Zone label.action.enable.zone.processing=Enabling Zone.... @@ -387,7 +388,6 @@ label.full=Full label.gateway=Gateway label.general.alerts=General Alerts label.generating.url=Generating URL -label.generating.url=Generating URL label.go.step.2=Go to Step 2 label.go.step.3=Go to Step 3 label.go.step.4=Go to Step 4 @@ -563,7 +563,6 @@ label.private.ips=Private IP Addresses label.private.port=Private Port label.private.zone=Private Zone label.protocol=Protocol -label.protocol=Protocol label.public.interface=Public Interface label.public.ip=Public IP Address label.public.ips=Public IP Addresses @@ -635,7 +634,6 @@ label.succeeded=Succeeded label.sunday=Sunday label.system.capacity=System Wide Capacity label.system.vm.type=System VM Type -label.system.vm.type=System VM Type label.system.vm=System VM label.system.vms=System VMs label.tagged=Tagged @@ -738,7 +736,6 @@ message.action.destroy.instance=Please confirm that you want to destroy this ins message.action.destroy.systemvm=Please confirm that you want to destroy this System VM. message.action.disable.static.NAT=Please confirm that you want to disable static NAT. message.action.enable.maintenance=Your host has been successfully prepared for maintenance. 
This process can take up to several minutes or longer depending on how many VMs are currently on this host. -message.action.force.reconnect=Please confirm that you want to force a reconnection for this host. message.action.force.reconnect=Your host has been successfully forced to reconnect. This process can take up to several minutes. message.action.host.enable.maintenance.mode=Enabling maintenance mode will cause a live migration of all running instances on this host to any available host. message.action.instance.reset.password=Please confirm that you want to change the ROOT password for this virtual machine. @@ -842,4 +839,4 @@ error.login=Your username/password does not match our records. error.menu.select=Unable to perform action due to no items being selected. error.mgmt.server.inaccessible=The Management Server is unaccessible. Please try again later. error.session.expired=Your session has expired. -error.unresolved.internet.name=Your internet name cannot be resolved. \ No newline at end of file +error.unresolved.internet.name=Your internet name cannot be resolved. diff --git a/client/WEB-INF/classes/resources/messages_es.properties b/client/WEB-INF/classes/resources/messages_es.properties index 30f0c3a88c8..de8a434065a 100644 --- a/client/WEB-INF/classes/resources/messages_es.properties +++ b/client/WEB-INF/classes/resources/messages_es.properties @@ -4,6 +4,8 @@ #Labels +label.ocfs2=OCFS2 + label.action.edit.host=edición Anfitrión network.rate=Tasa de red diff --git a/client/WEB-INF/classes/resources/messages_ja.properties b/client/WEB-INF/classes/resources/messages_ja.properties index 8e2a5105806..da6f133db5b 100644 --- a/client/WEB-INF/classes/resources/messages_ja.properties +++ b/client/WEB-INF/classes/resources/messages_ja.properties @@ -4,6 +4,8 @@ #Labels +label.ocfs2=OCFS2 + label.action.edit.host=ホストを編集する network.rate=ネットワーク速度 diff --git a/client/WEB-INF/classes/resources/messages_zh_CN.properties b/client/WEB-INF/classes/resources/messages_zh_CN.properties index 8de433f026f..c18156aa439 100644 --- a/client/WEB-INF/classes/resources/messages_zh_CN.properties +++ b/client/WEB-INF/classes/resources/messages_zh_CN.properties @@ -4,6 +4,8 @@ #Labels +label.ocfs2=OCFS2 + label.action.edit.host=编辑主机 network.rate=网络速率 diff --git a/client/bindir/cloud-setup-management.in b/client/bindir/cloud-setup-management.in index 29126e34df9..c82f7407318 100755 --- a/client/bindir/cloud-setup-management.in +++ b/client/bindir/cloud-setup-management.in @@ -4,10 +4,17 @@ from cloudutils.utilities import initLoging from cloudutils.cloudException import CloudRuntimeException, CloudInternalException from cloudutils.globalEnv import globalEnv from cloudutils.serviceConfigServer import cloudManagementConfig +from optparse import OptionParser if __name__ == '__main__': initLoging("/var/log/cloud/setupManagement.log") glbEnv = globalEnv() + parser = OptionParser() + parser.add_option("--https", action="store_true", dest="https", help="Enable HTTPs connection of management server") + (options, args) = parser.parse_args() + if options.https: + glbEnv.svrMode = "HttpsServer" + glbEnv.mode = "Server" print "Starting to configure CloudStack Management Server:" diff --git a/client/tomcatconf/cloudmanagementserver.keystore b/client/tomcatconf/cloudmanagementserver.keystore new file mode 100644 index 00000000000..3ee4d13565a Binary files /dev/null and b/client/tomcatconf/cloudmanagementserver.keystore differ diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index 
65295ce7558..73280121dec 100755 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -274,4 +274,7 @@ listProjects=com.cloud.api.commands.ListProjectsCmd;15 createFirewallRule=com.cloud.api.commands.CreateFirewallRuleCmd;15 deleteFirewallRule=com.cloud.api.commands.DeleteFirewallRuleCmd;15 listFirewallRules=com.cloud.api.commands.ListFirewallRulesCmd;15 - + +#### hypervisor capabilities commands +updateHypervisorCapabilities=com.cloud.api.commands.UpdateHypervisorCapabilitiesCmd;1 +listHypervisorCapabilities=com.cloud.api.commands.ListHypervisorCapabilitiesCmd;1 \ No newline at end of file diff --git a/client/tomcatconf/components-premium.xml.in b/client/tomcatconf/components-premium.xml.in index 82a903864c4..3d73ed27c10 100755 --- a/client/tomcatconf/components-premium.xml.in +++ b/client/tomcatconf/components-premium.xml.in @@ -31,7 +31,8 @@ - + + diff --git a/client/tomcatconf/db.properties.in b/client/tomcatconf/db.properties.in index c944023d7b3..f3a01a9835b 100644 --- a/client/tomcatconf/db.properties.in +++ b/client/tomcatconf/db.properties.in @@ -26,6 +26,14 @@ db.cloud.logAbandoned=true db.cloud.poolPreparedStatements=false db.cloud.url.params=prepStmtCacheSize=517&cachePrepStmts=true +# Cloud.com database SSL settings +db.cloud.useSSL=false +db.cloud.keyStore= +db.cloud.keyStorePassword= +db.cloud.trustStore= +db.cloud.trustStorePassword= + + # usage database settings db.usage.username=@DBUSER@ db.usage.password=@DBPW@ diff --git a/client/tomcatconf/server.xml.in b/client/tomcatconf/server-nonssl.xml similarity index 100% rename from client/tomcatconf/server.xml.in rename to client/tomcatconf/server-nonssl.xml diff --git a/client/tomcatconf/server-ssl.xml.in b/client/tomcatconf/server-ssl.xml.in new file mode 100755 index 00000000000..cdb9267b811 --- /dev/null +++ b/client/tomcatconf/server-ssl.xml.in @@ -0,0 +1,157 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/client/tomcatconf/tomcat6.conf.in b/client/tomcatconf/tomcat6-nonssl.conf.in similarity index 100% rename from client/tomcatconf/tomcat6.conf.in rename to client/tomcatconf/tomcat6-nonssl.conf.in diff --git a/client/tomcatconf/tomcat6-ssl.conf.in b/client/tomcatconf/tomcat6-ssl.conf.in new file mode 100644 index 00000000000..da03a7d18b3 --- /dev/null +++ b/client/tomcatconf/tomcat6-ssl.conf.in @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +# System-wide configuration file for tomcat6 services +# This will be sourced by tomcat6 and any secondary service +# Values will be overridden by service-specific configuration +# files in /etc/sysconfig +# +# Use this one to change default values for all services +# Change the service specific ones to affect only one service +# (see, for instance, /etc/sysconfig/tomcat6) +# + +# Where your java installation lives +#JAVA_HOME="/usr/lib/jvm/java" + +# Where your tomcat installation lives +CATALINA_BASE="@MSENVIRON@" +CATALINA_HOME="@MSENVIRON@" +JASPER_HOME="@MSENVIRON@" +CATALINA_TMPDIR="@MSENVIRON@/temp" + +# You can pass some parameters to java here if you wish to +#JAVA_OPTS="-Xminf0.1 -Xmaxf0.3" + +# Use JAVA_OPTS to set java.library.path for libtcnative.so +#JAVA_OPTS="-Djava.library.path=/usr/lib64" +JAVA_OPTS="-Djava.awt.headless=true -Djavax.net.ssl.trustStore=/etc/cloud/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com -Dcom.sun.management.jmxremote.port=45219 
-Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=128M" + +# What user should run tomcat +TOMCAT_USER="@MSUSER@" +# Do not remove the following line +TOMCAT6_USER="$TOMCAT_USER" + +TOMCAT_LOG="@MSLOGDIR@/catalina.out" + +# You can change your tomcat locale here +#LANG="en_US" + +# Run tomcat under the Java Security Manager +SECURITY_MANAGER="false" + +# Time to wait in seconds, before killing process +SHUTDOWN_WAIT="30" + +# Whether to annoy the user with "attempting to shut down" messages or not +SHUTDOWN_VERBOSE="false" + +# Set the TOMCAT_PID location +CATALINA_PID="@PIDDIR@/@PACKAGE@-management.pid" + +# Connector port is 8080 for this tomcat6 instance +#CONNECTOR_PORT="8080" + +# We pick up the classpath in the next line + +dummy=1 ; . @MSCONF@/classpath.conf diff --git a/cloud.spec b/cloud.spec index 1c45fa7a58d..c996950b04c 100644 --- a/cloud.spec +++ b/cloud.spec @@ -108,7 +108,6 @@ CloudStack uses. Summary: Cloud.com library dependencies Requires: java >= 1.6.0 Obsoletes: vmops-deps < %{version}-%{release} -Obsoletes: cloud-premium-deps < %{version}-%{release} Group: System Environment/Libraries %description deps This package contains a number of third-party dependencies @@ -228,6 +227,7 @@ Group: System Environment/Libraries %if 0%{?rhel} >= 6 Requires: cloud-kvm +Requires: cloud-qemu-img %else Requires: kvm %endif @@ -237,10 +237,6 @@ Requires: cloud-qemu-kvm Requires: cloud-qemu-img %endif -%if 0%{?rhel} >= 6 -Requires: cloud-qemu-img -%endif - %if 0%{?rhel} >= 5 Requires: qemu-img %endif @@ -311,24 +307,19 @@ The Cloud.com test package contains a suite of automated tests that the very much appreciated QA team at Cloud.com constantly uses to help increase the quality of the Cloud.com Stack. +%package usage +Summary: Cloud.com usage monitor +Obsoletes: vmops-usage < %{version}-%{release} +Requires: java >= 1.6.0 +Requires: %{name}-utils = %{version}, %{name}-core = %{version}, %{name}-deps = %{version}, %{name}-server = %{version}, %{name}-daemonize = %{version} +Requires: %{name}-setup = %{version} +Requires: %{name}-client = %{version} +License: CSL 1.1 +Group: System Environment/Libraries +%description usage +The Cloud.com usage monitor provides usage accounting across the entire cloud for +cloud operators to charge based on usage parameters. -#%if %{_premium} -# -# -#%package usage -#Summary: Cloud.com usage monitor -#Obsoletes: vmops-usage < %{version}-%{release} -#Requires: java >= 1.6.0 -#Requires: %{name}-utils = %{version}, %{name}-core = %{version}, %{name}-deps = %{version}, %{name}-server = %{version}, %{name}-premium = %{version}, %{name}-daemonize = %{version} -#Requires: %{name}-setup = %{version} -#Requires: %{name}-client = %{version} -#License: CSL 1.1 -#Group: System Environment/Libraries -#%description usage -#The Cloud.com usage monitor provides usage accounting across the entire cloud for -#cloud operators to charge based on usage parameters. 
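Tying together the SSL pieces introduced earlier in this patch (the --https flag in cloud-setup-management, the server-ssl.xml and tomcat6-ssl.conf variants, and the bundled cloudmanagementserver.keystore): a client that wants to talk to the HTTPS-enabled management server has to trust that keystore. The sketch below assumes the HTTPS connector listens on port 8443 and that the trust-store path and password match the tomcat6-ssl.conf.in values; neither assumption is confirmed by this excerpt, and a real API call would also need credentials or a session key:

import java.net.URL;
import javax.net.ssl.HttpsURLConnection;

public class HttpsApiClientSketch {
    public static void main(String[] args) throws Exception {
        // Assumed values taken from tomcat6-ssl.conf.in above; adjust for a real deployment.
        System.setProperty("javax.net.ssl.trustStore", "/etc/cloud/management/cloudmanagementserver.keystore");
        System.setProperty("javax.net.ssl.trustStorePassword", "vmops.com");

        // 8443 is an assumption (the server-ssl.xml content is not reproduced in this excerpt).
        // Without credentials the API will refuse the request, but a completed TLS handshake
        // shows the client trusts the bundled keystore.
        URL url = new URL("https://localhost:8443/client/api?command=listHypervisorCapabilities");
        HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
        System.out.println("HTTP status: " + conn.getResponseCode());
    }
}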
-# -#%endif %prep @@ -390,36 +381,29 @@ if [ "$1" == "1" ] ; then /sbin/chkconfig --level 345 %{name}-management on > /dev/null 2>&1 || true fi +%preun usage +if [ "$1" == "0" ] ; then + /sbin/chkconfig --del %{name}-usage > /dev/null 2>&1 || true + /sbin/service %{name}-usage stop > /dev/null 2>&1 || true +fi +%pre usage +id %{name} > /dev/null 2>&1 || /usr/sbin/useradd -M -c "Cloud.com unprivileged user" \ + -r -s /bin/sh -d %{_sharedstatedir}/%{name}/management %{name}|| true +# user harcoded here, also hardcoded on wscript -#%if %{_premium} -# -#%preun usage -#if [ "$1" == "0" ] ; then -# /sbin/chkconfig --del %{name}-usage > /dev/null 2>&1 || true -# /sbin/service %{name}-usage stop > /dev/null 2>&1 || true -#fi -# -#%pre usage -#id %{name} > /dev/null 2>&1 || /usr/sbin/useradd -M -c "Cloud.com unprivileged user" \ -# -r -s /bin/sh -d %{_sharedstatedir}/%{name}/management %{name}|| true -## user harcoded here, also hardcoded on wscript -# -#%post usage -#if [ "$1" == "1" ] ; then -# /sbin/chkconfig --add %{name}-usage > /dev/null 2>&1 || true -# /sbin/chkconfig --level 345 %{name}-usage on > /dev/null 2>&1 || true -#else -# /sbin/service %{name}-usage condrestart >/dev/null 2>&1 || true -#fi -# -#%endif +%post usage +if [ "$1" == "1" ] ; then + /sbin/chkconfig --add %{name}-usage > /dev/null 2>&1 || true + /sbin/chkconfig --level 345 %{name}-usage on > /dev/null 2>&1 || true +else + /sbin/service %{name}-usage condrestart >/dev/null 2>&1 || true +fi %pre agent-scripts id %{name} > /dev/null 2>&1 || /usr/sbin/useradd -M -c "Cloud.com unprivileged user" \ -r -s /bin/sh -d %{_sharedstatedir}/%{name}/management %{name}|| true - %preun agent if [ "$1" == "0" ] ; then /sbin/chkconfig --del %{name}-agent > /dev/null 2>&1 || true @@ -484,12 +468,15 @@ fi %files deps %defattr(0644,root,root,0755) -%{_javadir}/%{name}-commons-codec-1.4.jar +%{_javadir}/%{name}-commons-codec-1.5.jar +%{_javadir}/%{name}-commons-dbcp-1.4.jar +%{_javadir}/%{name}-commons-pool-1.5.6.jar +%{_javadir}/%{name}-google-gson-1.7.1.jar +%{_javadir}/%{name}-netscaler.jar %{_javadir}/%{name}-log4j-extras.jar %{_javadir}/%{name}-backport-util-concurrent-3.0.jar %{_javadir}/%{name}-ehcache.jar %{_javadir}/%{name}-email.jar -%{_javadir}/%{name}-gson.jar %{_javadir}/%{name}-httpcore-4.0.jar %{_javadir}/%{name}-libvirt-0.4.5.jar %{_javadir}/%{name}-log4j.jar @@ -512,8 +499,6 @@ fi %{_javadir}/vmware*.jar %{_javadir}/%{name}-jnetpcap.jar %{_javadir}/%{name}-junit.jar -%{_javadir}/%{name}-selenium-java-client-driver.jar -%{_javadir}/%{name}-selenium-server.jar %files core @@ -612,20 +597,15 @@ fi %{_libdir}/%{name}/test/* %config(noreplace) %{_sysconfdir}/%{name}/test/* -#%if %{_premium} -# -# -#%files usage -#%defattr(0644,root,root,0775) +%files usage +%defattr(0644,root,root,0775) #%{_javadir}/%{name}-usage.jar -#%attr(0755,root,root) %{_initrddir}/%{name}-usage -#%attr(0755,root,root) %{_libexecdir}/usage-runner +%attr(0755,root,root) %{_initrddir}/%{name}-usage +%attr(0755,root,root) %{_libexecdir}/usage-runner #%dir %attr(0770,root,%{name}) %{_localstatedir}/log/%{name}/usage #%{_sysconfdir}/%{name}/usage/usage-components.xml #%config(noreplace) %{_sysconfdir}/%{name}/usage/log4j-%{name}_usage.xml #%config(noreplace) %attr(0640,root,%{name}) %{_sysconfdir}/%{name}/usage/db.properties -# -#%endif %changelog * Mon May 3 2010 Manuel Amador (Rudd-O) 1.9.12 diff --git a/console-proxy/js/ajaxviewer.js b/console-proxy/js/ajaxviewer.js index fabd50f5e2e..458091c402f 100644 --- a/console-proxy/js/ajaxviewer.js +++ 
b/console-proxy/js/ajaxviewer.js @@ -198,12 +198,14 @@ KeyboardMapper.prototype = { ///////////////////////////////////////////////////////////////////////////// // class AjaxViewer // -function AjaxViewer(panelId, imageUrl, updateUrl, tileMap, width, height, tileWidth, tileHeight, rawKeyboard) { +function AjaxViewer(panelId, imageUrl, updateUrl, tileMap, width, height, tileWidth, tileHeight, rawKeyboard, linuxGuest) { // logging is disabled by default so that it won't have negative impact on performance // however, a back door key-sequence can trigger to open the logger window, it is designed to help // trouble-shooting g_logger = new Logger(); - g_logger.enable(false); + g_logger.enable(true); + // g_logger.open(); + // g_logger.log(Logger.LEVEL_INFO, 'rawKeyboard: ' + rawKeyboard); var ajaxViewer = this; this.rawKeyboard = rawKeyboard; @@ -226,6 +228,8 @@ function AjaxViewer(panelId, imageUrl, updateUrl, tileMap, width, height, tileWi this.currentKeyboard = 0; this.keyboardMappers = []; + + this.linuxGuest = linuxGuest; this.timer = 0; this.eventQueue = []; @@ -620,21 +624,110 @@ AjaxViewer.prototype = { var charCodeMap = []; var shiftedCharCodeMap = []; - keyCodeMap[106] = { code: 222, shift : 1 }; // JP NUM * - charCodeMap[42] = { code: 34, shift : 1 }; + if(this.linuxGuest) { + // for LINUX guest OSes + + shiftedKeyCodeMap[50] = { code: 222, shift: 1 } ; // JP SHIFT + 2 -> " + shiftedCharCodeMap[64] = { code: 34, shift: 1 }; + + shiftedKeyCodeMap[54] = { code: 55, shift : 1 }; // JP SHIFT + 6 -> & + shiftedCharCodeMap[94] = { code: 38, shift : 1 }; + + shiftedKeyCodeMap[55] = { code: 222, shift : 0 }; // JP SHIFT + 7 -> ' + shiftedCharCodeMap[38] = { code: 39, shift : 1 }; + + shiftedKeyCodeMap[56] = { code: 57, shift : 1 }; // JP SHIFT + 8 -> ( + shiftedCharCodeMap[42] = { code: 40, shift : 1 }; + + shiftedKeyCodeMap[57] = { code: 48, shift : 1 }; // JP SHIFT + 9 -> ) + shiftedCharCodeMap[40] = { code: 41, shift : 1 }; - keyCodeMap[107] = { code: 59, shift : 1 }; // JP NUM + - charCodeMap[43] = { code: 42, shift : 1 }; - - keyCodeMap[110] = { code: 190, shift : 0 }; // JP NUM . 
- charCodeMap[46] = { code: 46, shift : 0 }; + shiftedKeyCodeMap[48] = { code: 192, shift : 1 }; // JP SHIFT + 0 -> ~ + shiftedCharCodeMap[41] = { code: 126, shift : 1 }; - keyCodeMap[193] = { code: 220, shift : 0, charCode: 92 }; // JP key left to right shift on JP keyboard - shiftedKeyCodeMap[193] = { code: 189, shift: 1, charCode: 64 }; - - keyCodeMap[255] = { code: 220, shift : 0, charCode: 92 }; // JP Japanese Yen mark on JP keyboard - shiftedKeyCodeMap[255] = { code: 220, shift: 1, charCode: 95 }; - + shiftedKeyCodeMap[109] = { code: 107, shift : 1 }; // JP SHIFT + (-=), keycode/charcode(109, 95) from Firefox + shiftedCharCodeMap[95] = { code: 61, shift : 0 }; + + shiftedKeyCodeMap[189] = { code: 107, shift : 1 }; // JP SHIFT + (-=), keycode/charcode(109, 95) from Chrome/Safari/MSIE + shiftedCharCodeMap[95] = { code: 61, shift : 0 }; + + shiftedKeyCodeMap[222] = { code: 192, shift : 1 }; // JP SHIFT + (~^) + shiftedCharCodeMap[126] = { code: 126, shift : 1 }; + + if($.browser.mozilla) { + keyCodeMap[107] = { code: 107, shift : 1, defer : true }; // JP NUM +, keycode/charcode (107, 43) from Firefox + charCodeMap[43] = { code: 43, shift : 1, keyCode: 43 }; + charCodeMap[61] = { code: 94, shift : 0, keyCode: 94 }; // JP (~^), keycode/charcode (107, 61) from Firefox + + shiftedKeyCodeMap[107] = { code: 192, shift : 1 }; // JP SHIFT + (!^) + shiftedCharCodeMap[43] = { code: 126, shift : 1 }; + } else { + keyCodeMap[187] = { code: 54, shift: 1, defer: true }; // JP ~^ + charCodeMap[61] = { code: 94, shift: 0, keyCode: 94 }; + + shiftedKeyCodeMap[187] = { code: 192, shift : 1 }; // JP SHIFT + (~^) + shiftedCharCodeMap[43] = { code: 126, shift : 1 }; + + keyCodeMap[107] = { code: 107, shift : 0, defer: true }; // JP NUM +, keycode/charcode(107, 43) + charCodeMap[43] = { code: 43, shift : 1, keyCode: 43 }; + } + + shiftedKeyCodeMap[255] = { code: 220, shift : 1, charCode: 124 }; // JP (|-, key before backspace), Japanese Yen mark + + keyCodeMap[219] = { code: 192, shift : 0 }; // JP @` + charCodeMap[91] = { code: 96, shift : 0 }; + shiftedKeyCodeMap[219] = { code: 50, shift : 1 }; // JP SHIFT + (@`) + shiftedCharCodeMap[123] = { code: 64, shift : 1 }; + + keyCodeMap[221] = { code: 219, shift : 0 }; // JP [{ + charCodeMap[93] = { code: 91, shift : 0 }; + shiftedKeyCodeMap[221] = { code: 219, shift : 1 }; + shiftedCharCodeMap[125] = { code: 123, shift : 1 }; + + if($.browser.mozilla) { + shiftedKeyCodeMap[59] = { code: 107, shift : 1, defer: true }; // JP ;+ + shiftedCharCodeMap[58] = { code: 43, shift : 1, keyCode: 43 }; + } else { + shiftedKeyCodeMap[186] = { code: 107, shift : 1, defer: true }; // JP ;+ + shiftedCharCodeMap[58] = { code: 43, shift : 1, keyCode: 43 }; + } + + keyCodeMap[222] = { code: 59, shift : 0, defer : true }; // JP :* + charCodeMap[39] = { code: 59, shift : 0, keyCode: 58 }; + shiftedKeyCodeMap[222] = { code: 56, shift : 1 }; + shiftedCharCodeMap[34] = { code: 42, shift : 1 }; + + keyCodeMap[220] = { code: 221, shift : 0 }; // JP ]} + charCodeMap[92] = { code: 93, shift : 0 }; + shiftedKeyCodeMap[220] = { code: 221, shift : 1 }; + shiftedCharCodeMap[124] = { code: 125, shift : 1 }; + + keyCodeMap[106] = { code: 222, shift : 1, defer: true }; // JP NUM * + charCodeMap[42] = { code: 42, shift : 1, keyCode: 42 }; + + keyCodeMap[110] = { code: 190, shift : 0 }; // JP NUM . 
+ charCodeMap[46] = { code: 46, shift : 0 }; + + keyCodeMap[193] = { code: 220, shift : 0, charCode: 92 }; // JP key left to right shift on JP keyboard + shiftedKeyCodeMap[193] = { code: 189, shift: 1, charCode: 64 }; + + keyCodeMap[255] = { code: 220, shift : 0, charCode: 92 }; // JP Japanese Yen mark on JP keyboard + shiftedKeyCodeMap[255] = { code: 220, shift: 1, charCode: 95 }; + + } else { + // for windows guest OSes + keyCodeMap[106] = { code: 222, shift : 1 }; // JP NUM * + charCodeMap[42] = { code: 34, shift : 1 }; + + keyCodeMap[110] = { code: 190, shift : 0 }; // JP NUM . + charCodeMap[46] = { code: 46, shift : 0 }; + + keyCodeMap[193] = { code: 220, shift : 0, charCode: 92 }; // JP key left to right shift on JP keyboard + shiftedKeyCodeMap[193] = { code: 189, shift: 1, charCode: 64 }; + + keyCodeMap[255] = { code: 220, shift : 0, charCode: 92 }; // JP Japanese Yen mark on JP keyboard + shiftedKeyCodeMap[255] = { code: 220, shift: 1, charCode: 95 }; + } this.keyboardMappers[AjaxViewer.KEYBOARD_TYPE_JAPAN_EN_OS_TO_JP_VM] = new KeyboardMapper(false, keyCodeMap, shiftedKeyCodeMap, charCodeMap, shiftedCharCodeMap); }, @@ -645,76 +738,136 @@ AjaxViewer.prototype = { var charCodeMap = []; var shiftedCharCodeMap = []; - shiftedKeyCodeMap[50] = { code: 50, shift: 1, defer: true }; // JP SHIFT + 2 -> " - shiftedCharCodeMap[34] = { code: 0, shift : 1, keyCode: 50 }; - - shiftedKeyCodeMap[55] = { code: 222, shift : 0, defer:true }; // JP SHIFT + 7 -> ' - shiftedCharCodeMap[39] = { code: 0, shift : 1, keyCode: 55 }; - - keyCodeMap[222] = { code: 107, shift: 0 }; // JP ~^ - charCodeMap[94] = { code: 59, shift: 0 }; - - shiftedKeyCodeMap[222] = { code: 107, shift : 1 }; // JP SHIFT + (~^) - shiftedCharCodeMap[126] = { code: 43, shift : 1 }; - - keyCodeMap[192] = { code: 219, shift : 0 }; // JP @` - charCodeMap[64] = { code: 91, shift : 0 }; - shiftedKeyCodeMap[192] = { code: 219, shift : 1 }; // JP SHIFT + (@`) - shiftedCharCodeMap[96] = { code: 123, shift : 1 }; - - keyCodeMap[219] = { code: 221, shift : 0 }; // JP [{ - charCodeMap[91] = { code: 93, shift : 0 }; - shiftedKeyCodeMap[219] = { code: 221, shift : 1 }; - shiftedCharCodeMap[123] = { code: 125, shift : 1 }; - - if($.browser.mozilla) { - // Note, keycode 107 is duplicated with "+" key at NUM pad - keyCodeMap[107] = { code: 59, shift : 0, defer: true }; // JP ;+ - charCodeMap[59] = { code: 58, shift : 0, keyCode: 59 }; - shiftedKeyCodeMap[107] = { code: 59, shift : 1 }; - shiftedCharCodeMap[43] = { code: 42, shift : 1 }; - - // keyCodeMap[107] = { code: 59, shift : 1 }; // JP NUM + - charCodeMap[43] = { code: 42, shift : 1, keyCode: 59 }; - } else { - keyCodeMap[187] = { code: 59, shift : 0, defer: true }; // JP ;+ - charCodeMap[59] = { code: 58, shift : 0, keyCode: 59 }; - shiftedKeyCodeMap[187] = { code: 59, shift : 1 }; - shiftedCharCodeMap[43] = { code: 42, shift : 1 }; - - keyCodeMap[107] = { code: 59, shift : 1 }; // JP NUM + - charCodeMap[43] = { code: 42, shift : 1 }; - } - - if($.browser.mozilla) { - keyCodeMap[59] = { code: 222, shift : 0 }; // JP :* - charCodeMap[58] = { code: 39, shift : 0 }; - shiftedKeyCodeMap[59] = { code: 222, shift : 1 }; - shiftedCharCodeMap[42] = { code: 34, shift : 1 }; - } else { - keyCodeMap[186] = { code: 222, shift : 0 }; // JP :* - charCodeMap[58] = { code: 39, shift : 0 }; - shiftedKeyCodeMap[186] = { code: 222, shift : 1 }; - shiftedCharCodeMap[42] = { code: 34, shift : 1 }; - } - - keyCodeMap[221] = { code: 220, shift : 0 }; // JP ]} - charCodeMap[93] = { code: 92, shift : 0 }; - 
shiftedKeyCodeMap[221] = { code: 220, shift : 1 }; - shiftedCharCodeMap[125] = { code: 124, shift : 1 }; - - keyCodeMap[106] = { code: 222, shift : 1 }; // JP NUM * - charCodeMap[42] = { code: 34, shift : 1 }; - - keyCodeMap[110] = { code: 190, shift : 0 }; // JP NUM . - charCodeMap[46] = { code: 46, shift : 0 }; + if(this.linuxGuest) { + shiftedKeyCodeMap[50] = { code: 50, shift: 1, defer: true }; // JP SHIFT + 2 -> " + shiftedCharCodeMap[34] = { code: 34, shift : 1, keyCode: 34 }; - keyCodeMap[193] = { code: 220, shift : 0, charCode: 92 }; // JP key left to right shift on JP keyboard - shiftedKeyCodeMap[193] = { code: 189, shift: 1, charCode: 64 }; - - keyCodeMap[255] = { code: 220, shift : 0, charCode: 92 }; // JP Japanese Yen mark on JP keyboard - shiftedKeyCodeMap[255] = { code: 220, shift: 1, charCode: 95 }; + shiftedKeyCodeMap[54] = { code: 55, shift : 1 }; // JP SHIFT + 6 -> & + shiftedCharCodeMap[94] = { code: 38, shift : 1 }; + + shiftedKeyCodeMap[55] = { code: 222, shift : 0, defer:true }; // JP SHIFT + 7 -> ' + shiftedCharCodeMap[39] = { code: 39, shift : 1, keyCode: 39 }; + + shiftedKeyCodeMap[56] = { code: 57, shift : 1 }; // JP SHIFT + 8 -> ( + shiftedCharCodeMap[42] = { code: 40, shift : 1 }; + + shiftedKeyCodeMap[57] = { code: 48, shift : 1 }; // JP SHIFT + 9 -> ) + shiftedCharCodeMap[40] = { code: 41, shift : 1 }; + + shiftedKeyCodeMap[48] = { code: 192, shift : 1 }; // JP SHIFT + 0 -> ~ + shiftedCharCodeMap[41] = { code: 126, shift : 1 }; + + keyCodeMap[222] = { code: 107, shift: 0, defer: true }; // JP ~^ + charCodeMap[94] = { code: 94, shift: 0, keyCode: 94 }; + shiftedKeyCodeMap[222] = { code: 192, shift : 1, defer: true }; // JP SHIFT + (~^) + shiftedCharCodeMap[126] = { code: 126, shift : 1 }; + + shiftedKeyCodeMap[192] = { code: 50, shift : 1 }; // JP SHIFT + (@`) + shiftedCharCodeMap[96] = { code: 64, shift : 1 }; + + if($.browser.mozilla) { + shiftedKeyCodeMap[109] = { code: 107, shift : 1 }; // JP SHIFT + (-=), keycode/charcode(109, 95) from Firefox + + // Note, keycode 107 is duplicated with "+" key at NUM pad + keyCodeMap[107] = { code: 59, shift : 0, defer: true }; // JP ;+ + charCodeMap[59] = { code: 58, shift : 0, keyCode: 59 }; + charCodeMap[43] = { code: 43, shift : 1, keyCode: 43 }; // JP NUM + + + shiftedKeyCodeMap[107] = { code: 59, shift : 0, defer: true }; // JP ;+ + shiftedCharCodeMap[43] = { code: 43, shift : 1, keyCode: 43 }; + + keyCodeMap[59] = { code: 59, shift : 0, defer : true }; // JP :* + charCodeMap[58] = { code: 58, shift : 0, keyCode: 58 }; + } else { + shiftedKeyCodeMap[189] = { code: 107, shift : 1 }; // JP SHIFT + (-=), keycode/charcode(109, 95) from Chrome/Safari/MSIE + shiftedCharCodeMap[95] = { code: 61, shift : 0 }; + + keyCodeMap[187] = { code: 59, shift : 0, defer: true }; // JP ;+ + charCodeMap[59] = { code: 58, shift : 0, keyCode: 59 }; + shiftedKeyCodeMap[187] = { code: 59, shift : 1, defer: true }; + shiftedCharCodeMap[43] = { code: 43, shift : 1, keyCode: 43 }; + + keyCodeMap[107] = { code: 59, shift : 0, defer: true }; // JP NUM + + charCodeMap[43] = { code: 43, shift : 1, keyCode: 43}; + + keyCodeMap[186] = { code: 59, shift : 0, defer: true }; // JP :* + charCodeMap[58] = { code: 58, shift : 0, keyCode: 58 }; + } + + keyCodeMap[226] = { code: 220, shift : 0, charCode: 92 }; // JP key left to right shift on JP keyboard + shiftedKeyCodeMap[226] = { code: 189, shift: 1 }; + + } else { + // windows guest + shiftedKeyCodeMap[50] = { code: 50, shift: 1, defer: true }; // JP SHIFT + 2 -> " + shiftedCharCodeMap[34] = { code: 0, 
shift : 1, keyCode: 50 }; + + shiftedKeyCodeMap[55] = { code: 222, shift : 0, defer:true }; // JP SHIFT + 7 -> ' + shiftedCharCodeMap[39] = { code: 0, shift : 1, keyCode: 55 }; + + keyCodeMap[222] = { code: 107, shift: 0 }; // JP ~^ + charCodeMap[94] = { code: 59, shift: 0 }; + + shiftedKeyCodeMap[222] = { code: 107, shift : 1 }; // JP SHIFT + (~^) + shiftedCharCodeMap[126] = { code: 43, shift : 1 }; + + keyCodeMap[192] = { code: 219, shift : 0 }; // JP @` + charCodeMap[64] = { code: 91, shift : 0 }; + shiftedKeyCodeMap[192] = { code: 219, shift : 1 }; // JP SHIFT + (@`) + shiftedCharCodeMap[96] = { code: 123, shift : 1 }; + + keyCodeMap[219] = { code: 221, shift : 0 }; // JP [{ + charCodeMap[91] = { code: 93, shift : 0 }; + shiftedKeyCodeMap[219] = { code: 221, shift : 1 }; + shiftedCharCodeMap[123] = { code: 125, shift : 1 }; + + if($.browser.mozilla) { + // Note, keycode 107 is duplicated with "+" key at NUM pad + keyCodeMap[107] = { code: 59, shift : 0, defer: true }; // JP ;+ + charCodeMap[59] = { code: 58, shift : 0, keyCode: 59 }; + shiftedKeyCodeMap[107] = { code: 59, shift : 0 }; + shiftedCharCodeMap[43] = { code: 42, shift : 0 }; + charCodeMap[43] = { code: 42, shift : 1, keyCode: 59 }; + } else { + keyCodeMap[187] = { code: 59, shift : 0, defer: true }; // JP ;+ + charCodeMap[59] = { code: 58, shift : 0, keyCode: 59 }; + shiftedKeyCodeMap[187] = { code: 59, shift : 1 }; + shiftedCharCodeMap[43] = { code: 42, shift : 1 }; + + keyCodeMap[107] = { code: 59, shift : 1 }; // JP NUM + + charCodeMap[43] = { code: 42, shift : 1 }; + } + + if($.browser.mozilla) { + keyCodeMap[59] = { code: 222, shift : 0 }; // JP :* + charCodeMap[58] = { code: 39, shift : 0 }; + shiftedKeyCodeMap[59] = { code: 222, shift : 1 }; + shiftedCharCodeMap[42] = { code: 34, shift : 1 }; + } else { + keyCodeMap[186] = { code: 222, shift : 0 }; // JP :* + charCodeMap[58] = { code: 39, shift : 0 }; + shiftedKeyCodeMap[186] = { code: 222, shift : 1 }; + shiftedCharCodeMap[42] = { code: 34, shift : 1 }; + } + + keyCodeMap[221] = { code: 220, shift : 0 }; // JP ]} + charCodeMap[93] = { code: 92, shift : 0 }; + shiftedKeyCodeMap[221] = { code: 220, shift : 1 }; + shiftedCharCodeMap[125] = { code: 124, shift : 1 }; + + keyCodeMap[106] = { code: 222, shift : 1 }; // JP NUM * + charCodeMap[42] = { code: 34, shift : 1 }; + + keyCodeMap[110] = { code: 190, shift : 0 }; // JP NUM . 
+ charCodeMap[46] = { code: 46, shift : 0 }; + + keyCodeMap[193] = { code: 220, shift : 0, charCode: 92 }; // JP key left to right shift on JP keyboard + shiftedKeyCodeMap[193] = { code: 189, shift: 1, charCode: 64 }; + + keyCodeMap[255] = { code: 220, shift : 0, charCode: 92 }; // JP Japanese Yen mark on JP keyboard + shiftedKeyCodeMap[255] = { code: 220, shift: 1, charCode: 95 }; + } this.keyboardMappers[AjaxViewer.KEYBOARD_TYPE_JAPAN_JP_OS_TO_JP_VM] = new KeyboardMapper(false, keyCodeMap, shiftedKeyCodeMap, charCodeMap, shiftedCharCodeMap); }, diff --git a/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyAjaxHandler.java b/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyAjaxHandler.java index bb1f1e21682..6bd73786701 100644 --- a/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyAjaxHandler.java +++ b/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyAjaxHandler.java @@ -76,7 +76,8 @@ public class ConsoleProxyAjaxHandler implements HttpHandler { String tag = queryMap.get("tag"); String ticket = queryMap.get("ticket"); String ajaxSessionIdStr = queryMap.get("sess"); - String eventStr = queryMap.get("event"); + String eventStr = queryMap.get("event"); + if(tag == null) tag = ""; @@ -171,8 +172,9 @@ public class ConsoleProxyAjaxHandler implements HttpHandler { if(s_logger.isDebugEnabled()) s_logger.debug("Ajax request indicates a fresh client start"); - String title = queryMap.get("t"); - handleClientStart(t, viewer, title != null ? title : ""); + String title = queryMap.get("t"); + String guest = queryMap.get("guest"); + handleClientStart(t, viewer, title != null ? title : "", guest); } else { if(s_logger.isTraceEnabled()) @@ -392,9 +394,9 @@ public class ConsoleProxyAjaxHandler implements HttpHandler { } } - private void handleClientStart(HttpExchange t, ConsoleProxyViewer viewer, String title) throws IOException { + private void handleClientStart(HttpExchange t, ConsoleProxyViewer viewer, String title, String guest) throws IOException { List languages = t.getRequestHeaders().get("Accept-Language"); - String response = viewer.onAjaxClientStart(title, languages); + String response = viewer.onAjaxClientStart(title, languages, guest); Headers hds = t.getResponseHeaders(); hds.set("Content-Type", "text/html"); diff --git a/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyAjaxKeyMapper.java b/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyAjaxKeyMapper.java index 2f0b1a2a3c8..9911cc1a500 100644 --- a/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyAjaxKeyMapper.java +++ b/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyAjaxKeyMapper.java @@ -178,7 +178,7 @@ public class ConsoleProxyAjaxKeyMapper { js2javaCodeMap.put(new Integer(18), new Integer(0xffe9)); // for SHIFT transaction at proxy side - shiftedKeyCharMap = new HashMap(); + shiftedKeyCharMap = new HashMap(); shiftedKeyCharMap.put(new Integer('1'), new Integer('!')); shiftedKeyCharMap.put(new Integer('2'), new Integer('@')); shiftedKeyCharMap.put(new Integer('3'), new Integer('#')); @@ -190,7 +190,7 @@ public class ConsoleProxyAjaxKeyMapper { shiftedKeyCharMap.put(new Integer('9'), new Integer('(')); shiftedKeyCharMap.put(new Integer('0'), new Integer(')')); shiftedKeyCharMap.put(new Integer('-'), new Integer('_')); - shiftedKeyCharMap.put(new Integer('='), new Integer('+')); + //shiftedKeyCharMap.put(new Integer('='), new Integer('+')); shiftedKeyCharMap.put(new Integer('`'), new Integer('~')); shiftedKeyCharMap.put(new Integer('['), new Integer('{')); shiftedKeyCharMap.put(new Integer(']'), new 
Integer('}')); @@ -239,7 +239,7 @@ public class ConsoleProxyAjaxKeyMapper { return vkCode.intValue(); } - public int getJvmKeyCode(int jsKeyCode) { + public int getJvmKeyCode(int jsKeyCode) { Integer code = js2javaCodeMap.get(jsKeyCode); if(code != null) return code.intValue(); diff --git a/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyViewer.java b/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyViewer.java index d3d0153f365..c1c2211c06c 100644 --- a/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyViewer.java +++ b/console-proxy/src/com/cloud/consoleproxy/ConsoleProxyViewer.java @@ -924,7 +924,7 @@ public class ConsoleProxyViewer implements java.lang.Runnable, RfbViewer, RfbPro "

"; } - public String onAjaxClientStart(String title, List languages) { + public String onAjaxClientStart(String title, List languages, String guest) { if(!waitForViewerReady()) return onAjaxClientConnectFailed(); @@ -1005,11 +1005,11 @@ public class ConsoleProxyViewer implements java.lang.Runnable, RfbViewer, RfbPro */ return getAjaxViewerPageContent(sbTileSequence.toString(), imgUrl, updateUrl, width, height, tileWidth, tileHeight, title, - ConsoleProxy.keyboardType == ConsoleProxy.KEYBOARD_RAW, languages); + ConsoleProxy.keyboardType == ConsoleProxy.KEYBOARD_RAW, languages, guest); } private String getAjaxViewerPageContent(String tileSequence, String imgUrl, String updateUrl, int width, - int height, int tileWidth, int tileHeight, String title, boolean rawKeyboard, List languages) { + int height, int tileWidth, int tileHeight, String title, boolean rawKeyboard, List languages, String guest) { StringBuffer sbLanguages = new StringBuffer(""); if(languages != null) { @@ -1020,6 +1020,10 @@ public class ConsoleProxyViewer implements java.lang.Runnable, RfbViewer, RfbPro sbLanguages.append(lang); } } + + boolean linuxGuest = true; + if(guest != null && guest.equalsIgnoreCase("windows")) + linuxGuest = false; String[] content = new String[] { "", @@ -1066,7 +1070,8 @@ public class ConsoleProxyViewer implements java.lang.Runnable, RfbViewer, RfbPro "var acceptLanguages = '" + sbLanguages.toString() + "';", "var tileMap = [ " + tileSequence + " ];", "var ajaxViewer = new AjaxViewer('main_panel', '" + imgUrl + "', '" + updateUrl + "', tileMap, ", - String.valueOf(width) + ", " + String.valueOf(height) + ", " + String.valueOf(tileWidth) + ", " + String.valueOf(tileHeight) + ", " + (rawKeyboard ? "true" : "false") + ");", + String.valueOf(width) + ", " + String.valueOf(height) + ", " + String.valueOf(tileWidth) + ", " + String.valueOf(tileHeight) + + ", " + (rawKeyboard ? "true" : "false") + ", " + (linuxGuest ? 
"true" : "false") + ");", "$(function() {", "ajaxViewer.start();", @@ -1276,13 +1281,13 @@ public class ConsoleProxyViewer implements java.lang.Runnable, RfbViewer, RfbPro } } } - + public void sendClientRawKeyboardEvent(int event, int code, int modifiers) { code = ConsoleProxyAjaxKeyMapper.getInstance().getJvmKeyCode(code); switch(event) { case 4 : // Key press // - // special handling for ' and " (keycode: 222, char code : 39 and 34 + // special handling for ' and " (keycode: 222, char code : 39 and 34) // if(code == 39 || code == 34) { writeKeyboardEvent(KeyEvent.KEY_PRESSED, 222, (char)code, getAwtModifiers(modifiers)); @@ -1307,8 +1312,33 @@ public class ConsoleProxyViewer implements java.lang.Runnable, RfbViewer, RfbPro getAwtModifiers(modifiers)); break; } - } - + } + +/* + public void sendClientRawKeyboardEvent(int event, int code, int modifiers) { + // code = ConsoleProxyAjaxKeyMapper.getInstance().getJvmKeyCode(code); + switch(event) { + case 4 : // Key press + break; + + case 5 : // Key down + if((modifiers & ConsoleProxyViewer.CTRL_KEY_MASK) != 0 && (modifiers & ConsoleProxyViewer.ALT_KEY_MASK) != 0 && code == KeyEvent.VK_INSERT) { + code = KeyEvent.VK_DELETE; + } + + writeKeyboardEvent(KeyEvent.KEY_PRESSED, code, + (char)code, + getAwtModifiers(modifiers)); + break; + + case 6 : // Key Up + writeKeyboardEvent(KeyEvent.KEY_RELEASED, code, + (char)code, + getAwtModifiers(modifiers)); + break; + } + } +*/ public void sendClientKeyboardEvent(int event, int code, int modifiers) { int vkCode; switch(event) { diff --git a/core/.classpath b/core/.classpath index 75386efe374..7411e482344 100644 --- a/core/.classpath +++ b/core/.classpath @@ -3,9 +3,9 @@ + - diff --git a/core/src/com/cloud/host/HostVO.java b/core/src/com/cloud/host/HostVO.java index a4d124b1bb6..37593111571 100644 --- a/core/src/com/cloud/host/HostVO.java +++ b/core/src/com/cloud/host/HostVO.java @@ -50,15 +50,15 @@ public class HostVO implements Host { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") - private long id; + private long id; @Column(name="disconnected") @Temporal(value=TemporalType.TIMESTAMP) private Date disconnectedOn; - + @Column(name="name", nullable=false) - private String name = null; - + private String name = null; + /** * Note: There is no setter for status because it has to be set in the dao code. 
*/ @@ -68,76 +68,78 @@ public class HostVO implements Host { @Column(name="type", updatable = true, nullable=false) @Enumerated(value=EnumType.STRING) private Type type; - + @Column(name="private_ip_address", nullable=false) - private String privateIpAddress; - + private String privateIpAddress; + @Column(name="private_mac_address", nullable=false) private String privateMacAddress; - + @Column(name="private_netmask", nullable=false) private String privateNetmask; - + @Column(name="public_netmask") private String publicNetmask; - + @Column(name="public_ip_address") private String publicIpAddress; - + @Column(name="public_mac_address") private String publicMacAddress; - + @Column(name="storage_ip_address") private String storageIpAddress; @Column(name="cluster_id") private Long clusterId; - + @Column(name="storage_netmask") private String storageNetmask; - + @Column(name="storage_mac_address") private String storageMacAddress; - + @Column(name="storage_ip_address_2") private String storageIpAddressDeux; - + @Column(name="storage_netmask_2") private String storageNetmaskDeux; - + @Column(name="storage_mac_address_2") private String storageMacAddressDeux; - + @Column(name="hypervisor_type", updatable = true, nullable=false) @Enumerated(value=EnumType.STRING) private HypervisorType hypervisorType; - + @Column(name="proxy_port") private Integer proxyPort; - + @Column(name="resource") private String resource; - + @Column(name="fs_type") private StoragePoolType fsType; - + @Column(name="available") private boolean available = true; - + @Column(name="setup") private boolean setup = false; - + @Column(name="allocation_state", nullable=false) @Enumerated(value=EnumType.STRING) private HostAllocationState hostAllocationState; + @Column(name="hypervisor_version") + private String hypervisorVersion; // This is a delayed load value. If the value is null, // then this field has not been loaded yet. // Call host dao to load it. @Transient Map details; - + // This is a delayed load value. If the value is null, // then this field has not been loaded yet. // Call host dao to load it. 
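With the hypervisor_version column above, HostVO now carries the same field that the Host interface and HostResponse gained earlier in this patch. The builder that copies it into an API response is not part of this excerpt; a minimal sketch, assuming only the getters and setters already shown:

import com.cloud.api.response.HostResponse;
import com.cloud.host.Host;

public class HostHypervisorVersionGlueSketch {
    // Sketch only; the real response-building code lives elsewhere in the management server.
    public static HostResponse toResponse(Host host) {
        HostResponse response = new HostResponse();
        response.setHypervisorVersion(host.getHypervisorVersion()); // new column/getter from this patch
        response.setAllocationState(host.getHostAllocationState().toString());
        return response;
    }
}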
@@ -146,41 +148,41 @@ public class HostVO implements Host { @Override public String getStorageIpAddressDeux() { - return storageIpAddressDeux; - } + return storageIpAddressDeux; + } - public void setStorageIpAddressDeux(String deuxStorageIpAddress) { - this.storageIpAddressDeux = deuxStorageIpAddress; - } + public void setStorageIpAddressDeux(String deuxStorageIpAddress) { + this.storageIpAddressDeux = deuxStorageIpAddress; + } - @Override + @Override public String getStorageNetmaskDeux() { - return storageNetmaskDeux; - } + return storageNetmaskDeux; + } - @Override + @Override public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public void setStorageNetmaskDeux(String deuxStorageNetmask) { - this.storageNetmaskDeux = deuxStorageNetmask; - } + return clusterId; + } + + public void setClusterId(Long clusterId) { + this.clusterId = clusterId; + } - @Override + public void setStorageNetmaskDeux(String deuxStorageNetmask) { + this.storageNetmaskDeux = deuxStorageNetmask; + } + + @Override public String getStorageMacAddressDeux() { - return storageMacAddressDeux; - } + return storageMacAddressDeux; + } - public void setStorageMacAddressDeux(String duexStorageMacAddress) { - this.storageMacAddressDeux = duexStorageMacAddress; - } + public void setStorageMacAddressDeux(String duexStorageMacAddress) { + this.storageMacAddressDeux = duexStorageMacAddress; + } - @Override + @Override public String getPrivateMacAddress() { return privateMacAddress; } @@ -188,11 +190,11 @@ public class HostVO implements Host { public void setPrivateMacAddress(String privateMacAddress) { this.privateMacAddress = privateMacAddress; } - + public boolean isAvailable() { return available; } - + public void setAvailable(boolean available) { this.available = available; } @@ -255,11 +257,11 @@ public class HostVO implements Host { public String getStorageMacAddress() { return storageMacAddress; } - + public boolean isSetup() { return setup; } - + public void setSetup(boolean setup) { this.setup = setup; } @@ -267,31 +269,31 @@ public class HostVO implements Host { public void setStorageMacAddress(String storageMacAddress) { this.storageMacAddress = storageMacAddress; } - + public String getResource() { return resource; } - + public void setResource(String resource) { this.resource = resource; } - + public Map getDetails() { return details; } - + public String getDetail(String name) { assert (details != null) : "Did you forget to load the details?"; - + return details != null ? 
details.get(name) : null; } - + public void setDetail(String name, String value) { assert (details != null) : "Did you forget to load the details?"; - + details.put(name, value); } - + public void setDetails(Map details) { this.details = details; } @@ -299,59 +301,59 @@ public class HostVO implements Host { public List getHostTags() { return hostTags; } - + public void setHostTags(List hostTags) { this.hostTags = hostTags; } @Column(name="data_center_id", nullable=false) private long dataCenterId; - + @Column(name="pod_id") - private Long podId; - + private Long podId; + @Column(name="cpus") private Integer cpus; - + @Column(name="url") private String storageUrl; @Column(name="speed") private Long speed; - + @Column(name="ram") private long totalMemory; - + @Column(name="parent", nullable=false) private String parent; - + @Column(name="guid", updatable=true, nullable=false) private String guid; - @Column(name="capabilities") + @Column(name="capabilities") private String caps; - + @Column(name="total_size") private Long totalSize; - + @Column(name="last_ping") private long lastPinged; - + @Column(name="mgmt_server_id") private Long managementServerId; - + @Column(name="dom0_memory") private long dom0MinMemory; - + @Column(name="version") private String version; - + @Column(name=GenericDao.CREATED_COLUMN) private Date created; - + @Column(name=GenericDao.REMOVED_COLUMN) private Date removed; - + public HostVO(String guid) { this.guid = guid; this.status = Status.Up; @@ -359,70 +361,70 @@ public class HostVO implements Host { this.dom0MinMemory = 0; this.hostAllocationState = Host.HostAllocationState.Enabled; } - + protected HostVO() { } - - public HostVO(long id, - String name, - Type type, - String privateIpAddress, - String privateNetmask, - String privateMacAddress, - String publicIpAddress, - String publicNetmask, - String publicMacAddress, - String storageIpAddress, - String storageNetmask, - String storageMacAddress, - String deuxStorageIpAddress, - String duxStorageNetmask, - String deuxStorageMacAddress, - String guid, - Status status, - String version, - String iqn, - Date disconnectedOn, - long dcId, - Long podId, - long serverId, - long ping, - String parent, - long totalSize, - StoragePoolType fsType) { - this(id, name, type, privateIpAddress, privateNetmask, privateMacAddress, publicIpAddress, publicNetmask, publicMacAddress, storageIpAddress, storageNetmask, storageMacAddress, guid, status, version, iqn, disconnectedOn, dcId, podId, serverId, ping, null, null, null, 0, null); - this.parent = parent; - this.totalSize = totalSize; - this.fsType = fsType; - this.hostAllocationState = Host.HostAllocationState.Enabled; - } - + public HostVO(long id, - String name, - Type type, - String privateIpAddress, - String privateNetmask, - String privateMacAddress, - String publicIpAddress, - String publicNetmask, - String publicMacAddress, - String storageIpAddress, - String storageNetmask, - String storageMacAddress, - String guid, - Status status, - String version, - String url, - Date disconnectedOn, - long dcId, - Long podId, - long serverId, - long ping, - Integer cpus, - Long speed, - Long totalMemory, - long dom0MinMemory, - String caps) { + String name, + Type type, + String privateIpAddress, + String privateNetmask, + String privateMacAddress, + String publicIpAddress, + String publicNetmask, + String publicMacAddress, + String storageIpAddress, + String storageNetmask, + String storageMacAddress, + String deuxStorageIpAddress, + String duxStorageNetmask, + String 
deuxStorageMacAddress, + String guid, + Status status, + String version, + String iqn, + Date disconnectedOn, + long dcId, + Long podId, + long serverId, + long ping, + String parent, + long totalSize, + StoragePoolType fsType) { + this(id, name, type, privateIpAddress, privateNetmask, privateMacAddress, publicIpAddress, publicNetmask, publicMacAddress, storageIpAddress, storageNetmask, storageMacAddress, guid, status, version, iqn, disconnectedOn, dcId, podId, serverId, ping, null, null, null, 0, null); + this.parent = parent; + this.totalSize = totalSize; + this.fsType = fsType; + this.hostAllocationState = Host.HostAllocationState.Enabled; + } + + public HostVO(long id, + String name, + Type type, + String privateIpAddress, + String privateNetmask, + String privateMacAddress, + String publicIpAddress, + String publicNetmask, + String publicMacAddress, + String storageIpAddress, + String storageNetmask, + String storageMacAddress, + String guid, + Status status, + String version, + String url, + Date disconnectedOn, + long dcId, + Long podId, + long serverId, + long ping, + Integer cpus, + Long speed, + Long totalMemory, + long dom0MinMemory, + String caps) { this.id = id; this.name = name; this.status = status; @@ -454,28 +456,28 @@ public class HostVO implements Host { this.storageUrl = url; this.hostAllocationState = Host.HostAllocationState.Enabled; } - + public void setPodId(Long podId) { - + this.podId = podId; } - + public void setDataCenterId(long dcId) { this.dataCenterId = dcId; } - + public void setVersion(String version) { this.version = version; } - + public void setStorageUrl(String url) { this.storageUrl = url; } - + public void setDisconnectedOn(Date disconnectedOn) { this.disconnectedOn = disconnectedOn; } - + public String getStorageUrl() { return storageUrl; } @@ -524,32 +526,32 @@ public class HostVO implements Host { public long getLastPinged() { return lastPinged; } - + @Override public String getParent() { return parent; } - + @Override public long getTotalSize() { return totalSize; } - + @Override public String getCapabilities() { return caps; } - + @Override public Date getCreated() { return created; } - + @Override public Date getRemoved() { return removed; } - + @Override public String getVersion() { return version; @@ -558,66 +560,66 @@ public class HostVO implements Host { public void setType(Type type) { this.type = type; } - - @Override + + @Override public long getId() { - return id; - } - - @Override - public String getName() { - return name; - } - - @Override - public Status getStatus() { - return status; - } - - @Override - public long getDataCenterId() { - return dataCenterId; - } - - @Override - public Long getPodId() { - return podId; - } - + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public Status getStatus() { + return status; + } + + @Override + public long getDataCenterId() { + return dataCenterId; + } + + @Override + public Long getPodId() { + return podId; + } + @Override public Long getManagementServerId() { return managementServerId; } - + @Override public Date getDisconnectedOn() { return disconnectedOn; } - + @Override public String getPrivateIpAddress() { return privateIpAddress; } - + @Override public String getGuid() { return guid; } - + public void setGuid(String guid) { - this.guid = guid; - } - + this.guid = guid; + } + @Override public Integer getCpus() { return cpus; } - + @Override public Long getSpeed() { return speed; } - + @Override public Long getTotalMemory() { return 
totalMemory; @@ -625,17 +627,17 @@ public class HostVO implements Host { @Override public Integer getProxyPort() { - return proxyPort; + return proxyPort; } - + public void setProxyPort(Integer port) { - proxyPort = port; + proxyPort = port; } - + public StoragePoolType getFsType() { return fsType; } - + @Override public Type getType() { return type; @@ -645,36 +647,46 @@ public class HostVO implements Host { public int hashCode() { return NumbersUtil.hash(id); } - + @Override - public boolean equals(Object obj) { - if (obj instanceof HostVO) { - return ((HostVO)obj).getId() == this.getId(); - } else { - return false; - } + public boolean equals(Object obj) { + if (obj instanceof HostVO) { + return ((HostVO)obj).getId() == this.getId(); + } else { + return false; + } } @Override public String toString() { - return new StringBuilder("Host[").append("-").append(id).append("-").append(type).append("]").toString(); + return new StringBuilder("Host[").append("-").append(id).append("-").append(type).append("]").toString(); } - public void setHypervisorType(HypervisorType hypervisorType) { - this.hypervisorType = hypervisorType; - } + public void setHypervisorType(HypervisorType hypervisorType) { + this.hypervisorType = hypervisorType; + } - @Override - public HypervisorType getHypervisorType() { - return hypervisorType; - } - - @Override - public HostAllocationState getHostAllocationState() { - return hostAllocationState; + @Override + public HypervisorType getHypervisorType() { + return hypervisorType; } - + + @Override + public HostAllocationState getHostAllocationState() { + return hostAllocationState; + } + public void setHostAllocationState(HostAllocationState hostAllocationState) { - this.hostAllocationState = hostAllocationState; + this.hostAllocationState = hostAllocationState; + } + + + public void setHypervisorVersion(String hypervisorVersion) { + this.hypervisorVersion = hypervisorVersion; + } + + @Override + public String getHypervisorVersion() { + return hypervisorVersion; } } diff --git a/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java b/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java new file mode 100644 index 00000000000..a552655d649 --- /dev/null +++ b/core/src/com/cloud/hypervisor/HypervisorCapabilitiesVO.java @@ -0,0 +1,141 @@ +/** + * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ +package com.cloud.hypervisor; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.EnumType; +import javax.persistence.Enumerated; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.utils.NumbersUtil; + +@Entity +@Table(name="hypervisor_capabilities") +public class HypervisorCapabilitiesVO implements HypervisorCapabilities{ + @Id + @GeneratedValue(strategy=GenerationType.IDENTITY) + @Column(name="id") + private long id; + + @Column(name="hypervisor_type") + @Enumerated(value=EnumType.STRING) + private HypervisorType hypervisorType; + + @Column(name="hypervisor_version") + private String hypervisorVersion; + + @Column(name="max_guests_limit") + private Long maxGuestsLimit; + + @Column(name="security_group_enabled") + private boolean securityGroupEnabled; + + + protected HypervisorCapabilitiesVO() { + } + + public HypervisorCapabilitiesVO(HypervisorType hypervisorType, String hypervisorVersion, Long maxGuestsLimit, boolean securityGroupEnabled) { + this.hypervisorType = hypervisorType; + this.hypervisorVersion = hypervisorVersion; + this.maxGuestsLimit = maxGuestsLimit; + this.securityGroupEnabled = securityGroupEnabled; + } + + /** + * @param hypervisorType the hypervisorType to set + */ + public void setHypervisorType(HypervisorType hypervisorType) { + this.hypervisorType = hypervisorType; + } + + + /** + * @return the hypervisorType + */ + @Override + public HypervisorType getHypervisorType() { + return hypervisorType; + } + + /** + * @param hypervisorVersion the hypervisorVersion to set + */ + public void setHypervisorVersion(String hypervisorVersion) { + this.hypervisorVersion = hypervisorVersion; + } + + /** + * @return the hypervisorVersion + */ + @Override + public String getHypervisorVersion() { + return hypervisorVersion; + } + + public void setSecurityGroupEnabled(Boolean securityGroupEnabled) { + this.securityGroupEnabled = securityGroupEnabled; + } + + /** + * @return the securityGroupSupport + */ + @Override + public boolean isSecurityGroupEnabled() { + return securityGroupEnabled; + } + + /** + * @param maxGuests the maxGuests to set + */ + public void setMaxGuestsLimit(Long maxGuestsLimit) { + this.maxGuestsLimit = maxGuestsLimit; + } + + /** + * @return the maxGuests + */ + @Override + public Long getMaxGuestsLimit() { + return maxGuestsLimit; + } + + + public long getId() { + return id; + } + + @Override + public int hashCode() { + return NumbersUtil.hash(id); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof HypervisorCapabilitiesVO) { + return ((HypervisorCapabilitiesVO)obj).getId() == this.getId(); + } else { + return false; + } + } + +} diff --git a/core/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/core/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index cd1b30ca9e9..3df94664503 100755 --- a/core/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/core/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -146,6 +146,7 @@ import com.cloud.hypervisor.vmware.mo.HostMO; import com.cloud.hypervisor.vmware.mo.HostVirtualNicType; import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import com.cloud.hypervisor.vmware.mo.NetworkDetails; +import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; import 
com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHostNetworkSummary; @@ -178,6 +179,7 @@ import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineName; +import com.cloud.vm.VmDetailConstants; import com.google.gson.Gson; import com.vmware.vim25.ClusterDasConfigInfo; import com.vmware.vim25.ComputeResourceSummary; @@ -770,8 +772,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, true); VmwareManager mgr = getServiceContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - - VirtualDevice nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), mgr.getGuestNicDeviceType(), + + // Note: public NIC is plugged inside system VM + VirtualDevice nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), VirtualEthernetCardType.Vmxnet3, networkInfo.second(), vifMacAddress, -1, 1, true, true); vmMo.plugDevice(nic); } @@ -1057,8 +1060,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } VirtualMachineTO vmSpec = cmd.getVirtualMachine(); - String vmName = vmSpec.getName(); - + String vmName = vmSpec.getName(); + State state = State.Stopped; VmwareContext context = getServiceContext(); try { @@ -1068,7 +1071,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa synchronized (_vms) { _vms.put(vmName, State.Starting); } - + + VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.valueOf(vmSpec.getDetails().get(VmDetailConstants.NIC_ADAPTER)); + if(s_logger.isDebugEnabled()) + s_logger.debug("VM " + vmName + " will be started with NIC device type: " + nicDeviceType); + VmwareHypervisorHost hyperHost = getHyperHost(context); VolumeTO[] disks = validateDisks(vmSpec.getDisks()); assert (disks.length > 0); @@ -1269,14 +1276,12 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } VirtualDevice nic; - int nicDeviceNumber = -1; for (NicTO nicTo : sortNicsByDeviceId(nics)) { s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo)); Pair networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo); - nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), mgr.getGuestNicDeviceType(), networkInfo.second(), nicTo.getMac(), nicDeviceNumber, i + 1, true, true); - nicDeviceNumber = nic.getUnitNumber() + 1; + nic = VmwareHelper.prepareNicDevice(vmMo, networkInfo.first(), nicDeviceType, networkInfo.second(), nicTo.getMac(), i, i + 1, true, true); deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); deviceConfigSpecArray[i].setDevice(nic); deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.add); @@ -1290,14 +1295,18 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmConfigSpec.setDeviceChange(deviceConfigSpecArray); // pass boot arguments through machine.id - OptionValue[] machineIdOptions = new OptionValue[1]; + OptionValue[] machineIdOptions = new OptionValue[2]; machineIdOptions[0] = new OptionValue(); machineIdOptions[0].setKey("machine.id"); machineIdOptions[0].setValue(vmSpec.getBootArgs()); + machineIdOptions[1] = new OptionValue(); + machineIdOptions[1].setKey("devices.hotplug"); + machineIdOptions[1].setValue("true"); + String keyboardLayout = null; if(vmSpec.getDetails() != null) - keyboardLayout = vmSpec.getDetails().get(VirtualMachine.PARAM_KEY_KEYBOARD); + 
keyboardLayout = vmSpec.getDetails().get(VmDetailConstants.KEYBOARD); vmConfigSpec.setExtraConfig(configureVnc(machineIdOptions, hyperHost, vmName, vmSpec.getVncPassword(), keyboardLayout)); if (!vmMo.configureVm(vmConfigSpec)) { @@ -3178,8 +3187,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (isVmInCluster(vm)) { if (s_logger.isDebugEnabled()) { s_logger.debug("VM " + vm + " is now missing from host report but we detected that it might be migrated to other host by vCenter"); - } - _vms.remove(vm); + } + + if(oldState != State.Starting && oldState != State.Migrating) { + s_logger.debug("VM " + vm + " is now missing from host report and VM is not at starting/migrating state, remove it from host VM-sync map, oldState: " + oldState); + _vms.remove(vm); + } else { + s_logger.debug("VM " + vm + " is missing from host report, but we will ignore VM " + vm + " in transition state " + oldState); + } continue; } diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java index ffa5bafe745..b2e2c4dbeea 100644 --- a/core/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java +++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixHelper.java @@ -108,7 +108,8 @@ public class CitrixHelper { _xcpGuestOsMap.put("Windows Vista (32-bit)", "Windows Vista (32-bit)"); _xcpGuestOsMap.put("Ubuntu 10.04 (32-bit)", "Ubuntu Lucid Lynx 10.04 (32-bit) (experimental)"); _xcpGuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit) (experimental)"); - _xcpGuestOsMap.put("Other install media", "Other install media"); + _xcpGuestOsMap.put("Other Linux (32-bit)", "Other install media"); + _xcpGuestOsMap.put("Other Linux (64-bit)", "Other install media"); _xcpGuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)"); _xcpGuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)"); } @@ -172,7 +173,8 @@ public class CitrixHelper { _xenServerGuestOsMap.put("Windows Vista (32-bit)", "Windows Vista (32-bit)"); _xenServerGuestOsMap.put("Windows XP SP2 (32-bit)", "Windows XP SP2 (32-bit)"); _xenServerGuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3 (32-bit)"); - _xenServerGuestOsMap.put("Other install media", "Other install media"); + _xenServerGuestOsMap.put("Other Linux (32-bit)", "Other install media"); + _xenServerGuestOsMap.put("Other Linux (64-bit)", "Other install media"); _xenServerGuestOsMap.put("Other PV (32-bit)", "CentOS 5.4 (32-bit)"); _xenServerGuestOsMap.put("Other PV (64-bit)", "CentOS 5.4 (64-bit)"); } @@ -247,11 +249,87 @@ public class CitrixHelper { _xenServer56FP1GuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3 (32-bit)"); _xenServer56FP1GuestOsMap.put("Ubuntu 10.04 (32-bit)", "Ubuntu Lucid Lynx 10.04 (32-bit) (experimental)"); _xenServer56FP1GuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit) (experimental)"); - _xenServer56FP1GuestOsMap.put("Other install media", "Other install media"); + _xenServer56FP1GuestOsMap.put("Other Linux (32-bit)", "Other install media"); + _xenServer56FP1GuestOsMap.put("Other Linux (64-bit)", "Other install media"); _xenServer56FP1GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)"); _xenServer56FP1GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)"); } + static { + _xenServer56FP2GuestOsMap.put("CentOS 4.5 (32-bit)", "CentOS 4.5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 4.6 (32-bit)", "CentOS 4.6 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 4.7 (32-bit)", "CentOS 4.7 (32-bit)"); + 
_xenServer56FP2GuestOsMap.put("CentOS 4.8 (32-bit)", "CentOS 4.8 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.0 (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.0 (64-bit)", "CentOS 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.1 (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.1 (64-bit)", "CentOS 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.2 (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.2 (64-bit)", "CentOS 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.3 (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.3 (64-bit)", "CentOS 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.4 (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.4 (64-bit)", "CentOS 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.5 (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("CentOS 5.5 (64-bit)", "CentOS 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Debian GNU/Linux 5.0 (32-bit)", "Debian Lenny 5.0 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Debian GNU/Linux 6(32-bit)", "Debian Squeeze 6.0 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Debian GNU/Linux 6(64-bit)", "Debian Squeeze 6.0 (64-bit) (experimental)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.0 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.0 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.1 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.1 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.2 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.2 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.3 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.3 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.4 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.4 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.5 (32-bit)", "Oracle Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Oracle Enterprise Linux 5.5 (64-bit)", "Oracle Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 4.5 (32-bit)", "Red Hat Enterprise Linux 4.5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 4.6 (32-bit)", "Red Hat Enterprise Linux 4.6 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 4.7 (32-bit)", "Red Hat Enterprise Linux 4.7 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 4.8 (32-bit)", "Red Hat Enterprise Linux 4.8 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.0 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.0 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.1 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.1 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + 
_xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.2 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.2 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.3 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.3 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.4 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.4 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.5 (32-bit)", "Red Hat Enterprise Linux 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 5.5 (64-bit)", "Red Hat Enterprise Linux 5 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 6.0 (32-bit)", "Red Hat Enterprise Linux 6 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Red Hat Enterprise Linux 6.0 (64-bit)", "Red Hat Enterprise Linux 6 (64-bit)"); + _xenServer56FP2GuestOsMap.put("SUSE Linux Enterprise Server 9 SP4 (32-bit)", "SUSE Linux Enterprise Server 9 SP4 (32-bit)"); + _xenServer56FP2GuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (32-bit)", "SUSE Linux Enterprise Server 10 SP1 (32-bit)"); + _xenServer56FP2GuestOsMap.put("SUSE Linux Enterprise Server 10 SP1 (64-bit)", "SUSE Linux Enterprise Server 10 SP1 (64-bit)"); + _xenServer56FP2GuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (32-bit)", "SUSE Linux Enterprise Server 10 SP2 (32-bit)"); + _xenServer56FP2GuestOsMap.put("SUSE Linux Enterprise Server 10 SP2 (64-bit)", "SUSE Linux Enterprise Server 10 SP2 (64-bit)"); + _xenServer56FP2GuestOsMap.put("SUSE Linux Enterprise Server 10 SP3 (64-bit)", "SUSE Linux Enterprise Server 10 SP3 (64-bit)"); + _xenServer56FP2GuestOsMap.put("SUSE Linux Enterprise Server 11 (32-bit)", "SUSE Linux Enterprise Server 11 (32-bit)"); + _xenServer56FP2GuestOsMap.put("SUSE Linux Enterprise Server 11 (64-bit)", "SUSE Linux Enterprise Server 11 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Windows 7 (32-bit)", "Windows 7 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Windows 7 (64-bit)", "Windows 7 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Windows Server 2003 (32-bit)", "Windows Server 2003 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Windows Server 2003 (64-bit)", "Windows Server 2003 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Windows Server 2008 (32-bit)", "Windows Server 2008 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Windows Server 2008 (64-bit)", "Windows Server 2008 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Windows Server 2008 R2 (64-bit)", "Windows Server 2008 R2 (64-bit)"); + _xenServer56FP2GuestOsMap.put("Windows Vista (32-bit)", "Windows Vista (32-bit)"); + _xenServer56FP2GuestOsMap.put("Windows XP SP3 (32-bit)", "Windows XP SP3 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Ubuntu 10.04 (32-bit)", "Ubuntu Lucid Lynx 10.04 (32-bit) (experimental)"); + _xenServer56FP2GuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit) (experimental)"); + _xenServer56FP2GuestOsMap.put("Other Linux (32-bit)", "Other install media"); + _xenServer56FP2GuestOsMap.put("Other Linux (64-bit)", "Other install media"); + _xenServer56FP2GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)"); + _xenServer56FP2GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)"); + } + static { _xenServer60GuestOsMap.put("CentOS 4.5 (32-bit)", 
"CentOS 4.5 (32-bit)"); @@ -339,7 +417,8 @@ public class CitrixHelper { _xenServer60GuestOsMap.put("Ubuntu 10.04 (64-bit)", "Ubuntu Lucid Lynx 10.04 (64-bit)"); _xenServer60GuestOsMap.put("Ubuntu 10.10 (32-bit)", "Ubuntu Maverick Meerkat 10.10 (32-bit) (experimental)"); _xenServer60GuestOsMap.put("Ubuntu 10.10 (64-bit)", "Ubuntu Maverick Meerkat 10.10 (64-bit) (experimental)"); - _xenServer60GuestOsMap.put("Other install media", "Other install media"); + _xenServer60GuestOsMap.put("Other Linux (32-bit)", "Other install media"); + _xenServer60GuestOsMap.put("Other Linux (64-bit)", "Other install media"); _xenServer60GuestOsMap.put("Other PV (32-bit)", "CentOS 5 (32-bit)"); _xenServer60GuestOsMap.put("Other PV (64-bit)", "CentOS 5 (64-bit)"); } diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 91974d93be5..2e4e93296d3 100644 --- a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -262,6 +262,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected String _storageNetworkName2; protected String _guestNetworkName; protected int _wait; + protected int _migratewait; protected String _instance; //instance name (default is usually "VM") static final Random _rand = new Random(System.currentTimeMillis()); @@ -325,7 +326,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe try { vm.destroy(conn); } catch (Exception e) { - s_logger.warn("Catch Exception " + e.getClass().getName() + ": unable to destroy VM " + vmRec.nameLabel + " due to " + e.toString()); + s_logger.warn("Catch Exception " + e.getClass().getName() + ": unable to destroy VM " + vmRec.nameLabel + " due to ", e); success = false; } } @@ -481,6 +482,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return execute((CheckSshCommand)cmd); } else if (clazz == SecurityIngressRulesCmd.class) { return execute((SecurityIngressRulesCmd) cmd); + } else if (clazz == SecurityEgressRulesCmd.class) { + return execute((SecurityEgressRulesCmd) cmd); } else if (clazz == OvsCreateGreTunnelCommand.class) { return execute((OvsCreateGreTunnelCommand)cmd); } else if (clazz == OvsSetTagAndFlowCommand.class) { @@ -622,6 +625,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected Network getNetwork(Connection conn, NicTO nic) throws XenAPIException, XmlRpcException { String[] tags = nic.getTags(); XsLocalNetwork network = getNativeNetworkForTraffic(conn, nic.getType(), tags != null && tags.length > 0 ? 
tags[0] : null); + if (network == null) { + s_logger.error("Network is not configured on the backend for nic " + nic.toString()); + throw new CloudRuntimeException("Network for the backend is not configured correctly for network broadcast domain: " + nic.getBroadcastUri()); + } if (nic.getBroadcastUri() != null && nic.getBroadcastUri().toString().contains("untagged")) { return network.getNetwork(); } else if (nic.getBroadcastType() == BroadcastDomainType.Vlan) { @@ -1910,7 +1917,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe stats = getVmStatsRawXML(conn); } } catch (Exception e1) { - s_logger.warn("Error whilst collecting raw stats from plugin:" + e1); + s_logger.warn("Error whilst collecting raw stats from plugin: ", e1); return null; } @@ -1929,7 +1936,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe try { doc = DocumentBuilderFactory.newInstance().newDocumentBuilder().parse(statsSource); } catch (Exception e) { - s_logger.warn("Exception caught whilst processing the document via document factory:"+e); + s_logger.warn("Exception caught whilst processing the document via document factory:", e); return null; } @@ -2161,9 +2168,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - String copy_vhd_to_secondarystorage(Connection conn, String mountpoint, String vdiuuid, String sruuid) { + private String copy_vhd_to_secondarystorage(Connection conn, String mountpoint, String vdiuuid, String sruuid, int wait) { String results = callHostPluginAsync(conn, "vmopspremium", "copy_vhd_to_secondarystorage", - 2 * 60 * 60 * 1000, "mountpoint", mountpoint, "vdiuuid", vdiuuid, "sruuid", sruuid); + wait, "mountpoint", mountpoint, "vdiuuid", vdiuuid, "sruuid", sruuid); if (results == null || results.isEmpty()) { String msg = "copy_vhd_to_secondarystorage return null"; @@ -2182,7 +2189,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe String upgradeSnapshot(Connection conn, String templatePath, String snapshotPath) { String results = callHostPluginAsync(conn, "vmopspremium", "upgrade_snapshot", - 2 * 60 * 60 * 1000, "templatePath", templatePath, "snapshotPath", snapshotPath); + 2 * 60 * 60, "templatePath", templatePath, "snapshotPath", snapshotPath); if (results == null || results.isEmpty()) { String msg = "upgrade_snapshot return null"; @@ -2199,9 +2206,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - String createTemplateFromSnapshot(Connection conn, String templatePath, String snapshotPath) { + String createTemplateFromSnapshot(Connection conn, String templatePath, String snapshotPath, int wait) { String results = callHostPluginAsync(conn, "vmopspremium", "create_privatetemplate_from_snapshot", - 2 * 60 * 60 * 1000, "templatePath", templatePath, "snapshotPath", snapshotPath); + wait, "templatePath", templatePath, "snapshotPath", snapshotPath); if (results == null || results.isEmpty()) { String msg = "create_privatetemplate_from_snapshot return null"; @@ -2218,9 +2225,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - String copy_vhd_from_secondarystorage(Connection conn, String mountpoint, String sruuid) { + String copy_vhd_from_secondarystorage(Connection conn, String mountpoint, String sruuid, int wait) { String results = callHostPluginAsync(conn, "vmopspremium", "copy_vhd_from_secondarystorage", - 2 * 60 * 60 * 1000, "mountpoint", mountpoint, "sruuid", sruuid); + wait, 
"mountpoint", mountpoint, "sruuid", sruuid); if (results == null || results.isEmpty()) { String msg = "copy_vhd_from_secondarystorage return null"; @@ -2240,6 +2247,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe public PrimaryStorageDownloadAnswer execute(final PrimaryStorageDownloadCommand cmd) { String tmplturl = cmd.getUrl(); String poolName = cmd.getPoolUuid(); + int wait = cmd.getWait(); try { URI uri = new URI(tmplturl); String tmplpath = uri.getHost() + ":" + uri.getPath(); @@ -2255,7 +2263,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } String pUuid = poolsr.getUuid(conn); boolean isISCSI = IsISCSI(poolsr.getType(conn)); - String uuid = copy_vhd_from_secondarystorage(conn, tmplpath, pUuid); + String uuid = copy_vhd_from_secondarystorage(conn, tmplpath, pUuid, wait); VDI tmpl = getVDIbyUuid(conn, uuid); VDI snapshotvdi = tmpl.snapshot(conn, new HashMap()); String snapshotUuid = snapshotvdi.getUuid(conn); @@ -2444,9 +2452,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe break; } } - Map other = new HashMap(); - other.put("live", "true"); - vm.poolMigrate(conn, dsthost, other); + migrateVM(conn, dsthost, vm, vmName); vm.setAffinity(conn, dsthost); state = State.Stopping; } @@ -2779,7 +2785,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vm.hardShutdown(conn); } catch (Exception e) { String msg = "VM hardshutdown failed due to " + e.toString(); - s_logger.warn(msg); + s_logger.warn(msg, e); } } if (vm.getPowerState(conn) == VmPowerState.HALTED) { @@ -2787,12 +2793,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vm.destroy(conn); } catch (Exception e) { String msg = "VM destroy failed due to " + e.toString(); - s_logger.warn(msg); + s_logger.warn(msg, e); } } } catch (Exception e) { String msg = "VM getPowerState failed due to " + e.toString(); - s_logger.warn(msg); + s_logger.warn(msg, e); } } if (mounts != null) { @@ -2803,7 +2809,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vbds = vdi.getVBDs(conn); } catch (Exception e) { String msg = "VDI getVBDS failed due to " + e.toString(); - s_logger.warn(msg); + s_logger.warn(msg, e); continue; } for (VBD vbd : vbds) { @@ -2812,7 +2818,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vbd.destroy(conn); } catch (Exception e) { String msg = "VBD destroy failed due to " + e.toString(); - s_logger.warn(msg); + s_logger.warn(msg, e); } } } @@ -2984,13 +2990,50 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } } - - protected VDI cloudVDIcopy(Connection conn, VDI vdi, SR sr) throws XenAPIException, XmlRpcException { + + private + void migrateVM(Connection conn, Host destHost, VM vm, String vmName) throws XmlRpcException { Task task = null; + try { + Map other = new HashMap(); + other.put("live", "true"); + task = vm.poolMigrateAsync(conn, destHost, other); + try { + // poll every 1 seconds + long timeout = (long)(_migratewait) * 1000L; + waitForTask(conn, task, 1000, timeout); + checkForSuccess(conn, task); + } catch (Types.HandleInvalid e) { + if (vm.getResidentOn(conn).equals(destHost)) { + task = null; + return; + } + throw new CloudRuntimeException("migrate VM catch HandleInvalid and VM is not running on dest host"); + } + } catch (XenAPIException e) { + String msg = "Unable to migrate VM(" + vmName + ") from host(" + _host.uuid +") due to " 
+ e.toString(); + s_logger.warn(msg, e); + throw new CloudRuntimeException(msg); + }finally { + if( task != null) { + try { + task.destroy(conn); + } catch (Exception e1) { + s_logger.debug("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid +") due to " + e1.toString()); + } + } + } + } + + protected VDI cloudVDIcopy(Connection conn, VDI vdi, SR sr, int wait) throws XenAPIException, XmlRpcException { + Task task = null; + if ( wait == 0 ) { + wait = 2 * 60 * 60; + } try { task = vdi.copyAsync(conn, sr); // poll every 1 seconds , timeout after 2 hours - waitForTask(conn, task, 1000, 2 * 60 * 60 * 1000); + waitForTask(conn, task, 1000, wait * 1000); checkForSuccess(conn, task); VDI dvdi = Types.toVDI(task, conn); return dvdi; @@ -2999,8 +3042,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe try { task.destroy(conn); } catch (Exception e1) { - s_logger.warn("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid + ") due to " - + e1.toString()); + s_logger.warn("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid + ") due to ", e1); } } } @@ -3009,7 +3051,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe boolean swiftDownload(Connection conn, SwiftTO swift, String rfilename, String lfilename) { String result = null; try { - result = callHostPluginAsync(conn, "swift", "swift", 60 * 60 * 1000, + result = callHostPluginAsync(conn, "swift", "swift", 60 * 60, "op", "download", "hostname", swift.getHostName(), "account", swift.getAccount(), "username", swift.getUserName(), "token", swift.getToken(), "rfilename", rfilename, "lfilename", lfilename); @@ -3017,7 +3059,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return true; } } catch (Exception e) { - s_logger.warn("swift download failed due to " + e.toString()); + s_logger.warn("swift download failed due to ", e); } return false; } @@ -3025,7 +3067,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe boolean swiftUpload(Connection conn, SwiftTO swift, String rfilename, String lfilename) { String result = null; try { - result = callHostPluginAsync(conn, "swift", "swift", 60 * 60 * 1000, + result = callHostPluginAsync(conn, "swift", "swift", 60 * 60, "op", "upload", "hostname", swift.getHostName(), "account", swift.getAccount(), "username", swift.getUserName(), "token", swift.getToken(), "rfilename", rfilename, "lfilename", lfilename); @@ -3033,7 +3075,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return true; } } catch (Exception e) { - s_logger.warn("swift download failed due to " + e.toString()); + s_logger.warn("swift download failed due to ", e); } return false; } @@ -3048,7 +3090,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return true; } } catch (Exception e) { - s_logger.warn("swift download failed due to " + e.toString()); + s_logger.warn("swift download failed due to ", e); } return false; } @@ -3059,7 +3101,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected String backupSnapshot(Connection conn, String primaryStorageSRUuid, Long dcId, Long accountId, - Long volumeId, String secondaryStorageMountPath, String snapshotUuid, String prevBackupUuid, Boolean isISCSI) { + Long volumeId, String secondaryStorageMountPath, String snapshotUuid, String prevBackupUuid, Boolean isISCSI, int wait) { String backupSnapshotUuid = null; 
if (prevBackupUuid == null) { @@ -3068,7 +3110,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe // Each argument is put in a separate line for readability. // Using more lines does not harm the environment. - String results = callHostPluginAsync(conn, "vmopsSnapshot", "backupSnapshot", 60 * 60 * 1000, + String results = callHostPluginAsync(conn, "vmopsSnapshot", "backupSnapshot", wait, "primaryStorageSRUuid", primaryStorageSRUuid, "dcId", dcId.toString(), "accountId", accountId .toString(), "volumeId", volumeId.toString(), "secondaryStorageMountPath", secondaryStorageMountPath, "snapshotUuid", snapshotUuid, "prevBackupUuid", prevBackupUuid, "isISCSI", @@ -3099,7 +3141,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return backupSnapshotUuid; } - protected String callHostPluginAsync(Connection conn, String plugin, String cmd, int timeout, String... params) { + protected String callHostPluginAsync(Connection conn, String plugin, String cmd, int wait, String... params) { + int timeout = wait * 1000; Map args = new HashMap(); Task task = null; try { @@ -3133,8 +3176,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe try { task.destroy(conn); } catch (Exception e1) { - s_logger.warn("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid + ") due to " - + e1.toString()); + s_logger.warn("unable to destroy task(" + task.toString() + ") on host(" + _host.uuid + ") due to ", e1); } } } @@ -3813,7 +3855,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe @Override public PingCommand getCurrentStatus(long id) { try { - Connection conn = getConnection(); if (!pingXenServer()) { Thread.sleep(1000); if (!pingXenServer()) { @@ -3821,6 +3862,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return null; } } + Connection conn = getConnection(); HashMap newStates = deltaSync(conn); if (newStates == null) { s_logger.warn("Unable to get current status from sync"); @@ -4091,7 +4133,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe sr = pbd.getSR(conn); srRec = sr.getRecord(conn); } catch (Exception e) { - s_logger.warn("pbd.getSR get Exception due to " + e.toString()); + s_logger.warn("pbd.getSR get Exception due to ", e); continue; } String type = srRec.type; @@ -4104,7 +4146,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe pbd.destroy(conn); sr.forget(conn); } catch (Exception e) { - s_logger.warn("forget SR catch Exception due to " + e.toString()); + s_logger.warn("forget SR catch Exception due to ", e); } } } @@ -4971,6 +5013,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe String value = (String) params.get("wait"); _wait = NumbersUtil.parseInt(value, 600); + + value = (String) params.get("migratewait"); + _migratewait = NumbersUtil.parseInt(value, 3600); if (_pod == null) { throw new ConfigurationException("Unable to get the pod"); @@ -5422,6 +5467,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe StorageFilerTO poolTO = cmd.getPool(); String secondaryStorageURL = cmd.getSecondaryStorageURL(); boolean toSecondaryStorage = cmd.toSecondaryStorage(); + int wait = cmd.getWait(); try { URI uri = new URI(secondaryStorageURL); String remoteVolumesMountPath = uri.getHost() + ":" + uri.getPath() + "/volumes/"; @@ -5444,20 +5490,20 @@ public abstract class CitrixResourceBase 
implements ServerResource, HypervisorRe // Look up the volume on the source primary storage pool VDI srcVolume = getVDIbyUuid(conn, volumeUUID); // Copy the volume to secondary storage - VDI destVolume = cloudVDIcopy(conn, srcVolume, secondaryStorage); + VDI destVolume = cloudVDIcopy(conn, srcVolume, secondaryStorage, wait); String destVolumeUUID = destVolume.getUuid(conn); return new CopyVolumeAnswer(cmd, true, null, null, destVolumeUUID); } finally { removeSR(conn, secondaryStorage); } } else { - String uuid = copy_vhd_to_secondarystorage(conn, mountpoint, volumeUUID, srUuid); + String uuid = copy_vhd_to_secondarystorage(conn, mountpoint, volumeUUID, srUuid, wait); return new CopyVolumeAnswer(cmd, true, null, null, uuid); } } else { try { String volumePath = mountpoint + "/" + volumeUUID + ".vhd"; - String uuid = copy_vhd_from_secondarystorage(conn, volumePath, srUuid); + String uuid = copy_vhd_from_secondarystorage(conn, volumePath, srUuid, wait ); return new CopyVolumeAnswer(cmd, true, null, srUuid, uuid); } finally { deleteSecondaryStorageFolder(conn, remoteVolumesMountPath, volumeFolder); @@ -5752,7 +5798,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe Long accountId = cmd.getAccountId(); String userSpecifiedName = cmd.getTemplateName(); Long templateId = cmd.getTemplateId(); - + int wait = cmd.getWait(); String details = null; SR tmpltSR = null; boolean result = false; @@ -5773,7 +5819,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe tmpltSR = createNfsSRbyURI(conn, tmpltURI, false); // copy volume to template SR - VDI tmpltVDI = cloudVDIcopy(conn, volume, tmpltSR); + VDI tmpltVDI = cloudVDIcopy(conn, volume, tmpltSR, wait); // scan makes XenServer pick up VDI physicalSize tmpltSR.scan(conn); if (userSpecifiedName != null) { @@ -5842,6 +5888,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe String backedUpSnapshotUuid = cmd.getSnapshotUuid(); Long newTemplateId = cmd.getNewTemplateId(); String userSpecifiedName = cmd.getTemplateName(); + int wait = cmd.getWait(); // By default, assume failure String details = null; boolean result = false; @@ -5857,7 +5904,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe String templatePath = secondaryStorageMountPath + "/" + installPath; // create snapshot SR String snapshotPath = secondaryStorageMountPath + "/snapshots/" + accountId + "/" + volumeId + "/" + backedUpSnapshotUuid + ".vhd"; - String results = createTemplateFromSnapshot(conn, templatePath, snapshotPath); + String results = createTemplateFromSnapshot(conn, templatePath, snapshotPath, wait); String[] tmp = results.split("#"); String tmpltUuid = tmp[1]; long physicalSize = Long.parseLong(tmp[2]); @@ -5918,6 +5965,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe String snapshotUuid = cmd.getSnapshotUuid(); // not null: Precondition. 
String prevBackupUuid = cmd.getPrevBackupUuid(); String prevSnapshotUuid = cmd.getPrevSnapshotUuid(); + int wait = cmd.getWait(); // By default assume failure String details = null; boolean success = false; @@ -5960,7 +6008,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe SR snapshotSr = null; try { snapshotSr = createNfsSRbyURI(conn, new URI(snapshotMountpoint), false); - VDI backedVdi = cloudVDIcopy(conn, snapshotVdi, snapshotSr); + VDI backedVdi = cloudVDIcopy(conn, snapshotVdi, snapshotSr, wait); snapshotBackupUuid = backedVdi.getUuid(conn); if( cmd.getSwift() != null ) { try { @@ -5983,7 +6031,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe snapshotBackupUuid = filename; success = true; } else { - snapshotBackupUuid = backupSnapshot(conn, primaryStorageSRUuid, dcId, accountId, volumeId, secondaryStorageMountPath, snapshotUuid, prevBackupUuid, isISCSI); + snapshotBackupUuid = backupSnapshot(conn, primaryStorageSRUuid, dcId, accountId, volumeId, secondaryStorageMountPath, snapshotUuid, prevBackupUuid, isISCSI, wait); success = (snapshotBackupUuid != null); } } @@ -6010,7 +6058,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe Long volumeId = cmd.getVolumeId(); String secondaryStoragePoolURL = cmd.getSecondaryStoragePoolURL(); String backedUpSnapshotUuid = cmd.getSnapshotUuid(); - + int wait = cmd.getWait(); boolean result = false; // Generic error message. String details = null; @@ -6031,7 +6079,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe URI snapshotURI = new URI(secondaryStoragePoolURL + "/snapshots/" + accountId + "/" + volumeId ); String snapshotPath = snapshotURI.getHost() + ":" + snapshotURI.getPath() + "/" + backedUpSnapshotUuid + ".vhd"; String srUuid = primaryStorageSR.getUuid(conn); - volumeUUID = copy_vhd_from_secondarystorage(conn, snapshotPath, srUuid); + volumeUUID = copy_vhd_from_secondarystorage(conn, snapshotPath, srUuid, wait); result = true; } catch (XenAPIException e) { details += " due to " + e.toString(); diff --git a/core/src/com/cloud/hypervisor/xen/resource/XenServer56Resource.java b/core/src/com/cloud/hypervisor/xen/resource/XenServer56Resource.java index 629a8647463..13120aa055a 100644 --- a/core/src/com/cloud/hypervisor/xen/resource/XenServer56Resource.java +++ b/core/src/com/cloud/hypervisor/xen/resource/XenServer56Resource.java @@ -277,8 +277,8 @@ public class XenServer56Resource extends CitrixResourceBase { @Override protected CheckOnHostAnswer execute(CheckOnHostCommand cmd) { - Connection conn = getConnection(); try { + Connection conn = getConnection(); String result = callHostPluginPremium(conn, "check_heartbeat", "host", cmd.getHost().getGuid(), "interval", Integer.toString(_heartbeatInterval * 2)); if (result == null) { diff --git a/core/src/com/cloud/network/resource/NetscalerMPXResource.java b/core/src/com/cloud/network/resource/NetscalerMPXResource.java new file mode 100644 index 00000000000..2893ae270a8 --- /dev/null +++ b/core/src/com/cloud/network/resource/NetscalerMPXResource.java @@ -0,0 +1,812 @@ +/** + * * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved +* + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. 
+ * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package com.cloud.network.resource; + +import java.net.URL; +import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import javax.naming.ConfigurationException; + +import com.cloud.agent.IAgentControl; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.ExternalNetworkResourceUsageAnswer; +import com.cloud.agent.api.ExternalNetworkResourceUsageCommand; +import com.cloud.agent.api.MaintainAnswer; +import com.cloud.agent.api.MaintainCommand; +import com.cloud.agent.api.PingCommand; +import com.cloud.agent.api.ReadyAnswer; +import com.cloud.agent.api.ReadyCommand; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.StartupExternalLoadBalancerCommand; +import com.cloud.agent.api.routing.IpAssocAnswer; +import com.cloud.agent.api.routing.IpAssocCommand; +import com.cloud.agent.api.routing.LoadBalancerConfigCommand; +import com.cloud.agent.api.to.IpAddressTO; +import com.cloud.agent.api.to.LoadBalancerTO; +import com.cloud.agent.api.to.LoadBalancerTO.DestinationTO; +import com.cloud.host.Host; +import com.cloud.host.Host.Type; +import com.cloud.resource.ServerResource; +import com.cloud.serializer.GsonHelper; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.exception.ExecutionException; +import com.cloud.utils.net.NetUtils; +import com.google.gson.Gson; + +import com.citrix.netscaler.nitro.service.nitro_service; +import com.citrix.netscaler.nitro.resource.base.base_response; +import com.citrix.netscaler.nitro.exception.nitro_exception; +import com.citrix.netscaler.nitro.resource.config.ns.nsconfig; +import com.citrix.netscaler.nitro.resource.config.lb.lbvserver; +import com.citrix.netscaler.nitro.resource.config.basic.service; +import com.citrix.netscaler.nitro.resource.config.network.*; +import com.citrix.netscaler.nitro.resource.config.ns.*; +import com.citrix.netscaler.nitro.resource.config.basic.server_service_binding; +import org.apache.axis.types.*; +import org.apache.log4j.Logger; + +class NitroError { + static final int NS_RESOURCE_EXISTS = 273; + static final int NS_RESOURCE_NOT_EXISTS=258; + static final int NS_NO_SERIVCE = 344; +} + +public class NetscalerMPXResource implements ServerResource { + + // deployment configuration + private String _name; + private String _zoneId; + private String _ip; + private String _username; + private String _password; + private String _publicInterface; + private String _privateInterface; + private Integer _numRetries; + private String _guid; + private boolean _inline; + + private static final Logger s_logger = Logger.getLogger(NetscalerMPXResource.class); + protected Gson _gson; + private String _objectNamePathSep = "-"; + + nitro_service nsService ; + Long timeout = new Long(100000); + base_response apiCallResult; + + public NetscalerMPXResource () { + _gson = GsonHelper.getGsonLogger(); + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + try { + _name = (String) params.get("name"); + if (_name == null) { + throw new ConfigurationException("Unable to find name"); + } + + _zoneId = (String) 
params.get("zoneId"); + if (_zoneId == null) { + throw new ConfigurationException("Unable to find zone"); + } + + _ip = (String) params.get("ip"); + if (_ip == null) { + throw new ConfigurationException("Unable to find IP"); + } + + _username = (String) params.get("username"); + if (_username == null) { + throw new ConfigurationException("Unable to find username"); + } + + _password = (String) params.get("password"); + if (_password == null) { + throw new ConfigurationException("Unable to find password"); + } + + _publicInterface = (String) params.get("publicInterface"); + if (_publicInterface == null) { + throw new ConfigurationException("Unable to find public interface"); + } + + _privateInterface = (String) params.get("privateInterface"); + if (_privateInterface == null) { + throw new ConfigurationException("Unable to find private interface"); + } + + _numRetries = NumbersUtil.parseInt((String) params.get("numRetries"), 1); + + _guid = (String)params.get("guid"); + if (_guid == null) { + throw new ConfigurationException("Unable to find the guid"); + } + + _inline = Boolean.parseBoolean((String) params.get("inline")); + + if (!login()) { + throw new ExecutionException("Failed to login to the Netscaler device."); + } + + if (!enableNetScalerLoadBalancing()) { + throw new ExecutionException("Failed to enable load balancing feature on the Netscaler device."); + } + + return true; + } catch (Exception e) { + throw new ConfigurationException(e.getMessage()); + } + } + + private boolean login() { + try { + nsService = new nitro_service(_ip, "https"); + apiCallResult = nsService.login(_username, _password, timeout); + if (apiCallResult.errorcode == 0) { + return true; + } else { + s_logger.debug("Failed to log in to Netscaler device at " + _ip + " due to " + apiCallResult.message); + return false; + } + } catch (nitro_exception e) { + s_logger.debug("Failed to log in to Netscaler device at " + _ip + " due to " + e.response[0].message); + } catch (Exception e) { + s_logger.debug("Failed to log in to Netscaler device at " + _ip + " due to " + e.getMessage()); + } + return false; + } + + private boolean enableNetScalerLoadBalancing() { + try { + String[] feature = new String[1]; + feature[0] = "LB"; + nsService.enable_features(feature); + return true; + } catch (nitro_exception e) { + System.out.println("Enabling netscaler load balancing feature failed errorcode="+e.getErrorCode()+",message="+ e.getMessage()); + } catch (Exception e) { + System.out.println("Enabling netscaler load balancing feature failed due to "+e.getMessage()); + } + return false; + } + + @Override + public StartupCommand[] initialize() { + StartupExternalLoadBalancerCommand cmd = new StartupExternalLoadBalancerCommand(); + cmd.setName(_name); + cmd.setDataCenter(_zoneId); + cmd.setPod(""); + cmd.setPrivateIpAddress(_ip); + cmd.setStorageIpAddress(""); + cmd.setVersion(""); + cmd.setGuid(_guid); + return new StartupCommand[]{cmd}; + } + + @Override + public Answer executeRequest(Command cmd) { + return executeRequest(cmd, _numRetries); + } + + private Answer executeRequest(Command cmd, int numRetries) { + if (cmd instanceof ReadyCommand) { + return execute((ReadyCommand) cmd); + } else if (cmd instanceof MaintainCommand) { + return execute((MaintainCommand) cmd); + } else if (cmd instanceof IpAssocCommand) { + return execute((IpAssocCommand) cmd, numRetries); + } else if (cmd instanceof LoadBalancerConfigCommand) { + return execute((LoadBalancerConfigCommand) cmd, numRetries); + } else if (cmd instanceof 
ExternalNetworkResourceUsageCommand) { + return execute((ExternalNetworkResourceUsageCommand) cmd); + } else if (cmd instanceof MaintainCommand) { + return execute((MaintainCommand) cmd); + } else { + return Answer.createUnsupportedCommandAnswer(cmd); + } + } + + private Answer execute(ReadyCommand cmd) { + return new ReadyAnswer(cmd); + } + + protected Answer execute(MaintainCommand cmd) { + if (s_logger.isInfoEnabled()) { + s_logger.info("Executing resource MaintainCommand"); + } + return new MaintainAnswer(cmd, "Put host in maintaince"); + } + + private synchronized Answer execute(IpAssocCommand cmd, int numRetries) { + String[] results = new String[cmd.getIpAddresses().length]; + int i = 0; + try { + IpAddressTO[] ips = cmd.getIpAddresses(); + for (IpAddressTO ip : ips) { + long guestVlanTag = Long.valueOf(ip.getVlanId()); + String vlanSelfIp = ip.getVlanGateway(); + String vlanNetmask = ip.getVlanNetmask(); + + // Check and delete any existing guest VLAN with this tag, self IP, and netmask + deleteGuestVlan(guestVlanTag, vlanSelfIp, vlanNetmask); + + if (ip.isAdd()) { + // Add a new guest VLAN and its subnet and bind it to private interface + addGuestVlanAndSubnet(guestVlanTag, vlanSelfIp, vlanNetmask); + } + + saveConfiguration(); + results[i++] = ip.getPublicIp() + " - success"; + } + } catch (ExecutionException e) { + s_logger.error("Failed to execute IPAssocCommand due to " + e); + + if (shouldRetry(numRetries)) { + return retry(cmd, numRetries); + } else { + results[i++] = IpAssocAnswer.errorResult; + } + } + + return new IpAssocAnswer(cmd, results); + } + + private synchronized Answer execute(LoadBalancerConfigCommand cmd, int numRetries) { + try { + String lbProtocol; + String lbMethod; + LoadBalancerTO[] loadBalancers = cmd.getLoadBalancers(); + + for (LoadBalancerTO loadBalancer : loadBalancers) { + + if (loadBalancer.getProtocol() == null) { + lbProtocol = "TCP"; + } else if (loadBalancer.getProtocol().equals(NetUtils.TCP_PROTO)){ + lbProtocol = "TCP"; + } else if (loadBalancer.getProtocol().equals(NetUtils.UDP_PROTO)) { + lbProtocol = "UDP"; + } else { + throw new ExecutionException("Got invalid protocol: " + loadBalancer.getProtocol()); + } + + if (loadBalancer.getAlgorithm().equals("roundrobin")) { + lbMethod = "ROUNDROBIN"; + } else if (loadBalancer.getAlgorithm().equals("leastconn")) { + lbMethod = "LEASTCONNECTION"; + } else { + throw new ExecutionException("Got invalid load balancing algorithm: " + loadBalancer.getAlgorithm()); + } + + String srcIp = loadBalancer.getSrcIp(); + int srcPort = loadBalancer.getSrcPort(); + String nsVirtualServerName = generateNSVirtualServerName(srcIp, srcPort, lbProtocol); + + boolean destinationsToAdd = false; + for (DestinationTO destination : loadBalancer.getDestinations()) { + if (!destination.isRevoked()) { + destinationsToAdd = true; + break; + } + } + + if (!loadBalancer.isRevoked() && destinationsToAdd) { + + // create a load balancing virtual server + addLBVirtualServer(nsVirtualServerName, srcIp, srcPort, lbMethod, lbProtocol); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Created load balancing virtual server " + nsVirtualServerName + " on the Netscaler device"); + } + + List activePoolMembers = new ArrayList(); + for (DestinationTO destination : loadBalancer.getDestinations()) { + + String nsServerName = generateNSServerName(destination.getDestIp()); + String nsServiceName = generateNSServiceName(destination.getDestIp(), destination.getDestPort()); + + if (!destination.isRevoked()) { + // add a new destination to 
deployed load balancing rule + + // add a new server + if (!nsServerExists(nsServerName)) { + com.citrix.netscaler.nitro.resource.config.basic.server nsServer = new com.citrix.netscaler.nitro.resource.config.basic.server(); + nsServer.set_name(nsServerName); + nsServer.set_ipaddress(destination.getDestIp()); + apiCallResult = com.citrix.netscaler.nitro.resource.config.basic.server.add(nsService, nsServer); + if ((apiCallResult.errorcode != 0) && (apiCallResult.errorcode != NitroError.NS_RESOURCE_EXISTS)) { + throw new ExecutionException("Failed to add server " + destination.getDestIp() + " due to" + apiCallResult.message); + } + } + + // create a new service using the server added + if (!nsServiceExists(nsServiceName)) { + com.citrix.netscaler.nitro.resource.config.basic.service newService = new com.citrix.netscaler.nitro.resource.config.basic.service(); + newService.set_name(nsServiceName); + newService.set_port(destination.getDestPort()); + newService.set_servername(nsServerName); + newService.set_state("ENABLED"); + newService.set_servicetype(lbProtocol); + apiCallResult = com.citrix.netscaler.nitro.resource.config.basic.service.add(nsService, newService); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to create service " + nsServiceName + " using server " + nsServerName + " due to" + apiCallResult.message); + } + } + + //bind service to load balancing virtual server + if (!nsServiceBindingExists(nsVirtualServerName, nsServiceName)) { + com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding svcBinding = new com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding(); + svcBinding.set_name(nsVirtualServerName); + svcBinding.set_servicename(nsServiceName); + apiCallResult = com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.add(nsService, svcBinding); + + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to bind service: " + nsServiceName + " to the lb virtual server: " + nsVirtualServerName + " on Netscaler device"); + } + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Successfully added LB destination: " + destination.getDestIp() + ":" + destination.getDestPort() + " to load balancer " + srcIp + ":" + srcPort); + } + } else { + // remove a destination from the deployed load balancing rule + com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding[] serviceBindings = com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.get(nsService, nsVirtualServerName); + if (serviceBindings != null) { + for (com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding binding : serviceBindings) { + if (nsServiceName.equalsIgnoreCase(binding.get_servicename())) { + // delete the binding + apiCallResult = com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.delete(nsService, binding); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to delete the binding between the virtual server: " + nsVirtualServerName + " and service:" + nsServiceName); + } + + // delete the service + apiCallResult = com.citrix.netscaler.nitro.resource.config.basic.service.delete(nsService, nsServiceName); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to delete service: " + nsServiceName); + } + + // delete the server if there is no associated services + server_service_binding[] services = server_service_binding.get(nsService, nsServerName); + if ((services == null) || (services.length == 0)) { + 
apiCallResult = com.citrix.netscaler.nitro.resource.config.basic.server.delete(nsService, nsServerName); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to remove server:" + nsServerName); + } + } + } + } + } + } + } + } else { + // delete the implemented load balancing rule and its destinations + lbvserver lbserver = lbvserver.get(nsService, nsVirtualServerName); + if (lbserver == null) { + throw new ExecutionException("Failed to find virtual server with name:" + nsVirtualServerName); + } + //unbind the all services associated with this virtual server + com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding[] serviceBindings = com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.get(nsService, nsVirtualServerName); + + if (serviceBindings != null) { + for (com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding binding : serviceBindings) { + String serviceName = binding.get_servicename(); + apiCallResult = com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.delete(nsService, binding); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to unbind servic from the lb virtual server: " + nsVirtualServerName); + } + + com.citrix.netscaler.nitro.resource.config.basic.service svc = com.citrix.netscaler.nitro.resource.config.basic.service.get(nsService, serviceName); + String nsServerName = svc.get_servername(); + + // delete the service + com.citrix.netscaler.nitro.resource.config.basic.service.delete(nsService, serviceName); + + //delete the server if no more services attached + server_service_binding[] services = server_service_binding.get(nsService, nsServerName); + if ((services == null) || (services.length == 0)) { + apiCallResult = com.citrix.netscaler.nitro.resource.config.basic.server.delete(nsService, nsServerName); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to remove server:" + nsServerName); + } + } + } + } + removeLBVirtualServer(nsVirtualServerName); + } + } + + if (s_logger.isInfoEnabled()) { + s_logger.info("Successfully executed resource LoadBalancerConfigCommand: " + _gson.toJson(cmd)); + } + + saveConfiguration(); + return new Answer(cmd); + } catch (ExecutionException e) { + s_logger.error("Failed to execute LoadBalancerConfigCommand due to " + e.getMessage()); + if (shouldRetry(numRetries)) { + return retry(cmd, numRetries); + } else { + return new Answer(cmd, e); + } + } catch (Exception e) { + s_logger.error("Failed to execute LoadBalancerConfigCommand due to " + e.getMessage()); + if (shouldRetry(numRetries)) { + return retry(cmd, numRetries); + } else { + return new Answer(cmd, e); + } + } + } + + private synchronized ExternalNetworkResourceUsageAnswer execute(ExternalNetworkResourceUsageCommand cmd) { + try { + return getPublicIpBytesSentAndReceived(cmd); + } catch (ExecutionException e) { + return new ExternalNetworkResourceUsageAnswer(cmd, e); + } + } + + private void addGuestVlanAndSubnet(long vlanTag, String vlanSelfIp, String vlanNetmask) throws ExecutionException { + org.apache.axis.types.UnsignedInt result; + + try { + String vlanName = generateVlanName(vlanTag); + if (!nsVlanExists(vlanTag)) { + // add new vlan + vlan vlanObj = new vlan(); + vlanObj.set_id(vlanTag); + apiCallResult = vlan.add(nsService, vlanObj); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to add new vlan with tag:" + vlanTag + "due to" + apiCallResult.message); + } + + // add self-ip and subnet to the 
Netscaler + nsip selfIp = new nsip(); + selfIp.set_ipaddress(vlanSelfIp); + selfIp.set_netmask(vlanNetmask); + selfIp.set_type("SNIP"); + apiCallResult = nsip.add(nsService, selfIp); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to add new self-ip due to "+ apiCallResult.message); + } + + //bind the vlan to guest subnet + vlan_nsip_binding ipVlanBinding = new vlan_nsip_binding(); + ipVlanBinding.set_id(vlanTag); + ipVlanBinding.set_ipaddress(vlanSelfIp); + ipVlanBinding.set_netmask(vlanNetmask); + apiCallResult = vlan_nsip_binding.add(nsService, ipVlanBinding); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to bind vlan with tag:" + vlanTag + " to the subnet due to" + apiCallResult.message); + } + + // bind vlan to the private interface + vlan_interface_binding vlanBinding = new vlan_interface_binding(); + vlanBinding.set_ifnum(_privateInterface); + vlanBinding.set_tagged(true); + vlanBinding.set_id(vlanTag); + apiCallResult = vlan_interface_binding.add(nsService, vlanBinding); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to bind vlan with tag:" + vlanTag + " with the interface " + _privateInterface + " due to " + apiCallResult.message); + } + } else { + throw new ExecutionException("Failed to configure Netscaler device for vlan with tag " + vlanTag + " as vlan already exisits"); + } + } catch (nitro_exception e) { + throw new ExecutionException("Failed to implement guest network on the Netscaler device"); + } catch (Exception e) { + throw new ExecutionException("Failed to implement guest network on the Netscaler device"); + } + } + + private void deleteGuestVlan(long vlanTag, String vlanSelfIp, String vlanNetmask) throws ExecutionException { + org.apache.axis.types.UnsignedInt result; + + try { + if (nsVlanExists(vlanTag)) { + + // Delete all servers and associated services from this guest VLAN + deleteServersInGuestVlan(vlanTag, vlanSelfIp, vlanNetmask); + + // unbind vlan to the private interface + vlan_interface_binding vlanIfBinding = new vlan_interface_binding(); + vlanIfBinding.set_id(vlanTag); + vlanIfBinding.set_ifnum(_privateInterface); + vlanIfBinding.set_tagged(true); + apiCallResult = vlan_interface_binding.delete(nsService, vlanIfBinding); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to unbind vlan:" + vlanTag + " with the private interface due to " + apiCallResult.message); + } + + //unbind the vlan to subnet + vlan_nsip_binding vlanSnipBinding = new vlan_nsip_binding(); + vlanSnipBinding.set_netmask(vlanNetmask); + vlanSnipBinding.set_ipaddress(vlanSelfIp); + vlanSnipBinding.set_id(vlanTag); + apiCallResult = vlan_nsip_binding.delete(nsService, vlanSnipBinding); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to unbind vlan:" + vlanTag + " with the subnet due to " + apiCallResult.message); + } + + // remove subnet IP + nsip subnetIp = nsip.get(nsService, vlanSelfIp); + apiCallResult = nsip.delete(nsService, subnetIp); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to remove subnet ip:" + vlanTag + " to the subnet due to" + apiCallResult.message); + } + + // remove vlan + apiCallResult = com.citrix.netscaler.nitro.resource.config.network.vlan.delete(nsService, vlanTag); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to remove vlan with tag:" + vlanTag + "due to" + apiCallResult.message); + } + } + } catch (nitro_exception e) { + throw new ExecutionException("Failed 
to delete guest vlan network on the Netscaler device"); + } catch (Exception e) { + s_logger.error(e); + throw new ExecutionException(e.getMessage()); + } + } + + private boolean nsVlanExists(long vlanTag) {// throws ExecutionException { + try { + if (vlan.get(nsService, new Long(vlanTag)) != null) { + return true; + } + return false; + } catch (Exception e) { + return false; + } + } + + private boolean nsServerExists(String serverName) throws ExecutionException { + try { + if (com.citrix.netscaler.nitro.resource.config.basic.server.get(nsService, serverName) != null) { + return true; + } else { + return false; + } + } catch (nitro_exception e) { + if (e.getErrorCode() == NitroError.NS_RESOURCE_NOT_EXISTS) { + return false; + } else { + throw new ExecutionException(e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException(e.getMessage()); + } + } + + private boolean nsServiceExists(String serviceName) throws ExecutionException { + try { + if (com.citrix.netscaler.nitro.resource.config.basic.service.get(nsService, serviceName) != null) { + return true; + } else { + return false; + } + } catch (nitro_exception e) { + if (e.getErrorCode() == NitroError.NS_NO_SERIVCE) { + return false; + } else { + throw new ExecutionException(e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException(e.getMessage()); + } + } + + private boolean nsServiceBindingExists(String lbVirtualServer, String serviceName) throws ExecutionException { + try { + com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding[] serviceBindings = com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.get(nsService, lbVirtualServer); + if (serviceBindings != null) { + for (com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding binding : serviceBindings) { + if (serviceName.equalsIgnoreCase(binding.get_servicename())) { + return true; + } + } + } + return false; + } catch (nitro_exception e) { + throw new ExecutionException(e.getMessage()); + } catch (Exception e) { + throw new ExecutionException(e.getMessage()); + } + } + + private void deleteServersInGuestVlan(long vlanTag, String vlanSelfIp, String vlanNetmask) throws ExecutionException { + try { + com.citrix.netscaler.nitro.resource.config.basic.server[] serverList = com.citrix.netscaler.nitro.resource.config.basic.server.get(nsService); + + if (serverList == null) { + return; + } + + // remove the server and services associated with guest vlan + for (com.citrix.netscaler.nitro.resource.config.basic.server server : serverList) { + // check if server belong to same subnet as one associated with vlan + if (NetUtils.sameSubnet(vlanSelfIp, server.get_ipaddress(), vlanNetmask)) { + // first remove services associated with this server + com.citrix.netscaler.nitro.resource.config.basic.service serveicesList[] = com.citrix.netscaler.nitro.resource.config.basic.service.get(nsService); + if (serveicesList != null) { + for (com.citrix.netscaler.nitro.resource.config.basic.service svc : serveicesList) { + if (svc.get_servername().equals(server.get_ipaddress())) { + apiCallResult = com.citrix.netscaler.nitro.resource.config.basic.service.delete(nsService, svc.get_name()); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to remove service:" + svc.get_name()); + } + } + } + } + // remove the server + apiCallResult = com.citrix.netscaler.nitro.resource.config.basic.server.delete(nsService, server.get_name()); + if (apiCallResult.errorcode != 0) { + throw new 
ExecutionException("Failed to remove server:" + server.get_name()); + } + } + } + } catch (Exception e) { + throw new ExecutionException("Failed to delete server and services in the guest vlan:" + vlanTag + " on the Netscaler device due to: "+ e.getMessage()); + } + } + + private void addLBVirtualServer(String virtualServerName, String srcIp, int srcPort, String lbMethod, String lbProtocol) throws ExecutionException { + try { + lbvserver vserver = new lbvserver(); + vserver.set_name(virtualServerName); + vserver.set_ipv46(srcIp); + vserver.set_port(srcPort); + vserver.set_servicetype(lbProtocol); + vserver.set_lbmethod(lbMethod); + apiCallResult = lbvserver.add(nsService,vserver); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to create new virtual server:" + virtualServerName); + } + } catch (nitro_exception e) { + if (e.getErrorCode() != NitroError.NS_RESOURCE_EXISTS) { + throw new ExecutionException("Failed to create new virtual server:" + virtualServerName + " due to " + e.getMessage()); + } + } catch (Exception e) { + throw new ExecutionException("Failed to create new virtual server:" + virtualServerName + " due to " + e.getMessage()); + } + } + + private void removeLBVirtualServer (String virtualServerName) throws ExecutionException { + try { + lbvserver vserver = lbvserver.get(nsService, virtualServerName); + if (vserver == null) { + throw new ExecutionException("Failed to find virtual server with name:" + virtualServerName); + } + apiCallResult = lbvserver.delete(nsService, vserver); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Failed to remove virtual server:" + virtualServerName); + } + } catch (nitro_exception e) { + throw new ExecutionException("Failed remove virtual server:" + virtualServerName +" due to " + e.getMessage()); + } catch (Exception e) { + throw new ExecutionException("Failed remove virtual server:" + virtualServerName +" due to " + e.getMessage()); + } + } + + private void saveConfiguration() throws ExecutionException { + try { + apiCallResult = nsconfig.save(nsService); + if (apiCallResult.errorcode != 0) { + throw new ExecutionException("Error occured while saving configuration changes to Netscaler device due to error:" + apiCallResult.errorcode); + } + } catch (nitro_exception e) { + throw new ExecutionException("Failed to save configuration changes to Netscaler device due to " + e.getMessage()); + } catch (Exception e) { + throw new ExecutionException("Failed to save configuration changes to Netscaler device due to " + e.getMessage()); + } + } + + private ExternalNetworkResourceUsageAnswer getPublicIpBytesSentAndReceived(ExternalNetworkResourceUsageCommand cmd) throws ExecutionException { + ExternalNetworkResourceUsageAnswer answer = new ExternalNetworkResourceUsageAnswer(cmd); + + try { + //TODO: add the stats collection + } catch (Exception e) { + s_logger.error(e); + throw new ExecutionException(e.getMessage()); + } + + return answer; + } + + private Answer retry(Command cmd, int numRetries) { + int numRetriesRemaining = numRetries - 1; + s_logger.error("Retrying " + cmd.getClass().getSimpleName() + ". 
Number of retries remaining: " + numRetriesRemaining); + return executeRequest(cmd, numRetriesRemaining); + } + + private boolean shouldRetry(int numRetries) { + return (numRetries > 0 && login()); + } + + private String generateVlanName(long vlanTag) { + return genObjectName("cloud-vlan", String.valueOf(vlanTag)); + } + + private String generateNSVirtualServerName(String srcIp, long srcPort, String protocol) { + return genObjectName("cloud-VirtualServer", protocol, srcIp, srcPort); + } + + private String generateNSServerName(String serverIP) { + return genObjectName("cloud-server", serverIP); + } + + private String generateNSServiceName(String ip, long port) { + return genObjectName("cloud-Service", ip, port); + } + + private String genObjectName(Object... args) { + String objectName = ""; + for (int i = 0; i < args.length; i++) { + objectName += args[i]; + if (i != args.length -1) { + objectName += _objectNamePathSep; + } + } + return objectName; + } + + @Override + public IAgentControl getAgentControl() { + return null; + } + + @Override + public PingCommand getCurrentStatus(long id) { + return new PingCommand(Host.Type.ExternalLoadBalancer, id); + } + + @Override + public Type getType() { + return Host.Type.ExternalLoadBalancer; + } + + @Override + public void setAgentControl(IAgentControl agentControl) { + return; + } + + @Override + public String getName() { + return _name; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } + + @Override + public void disconnected() { + return; + } +} diff --git a/core/src/com/cloud/vm/UserVmVO.java b/core/src/com/cloud/vm/UserVmVO.java index 953bedcf43a..8bb2c1fd4fd 100755 --- a/core/src/com/cloud/vm/UserVmVO.java +++ b/core/src/com/cloud/vm/UserVmVO.java @@ -47,9 +47,6 @@ public class UserVmVO extends VMInstanceVO implements UserVm { transient String password; - @Transient - Map details; - @Override public String getPassword() { return password; @@ -119,11 +116,6 @@ public class UserVmVO extends VMInstanceVO implements UserVm { this.displayName = displayName; } - @Override - public Map getDetails() { - return details; - } - @Override public String getDetail(String name) { assert (details != null) : "Did you forget to load the details?"; @@ -131,18 +123,7 @@ public class UserVmVO extends VMInstanceVO implements UserVm { return details != null ? 
details.get(name) : null; } - public void setDetail(String name, String value) { - assert (details != null) : "Did you forget to load the details?"; - - details.put(name, value); - } - - public void setDetails(Map details) { - this.details = details; - } - public void setAccountId(long accountId){ this.accountId = accountId; } - } diff --git a/core/src/com/cloud/vm/VMInstanceVO.java b/core/src/com/cloud/vm/VMInstanceVO.java index 60b00a1b2fc..df02d666230 100644 --- a/core/src/com/cloud/vm/VMInstanceVO.java +++ b/core/src/com/cloud/vm/VMInstanceVO.java @@ -35,6 +35,7 @@ import javax.persistence.Table; import javax.persistence.TableGenerator; import javax.persistence.Temporal; import javax.persistence.TemporalType; +import javax.persistence.Transient; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.GenericDao; @@ -140,6 +141,9 @@ public class VMInstanceVO implements VirtualMachine, FiniteStateObject details; public VMInstanceVO(long id, long serviceOfferingId, @@ -396,9 +400,19 @@ public class VMInstanceVO implements VirtualMachine, FiniteStateObject getDetails() { - return null; - } + public Map getDetails() { + return details; + } + + public void setDetail(String name, String value) { + assert (details != null) : "Did you forget to load the details?"; + + details.put(name, value); + } + + public void setDetails(Map details) { + this.details = details; + } transient String toString; @Override diff --git a/core/src/com/cloud/vm/VmDetailConstants.java b/core/src/com/cloud/vm/VmDetailConstants.java new file mode 100644 index 00000000000..dd819975f96 --- /dev/null +++ b/core/src/com/cloud/vm/VmDetailConstants.java @@ -0,0 +1,23 @@ +/** + * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ +package com.cloud.vm; + +public interface VmDetailConstants { + public static final String KEYBOARD = "keyboard"; + public static final String NIC_ADAPTER = "nic_adapter"; +} diff --git a/deps/.classpath b/deps/.classpath index 3d085366b8d..662ae5099eb 100755 --- a/deps/.classpath +++ b/deps/.classpath @@ -48,12 +48,11 @@ - - - + + diff --git a/deps/cloud-commons-httpclient-3.1.jar b/deps/cloud-commons-httpclient-3.1.jar new file mode 100644 index 00000000000..7c59774aed4 Binary files /dev/null and b/deps/cloud-commons-httpclient-3.1.jar differ diff --git a/deps/cloud-httpclient-4.1.2.jar b/deps/cloud-httpclient-4.1.2.jar deleted file mode 100644 index b3cdb4cdc8c..00000000000 Binary files a/deps/cloud-httpclient-4.1.2.jar and /dev/null differ diff --git a/deps/cloud-netscaler.jar b/deps/cloud-netscaler.jar new file mode 100644 index 00000000000..8c1bf8d3fc6 Binary files /dev/null and b/deps/cloud-netscaler.jar differ diff --git a/ovm/.classpath b/ovm/.classpath new file mode 100644 index 00000000000..45bd29e131f --- /dev/null +++ b/ovm/.classpath @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/ovm/.project b/ovm/.project new file mode 100755 index 00000000000..4b9d34562c4 --- /dev/null +++ b/ovm/.project @@ -0,0 +1,23 @@ + + + ovm + + + + + + org.python.pydev.PyDevBuilder + + + + + org.eclipse.jdt.core.javabuilder + + + + + + org.eclipse.jdt.core.javanature + org.python.pydev.pythonNature + + diff --git a/ovm/scripts/vm/hypervisor/ovm/Fixget_storage_reposExceptionDueToWrongReturnValueCheck.patch b/ovm/scripts/vm/hypervisor/ovm/Fixget_storage_reposExceptionDueToWrongReturnValueCheck.patch new file mode 100644 index 00000000000..7c41e555147 --- /dev/null +++ b/ovm/scripts/vm/hypervisor/ovm/Fixget_storage_reposExceptionDueToWrongReturnValueCheck.patch @@ -0,0 +1,13 @@ +diff --git a/OVSXUtility.py b/OVSXUtility.py +index 4a98bc8..1053ef7 100644 +--- a/OVSXUtility.py ++++ b/OVSXUtility.py +@@ -160,7 +160,7 @@ def get_storage_repos(): + l = parse_storage_repos() + for sr in l: + d = get_storage_free_space(sr) +- if d: ++ if len(d) > 0: + d_repos_space[sr] = d[sr] + + if not d_repos_space: diff --git a/ovm/scripts/vm/hypervisor/ovm/OvmCommonModule.py b/ovm/scripts/vm/hypervisor/ovm/OvmCommonModule.py index 9e94da8dade..da51e184283 100755 --- a/ovm/scripts/vm/hypervisor/ovm/OvmCommonModule.py +++ b/ovm/scripts/vm/hypervisor/ovm/OvmCommonModule.py @@ -23,6 +23,7 @@ HEARTBEAT_TIMESTAMP_FORMAT='%s' HEARTBEAT_TIMESTAMP_PATTERN='(\\d+.\d+<\/timestamp\>)' HEARTBEAT_DIR='heart_beat' ETC_HOSTS='/etc/hosts' +HOSTNAME_FILE='/etc/sysconfig/network' logger = OvmLogger('OvmCommon') diff --git a/ovm/scripts/vm/hypervisor/ovm/OvmHostModule.py b/ovm/scripts/vm/hypervisor/ovm/OvmHostModule.py old mode 100644 new mode 100755 index 6c4708aee34..983c50593f1 --- a/ovm/scripts/vm/hypervisor/ovm/OvmHostModule.py +++ b/ovm/scripts/vm/hypervisor/ovm/OvmHostModule.py @@ -63,7 +63,9 @@ class OvmHost(OvmObject): return vmPath def _vmNameToPath(self, vmName): - return successToMap(xen_get_vm_path(vmName))['path'] + # the xen_get_vm_path always sucks!!! 
+ #return successToMap((vmName))['path'] + return self._getVmPathFromPrimaryStorage(vmName) def _getAllDomains(self): stdout = timeout_command(["xm", "list"]) diff --git a/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py b/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py index 22eb9ad908e..6684e5b22ec 100755 --- a/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py +++ b/ovm/scripts/vm/hypervisor/ovm/OvmStoragePoolModule.py @@ -2,6 +2,7 @@ from OvmCommonModule import * from OVSSiteSR import sp_create, sr_create, sr_do from OVSParser import parse_ocfs2_cluster_conf from OVSXCluster import clusterm_set_ocfs2_cluster_conf, clusterm_start_o2cb_service +from OVSSiteRMServer import get_master_ip import re class OvmStoragePoolDecoder(json.JSONDecoder): @@ -241,6 +242,33 @@ class OvmStoragePool(OvmObject): fd = open(ETC_HOSTS, "w") fd.write(orignalConf) fd.close() + + def configureHostName(nodes): + myIp = successToMap(get_master_ip())['ip'] + nodeName = None + for n in nodes: + if myIp == n["ip_address"]: + nodeName = n["name"] + break + + if nodeName == None: raise Exception("Cannot find node equals to my ip address:%s"%myIp) + if not exists(HOSTNAME_FILE): + originalConf = "" + else: + fd = open(HOSTNAME_FILE, "r") + originalConf = fd.read() + fd.close() + + pattern = r"HOSTNAME=(.*)" + # remove any old hostname + originalConf = re.sub(pattern, "", originalConf) + # remove extra empty lines + originalConf = re.sub(r"\n\s*\n*", "\n", originalConf) + "\n" + "HOSTNAME=%s"%nodeName + logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure %s:%s\n"%(HOSTNAME_FILE,originalConf)) + fd = open(HOSTNAME_FILE, "w") + fd.write(originalConf) + fd.close() + doCmd(['hostname', nodeName]) try: nodeString = nodeString.strip(";") @@ -273,10 +301,12 @@ class OvmStoragePool(OvmObject): lines.append("\tname = %s\n" % clusterName) lines.append("\n") conf = "".join(lines) + + configureHostName(nodes) + configureEtcHosts(nodes) clusterm_set_ocfs2_cluster_conf(conf) clusterm_start_o2cb_service() logger.debug(OvmStoragePool.prepareOCFS2Nodes, "Configure cluster.conf to:\n%s"%conf) - configureEtcHosts(nodes) rs = SUCC() return rs diff --git a/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh b/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh index 2ad45ead698..e291b90441d 100755 --- a/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh +++ b/ovm/scripts/vm/hypervisor/ovm/configureOvm.sh @@ -63,10 +63,13 @@ applyPatch() { } postSetup() { - openPortOnIptables 7777 tcp - openPortOnIptables 7777 udp + openPortOnIptables 7777 tcp # for OCFS2, maybe tcp only + openPortOnIptables 7777 udp + openPortOnIptables 3260 tcp # for ISCSI, maybe tcp only + openPortOnIptables 3260 udp applyPatch "/opt/ovs-agent-latest/OvmPatch.patch" 2 applyPatch "/opt/ovs-agent-latest/OvmDontTouchOCFS2ClusterWhenAgentStart.patch" 1 + applyPatch "/opt/ovs-agent-latest/Fixget_storage_reposExceptionDueToWrongReturnValueCheck.patch" 1 stopHeartbeat diff --git a/ovm/src/com/cloud/ovm/hypervisor/OvmResourceBase.java b/ovm/src/com/cloud/ovm/hypervisor/OvmResourceBase.java index f3c1bfebc9b..6105edec51b 100755 --- a/ovm/src/com/cloud/ovm/hypervisor/OvmResourceBase.java +++ b/ovm/src/com/cloud/ovm/hypervisor/OvmResourceBase.java @@ -216,12 +216,16 @@ public class OvmResourceBase implements ServerResource, HypervisorResource { /* set to false so each time ModifyStoragePoolCommand will re-setup heartbeat*/ _isHeartBeat = false; + /* try { _canBridgeFirewall = canBridgeFirewall(); } catch (XmlRpcException e) { s_logger.error("Failed to detect whether the host 
supports security groups.", e); _canBridgeFirewall = false; } + */ + + _canBridgeFirewall = false; s_logger.debug(_canBridgeFirewall ? "OVM host supports security groups." : "OVM host doesn't support security groups."); diff --git a/patches/systemvm/debian/config/etc/init.d/cloud-early-config b/patches/systemvm/debian/config/etc/init.d/cloud-early-config index 680875213d7..f604ba517cd 100755 --- a/patches/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/patches/systemvm/debian/config/etc/init.d/cloud-early-config @@ -225,6 +225,8 @@ setup_common() { hostname $NAME #Nameserver + sed -i -e "/^nameserver.*$/d" /etc/resolv.conf # remove previous entries + sed -i -e "/^nameserver.*$/d" /etc/dnsmasq-resolv.conf # remove previous entries if [ -n "$internalNS1" ] then echo "nameserver $internalNS1" > /etc/dnsmasq-resolv.conf diff --git a/python/lib/cloudutils/serviceConfig.py b/python/lib/cloudutils/serviceConfig.py index 1acee497968..f5a1ebbf654 100644 --- a/python/lib/cloudutils/serviceConfig.py +++ b/python/lib/cloudutils/serviceConfig.py @@ -587,6 +587,7 @@ class cloudAgentConfig(serviceCfgBase): cfo.save() self.syscfg.svo.stopService("cloud-agent") + bash("sleep 30") self.syscfg.svo.enableService("cloud-agent") return True except: diff --git a/python/lib/cloudutils/serviceConfigServer.py b/python/lib/cloudutils/serviceConfigServer.py index 9ed4eec67bb..f22da8637ce 100644 --- a/python/lib/cloudutils/serviceConfigServer.py +++ b/python/lib/cloudutils/serviceConfigServer.py @@ -61,6 +61,26 @@ class cloudManagementConfig(serviceCfgBase): cfo = configFileOps("/etc/cloud/management/tomcat6.conf", self) cfo.add_lines("JAVA_OPTS+=\" -Djavax.net.ssl.trustStore=%s \""%keyPath) + elif self.syscfg.env.svrMode == "HttpsServer": + if not os.path.exists("/etc/cloud/management/server-ssl.xml") or not os.path.exists("/etc/cloud/management/tomcat6-ssl.conf"): + raise CloudRuntimeException("Cannot find /etc/cloud/management/server-ssl.xml or /etc/cloud/management/tomcat6-ssl.conf, https enables failed") + if os.path.exists("/etc/cloud/management/server.xml"): + bash("rm -f /etc/cloud/management/server.xml") + if os.path.exists("/etc/cloud/management/tomcat6.conf"): + bash("rm -f /etc/cloud/management/tomcat6.conf") + bash("ln -s /etc/cloud/management/server-ssl.xml /etc/cloud/management/server.xml") + bash("ln -s /etc/cloud/management/tomcat6-ssl.conf /etc/cloud/management/tomcat6.conf") + if not bash("iptables-save |grep PREROUTING | grep 6443").isSuccess(): + bash("iptables -A PREROUTING -t nat -p tcp --dport 443 -j REDIRECT --to-port 6443") + else: + if not os.path.exists("/etc/cloud/management/server-nonssl.xml") or not os.path.exists("/etc/cloud/management/tomcat6-nonssl.conf"): + raise CloudRuntimeException("Cannot find /etc/cloud/management/server-nonssl.xml or /etc/cloud/management/tomcat6-nonssl.conf, https enables failed") + if os.path.exists("/etc/cloud/management/server.xml"): + bash("rm -f /etc/cloud/management/server.xml") + if os.path.exists("/etc/cloud/management/tomcat6.conf"): + bash("rm -f /etc/cloud/management/tomcat6.conf") + bash("ln -s /etc/cloud/management/server-nonssl.xml /etc/cloud/management/server.xml") + bash("ln -s /etc/cloud/management/tomcat6-nonssl.conf /etc/cloud/management/tomcat6.conf") try: self.syscfg.svo.disableService("tomcat6") diff --git a/python/lib/cloudutils/syscfg.py b/python/lib/cloudutils/syscfg.py index c71d2edf734..801f83c6671 100644 --- a/python/lib/cloudutils/syscfg.py +++ b/python/lib/cloudutils/syscfg.py @@ -7,6 +7,8 @@ class sysConfigFactory: 
return sysConfigAgentFactory.getAgent(glbEnv) elif glbEnv.mode == "Server": return sysConfigServerFactory.getServer(glbEnv) + elif glbEnv.mode == "HttpsServer": + return sysConfigServerFactory.getServer(glbEnv) elif glbEnv.mode == "Db": return sysConfigDbFactory.getDb(glbEnv) else: @@ -69,6 +71,9 @@ class sysConfig(object): return True class sysConfigAgent(sysConfig): + def __init__(self, env): + super(sysConfigServer, self).__init__(env) + def check(self): if self.env.debug: return True diff --git a/python/lib/cloudutils/utilities.py b/python/lib/cloudutils/utilities.py index d9f47e46088..17c81573607 100644 --- a/python/lib/cloudutils/utilities.py +++ b/python/lib/cloudutils/utilities.py @@ -88,11 +88,9 @@ class Distribution: if os.path.exists("/etc/fedora-release"): self.distro = "Fedora" - elif os.path.exists("/etc/centos-release"): - self.distro = "CentOS" elif os.path.exists("/etc/redhat-release"): version = file("/etc/redhat-release").readline() - if version.find("Red Hat Enterprise Linux Server release 6") != -1 or version.find("Scientific Linux release 6") != -1 or version.find("CentOS release 6") != -1: + if version.find("Red Hat Enterprise Linux Server release 6") != -1 or version.find("Scientific Linux release 6") != -1 or version.find("CentOS Linux release 6") != -1: self.distro = "RHEL6" elif version.find("CentOS release") != -1: self.distro = "CentOS" diff --git a/scripts/storage/qcow2/createtmplt.sh b/scripts/storage/qcow2/createtmplt.sh index e5f5d96277a..3674856acac 100755 --- a/scripts/storage/qcow2/createtmplt.sh +++ b/scripts/storage/qcow2/createtmplt.sh @@ -101,9 +101,7 @@ create_from_file() { local tmpltimg=$2 local tmpltname=$3 - - #copy the file to the disk - cp $tmpltimg /$tmpltfs/$tmpltname + $qemu_img convert -f qcow2 -O qcow2 $tmpltimg /$tmpltfs/$tmpltname >& /dev/null if [ "$cleanup" == "true" ] then diff --git a/scripts/vm/hypervisor/xenserver/vmops b/scripts/vm/hypervisor/xenserver/vmops index fa117dd847b..28779fe4176 100755 --- a/scripts/vm/hypervisor/xenserver/vmops +++ b/scripts/vm/hypervisor/xenserver/vmops @@ -889,7 +889,7 @@ def cleanup_rules(session, args): instance = 'VM' try: - chainscmd = "iptables-save | grep '^:' | awk '{print $1}' | cut -d':' -f2 | sed 's/-def/-%s/'|sort|uniq" % instance + chainscmd = "iptables-save | grep '^:' | awk '{print $1}' | cut -d':' -f2 | sed 's/-def/-%s/' | sed 's/-egress//' |sort|uniq" % instance chains = util.pread2(['/bin/bash', '-c', chainscmd]).split('\n') cleaned = 0 cleanup = [] @@ -897,9 +897,11 @@ def cleanup_rules(session, args): if 1 in [ chain.startswith(c) for c in ['r-', 'i-', 's-', 'v-', 'l-'] ]: vm = session.xenapi.VM.get_by_name_label(chain) if len(vm) != 1: - util.SMlog("chain " + chain + " does not correspond to a vm, cleaning up") - cleanup.append(chain) - continue + vm = session.xenapi.VM.get_by_name_label(chain + "-untagged") + if len(vm) != 1: + util.SMlog("chain " + chain + " does not correspond to a vm, cleaning up") + cleanup.append(chain) + continue vm_rec = session.xenapi.VM.get_record(vm[0]) state = vm_rec.get('power_state') if state != 'Running' and state != 'Paused': @@ -1053,14 +1055,23 @@ def network_rules(session, args): util.SMlog(" failed to create ipset for rule " + str(tokens)) if protocol == 'all': - iptables = ['iptables', '-I', vmchain, '-m', 'state', '--state', 'NEW', '-m', 'set', '--match-set', ipsetname, 'src', '-j', 'ACCEPT'] + if type == 'egress': + iptables = ['iptables', '-I', vmchain, '-m', 'state', '--state', 'NEW', '-m', 'set', '--match-set', ipsetname, 'dst', '-j', 
'ACCEPT'] + else: + iptables = ['iptables', '-I', vmchain, '-m', 'state', '--state', 'NEW', '-m', 'set', '--match-set', ipsetname, 'src', '-j', 'ACCEPT'] elif protocol != 'icmp': - iptables = ['iptables', '-I', vmchain, '-p', protocol, '-m', protocol, '--dport', range, '-m', 'state', '--state', 'NEW', '-m', 'set', '--match-set', ipsetname, 'src', '-j', 'ACCEPT'] + if type == 'egress': + iptables = ['iptables', '-I', vmchain, '-p', protocol, '-m', protocol, '--dport', range, '-m', 'state', '--state', 'NEW', '-m', 'set', '--match-set', ipsetname, 'dst', '-j', 'ACCEPT'] + else: + iptables = ['iptables', '-I', vmchain, '-p', protocol, '-m', protocol, '--dport', range, '-m', 'state', '--state', 'NEW', '-m', 'set', '--match-set', ipsetname, 'src', '-j', 'ACCEPT'] else: range = start + "/" + end if start == "-1": range = "any" - iptables = ['iptables', '-I', vmchain, '-p', 'icmp', '--icmp-type', range, '-m', 'set', '--match-set', ipsetname, 'src', '-j', 'ACCEPT'] + if type == 'egress': + iptables = ['iptables', '-I', vmchain, '-p', 'icmp', '--icmp-type', range, '-m', 'set', '--match-set', ipsetname, 'dst', '-j', 'ACCEPT'] + else: + iptables = ['iptables', '-I', vmchain, '-p', 'icmp', '--icmp-type', range, '-m', 'set', '--match-set', ipsetname, 'src', '-j', 'ACCEPT'] util.pread2(iptables) util.SMlog(iptables) diff --git a/scripts/vm/network/security_group.py b/scripts/vm/network/security_group.py index de59df698c6..e91fe45174a 100755 --- a/scripts/vm/network/security_group.py +++ b/scripts/vm/network/security_group.py @@ -82,6 +82,7 @@ def ipset(ipsetname, proto, start, end, ips): def destroy_network_rules_for_vm(vm_name, vif=None): vmchain = vm_name + vmchain_egress = vm_name + "-egress" vmchain_default = None delete_rules_for_vm_in_bridge_firewall_chain(vm_name) @@ -111,7 +112,19 @@ def destroy_network_rules_for_vm(vm_name, vif=None): execute("iptables -X " + vmchain) except: logging.debug("Ignoring failure to delete chain " + vmchain) + + + try: + execute("iptables -F " + vmchain_egress) + except: + logging.debug("Ignoring failure to delete chain " + vmchain_egress) + try: + execute("iptables -X " + vmchain_egress) + except: + logging.debug("Ignoring failure to delete chain " + vmchain_egress) + + if vif is not None: try: dnats = execute("iptables -t nat -S | grep " + vif + " | sed 's/-A/-D/'").split("\n") @@ -246,6 +259,7 @@ def default_network_rules(vm_name, vm_id, vm_ip, vm_mac, vif, brname): domID = getvmId(vm_name) delete_rules_for_vm_in_bridge_firewall_chain(vmName) vmchain = vm_name + vmchain_egress = vm_name +"-egress" vmchain_default = '-'.join(vmchain.split('-')[:-1]) + "-def" destroy_ebtables_rules(vmName, vif) @@ -254,7 +268,12 @@ def default_network_rules(vm_name, vm_id, vm_ip, vm_mac, vif, brname): execute("iptables -N " + vmchain) except: execute("iptables -F " + vmchain) - + + try: + execute("iptables -N " + vmchain_egress) + except: + execute("iptables -F " + vmchain_egress) + try: execute("iptables -N " + vmchain_default) except: @@ -270,7 +289,7 @@ def default_network_rules(vm_name, vm_id, vm_ip, vm_mac, vif, brname): #don't let vm spoof its ip address if vm_ip is not None: - execute("iptables -A " + vmchain_default + " -m physdev --physdev-is-bridged --physdev-in " + vif + " --source " + vm_ip + " -j ACCEPT") + execute("iptables -A " + vmchain_default + " -m physdev --physdev-is-bridged --physdev-in " + vif + " --source " + vm_ip + " -j " + vmchain_egress) execute("iptables -A " + vmchain_default + " -j " + vmchain) execute("iptables -A " + vmchain + " -j DROP") except: @@ 
-468,7 +487,7 @@ def cleanup_rules_for_dead_vms(): def cleanup_rules(): try: - chainscmd = "iptables-save | grep '^:' | grep -v '.*-def' | awk '{print $1}' | cut -d':' -f2" + chainscmd = "iptables-save | grep '^:' | grep -v '.*-def' | grep -v '.*-egress' | awk '{print $1}' | cut -d':' -f2" chains = execute(chainscmd).split('\n') cleaned = 0 cleanup = [] @@ -552,12 +571,17 @@ def remove_rule_log_for_vm(vmName): return result -def add_network_rules(vm_name, vm_id, vm_ip, signature, seqno, vmMac, rules, vif, brname): +def add_network_rules(vm_name, vm_id, vm_ip, signature, seqno, vmMac, rules, vif, brname,ruletype): try: vmName = vm_name domId = getvmId(vmName) - vmchain = vm_name + if ruletype == 'egress': + vmchain = vm_name + "-egress" + else: + vmchain = vm_name + + changes = [] changes = check_rule_log_for_vm(vmName, vm_id, vm_ip, domId, signature, seqno) @@ -596,16 +620,25 @@ def add_network_rules(vm_name, vm_id, vm_ip, signature, seqno, vmMac, rules, vif if ips: if protocol == 'all': for ip in ips: - execute("iptables -I " + vmchain + " -m state --state NEW -s " + ip + " -j ACCEPT") + if ruletype == 'egress': + execute("iptables -I " + vmchain + " -m state --state NEW -d " + ip + " -j ACCEPT") + else: + execute("iptables -I " + vmchain + " -m state --state NEW -s " + ip + " -j ACCEPT") elif protocol != 'icmp': for ip in ips: - execute("iptables -I " + vmchain + " -p " + protocol + " -m " + protocol + " --dport " + range + " -m state --state NEW -s " + ip + " -j ACCEPT") + if ruletype == 'egress': + execute("iptables -I " + vmchain + " -p " + protocol + " -m " + protocol + " --dport " + range + " -m state --state NEW -d " + ip + " -j ACCEPT") + else: + execute("iptables -I " + vmchain + " -p " + protocol + " -m " + protocol + " --dport " + range + " -m state --state NEW -s " + ip + " -j ACCEPT") else: range = start + "/" + end if start == "-1": range = "any" for ip in ips: - execute("iptables -I " + vmchain + " -p icmp --icmp-type " + range + " -s " + ip + " -j ACCEPT") + if ruletype == 'egress': + execute("iptables -I " + vmchain + " -p icmp --icmp-type " + range + " -d " + ip + " -j ACCEPT") + else: + execute("iptables -I " + vmchain + " -p icmp --icmp-type " + range + " -s " + ip + " -j ACCEPT") if allow_any and protocol != 'all': if protocol != 'icmp': @@ -704,6 +737,7 @@ if __name__ == '__main__': parser.add_option("--vmid", dest="vmID") parser.add_option("--vmmac", dest="vmMAC") parser.add_option("--vif", dest="vif") + parser.add_option("--ruletype", dest="ruletype") parser.add_option("--sig", dest="sig") parser.add_option("--seq", dest="seq") parser.add_option("--rules", dest="rules") @@ -724,7 +758,7 @@ if __name__ == '__main__': elif cmd == "get_rule_logs_for_vms": get_rule_logs_for_vms() elif cmd == "add_network_rules": - add_network_rules(option.vmName, option.vmID, option.vmIP, option.sig, option.seq, option.vmMAC, option.rules, option.vif, option.brname) + add_network_rules(option.vmName, option.vmID, option.vmIP, option.sig, option.seq, option.vmMAC, option.rules, option.vif, option.brname,option.ruletype) elif cmd == "cleanup_rules": cleanup_rules() elif cmd == "post_default_network_rules": diff --git a/server/src/com/cloud/agent/AgentManager.java b/server/src/com/cloud/agent/AgentManager.java index 0851eb7d8fc..838c9b56174 100755 --- a/server/src/com/cloud/agent/AgentManager.java +++ b/server/src/com/cloud/agent/AgentManager.java @@ -73,7 +73,6 @@ public interface AgentManager extends Manager { * command * @return an Answer */ - Answer send(Long hostId, Command cmd, int 
timeout) throws AgentUnavailableException, OperationTimedoutException; Answer send(Long hostId, Command cmd) throws AgentUnavailableException, OperationTimedoutException; @@ -245,8 +244,6 @@ public interface AgentManager extends Manager { public boolean reconnect(final long hostId) throws AgentUnavailableException; - Answer easySend(Long hostId, Command cmd, int timeout); - boolean isHostNativeHAEnabled(long hostId); Answer sendTo(Long dcId, HypervisorType type, Command cmd); diff --git a/server/src/com/cloud/agent/manager/AgentManagerImpl.java b/server/src/com/cloud/agent/manager/AgentManagerImpl.java index 1a54603d341..81f38687f69 100755 --- a/server/src/com/cloud/agent/manager/AgentManagerImpl.java +++ b/server/src/com/cloud/agent/manager/AgentManagerImpl.java @@ -274,7 +274,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { _pingInterval = NumbersUtil.parseInt(value, 60); value = configs.get("wait"); - _wait = NumbersUtil.parseInt(value, 1800) * 1000; + _wait = NumbersUtil.parseInt(value, 1800); value = configs.get("alert.wait"); _alertWait = NumbersUtil.parseInt(value, 1800); @@ -797,10 +797,10 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { } @Override - public Answer send(Long hostId, Command cmd, int timeout) throws AgentUnavailableException, OperationTimedoutException { + public Answer send(Long hostId, Command cmd) throws AgentUnavailableException, OperationTimedoutException { Commands cmds = new Commands(OnError.Stop); cmds.addCommand(cmd); - send(hostId, cmds, timeout); + send(hostId, cmds, cmd.getWait()); Answer[] answers = cmds.getAnswers(); if (answers != null && !(answers[0] instanceof UnsupportedAnswer)) { return answers[0]; @@ -856,21 +856,13 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { s_logger.debug("checking if agent (" + hostId + ") is alive"); } - try { - Request req = new Request(hostId, _nodeId, new CheckHealthCommand(), true); - req.setSequence(agent.getNextSequence()); - Answer[] answers = agent.send(req, 50 * 1000); - if (answers != null && answers[0] != null && answers[0].getResult()) { - Status status = Status.Up; - if (s_logger.isDebugEnabled()) { - s_logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status); - } - return status; + Answer answer = easySend(hostId, new CheckHealthCommand()); + if (answer != null && answer.getResult()) { + Status status = Status.Up; + if (s_logger.isDebugEnabled()) { + s_logger.debug("agent (" + hostId + ") responded to checkHeathCommand, reporting that agent is " + status); } - } catch (AgentUnavailableException e) { - s_logger.debug("Agent is unavailable so we move on. 
Error: " + e.getMessage()); - } catch (OperationTimedoutException e) { - s_logger.debug("Timed Out " + e.getMessage()); + return status; } return _haMgr.investigate(hostId); @@ -1201,75 +1193,86 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { @SuppressWarnings("rawtypes") protected boolean loadDirectlyConnectedHost(HostVO host, boolean forRebalance) { - String resourceName = host.getResource(); + boolean initialized = false; ServerResource resource = null; - try { - Class clazz = Class.forName(resourceName); - Constructor constructor = clazz.getConstructor(); - resource = (ServerResource) constructor.newInstance(); - } catch (ClassNotFoundException e) { - s_logger.warn("Unable to find class " + host.getResource(), e); - return false; - } catch (InstantiationException e) { - s_logger.warn("Unablet to instantiate class " + host.getResource(), e); - return false; - } catch (IllegalAccessException e) { - s_logger.warn("Illegal access " + host.getResource(), e); - return false; - } catch (SecurityException e) { - s_logger.warn("Security error on " + host.getResource(), e); - return false; - } catch (NoSuchMethodException e) { - s_logger.warn("NoSuchMethodException error on " + host.getResource(), e); - return false; - } catch (IllegalArgumentException e) { - s_logger.warn("IllegalArgumentException error on " + host.getResource(), e); - return false; - } catch (InvocationTargetException e) { - s_logger.warn("InvocationTargetException error on " + host.getResource(), e); - return false; - } - - _hostDao.loadDetails(host); - - HashMap params = new HashMap(host.getDetails().size() + 5); - params.putAll(host.getDetails()); - - params.put("guid", host.getGuid()); - params.put("zone", Long.toString(host.getDataCenterId())); - if (host.getPodId() != null) { - params.put("pod", Long.toString(host.getPodId())); - } - if (host.getClusterId() != null) { - params.put("cluster", Long.toString(host.getClusterId())); - String guid = null; - ClusterVO cluster = _clusterDao.findById(host.getClusterId()); - if (cluster.getGuid() == null) { - guid = host.getDetail("pool"); - } else { - guid = cluster.getGuid(); - } - if (guid != null && !guid.isEmpty()) { - params.put("pool", guid); - } - } - - params.put("ipaddress", host.getPrivateIpAddress()); - params.put("secondary.storage.vm", "false"); - params.put("max.template.iso.size", _configDao.getValue("max.template.iso.size")); - - try { - resource.configure(host.getName(), params); - } catch (ConfigurationException e) { - e.printStackTrace(); - s_logger.warn("Unable to configure resource due to ", e); - return false; - } - - if (!resource.start()) { - s_logger.warn("Unable to start the resource"); - return false; - } + try { + String resourceName = host.getResource(); + try { + Class clazz = Class.forName(resourceName); + Constructor constructor = clazz.getConstructor(); + resource = (ServerResource) constructor.newInstance(); + } catch (ClassNotFoundException e) { + s_logger.warn("Unable to find class " + host.getResource(), e); + return false; + } catch (InstantiationException e) { + s_logger.warn("Unablet to instantiate class " + host.getResource(), e); + return false; + } catch (IllegalAccessException e) { + s_logger.warn("Illegal access " + host.getResource(), e); + return false; + } catch (SecurityException e) { + s_logger.warn("Security error on " + host.getResource(), e); + return false; + } catch (NoSuchMethodException e) { + s_logger.warn("NoSuchMethodException error on " + host.getResource(), e); + return false; + } 
catch (IllegalArgumentException e) { + s_logger.warn("IllegalArgumentException error on " + host.getResource(), e); + return false; + } catch (InvocationTargetException e) { + s_logger.warn("InvocationTargetException error on " + host.getResource(), e); + return false; + } + + _hostDao.loadDetails(host); + + HashMap params = new HashMap(host.getDetails().size() + 5); + params.putAll(host.getDetails()); + + params.put("guid", host.getGuid()); + params.put("zone", Long.toString(host.getDataCenterId())); + if (host.getPodId() != null) { + params.put("pod", Long.toString(host.getPodId())); + } + if (host.getClusterId() != null) { + params.put("cluster", Long.toString(host.getClusterId())); + String guid = null; + ClusterVO cluster = _clusterDao.findById(host.getClusterId()); + if (cluster.getGuid() == null) { + guid = host.getDetail("pool"); + } else { + guid = cluster.getGuid(); + } + if (guid != null && !guid.isEmpty()) { + params.put("pool", guid); + } + } + + params.put("ipaddress", host.getPrivateIpAddress()); + params.put("secondary.storage.vm", "false"); + params.put("max.template.iso.size", _configDao.getValue(Config.MaxTemplateAndIsoSize.toString())); + params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString())); + + try { + resource.configure(host.getName(), params); + } catch (ConfigurationException e) { + s_logger.warn("Unable to configure resource due to ", e); + return false; + } + + if (!resource.start()) { + s_logger.warn("Unable to start the resource"); + return false; + } + + initialized = true; + } finally { + if(!initialized) { + if (host != null) { + _hostDao.updateStatus(host, Event.AgentDisconnected, _nodeId); + } + } + } if (forRebalance) { AgentAttache attache = simulateStart(host.getId(), resource, host.getDetails(), false, null, null, true); @@ -1464,11 +1467,6 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { @Override public Answer easySend(final Long hostId, final Command cmd) { - return easySend(hostId, cmd, _wait); - } - - @Override - public Answer easySend(final Long hostId, final Command cmd, int timeout) { try { Host h = _hostDao.findById(hostId); if (h == null || h.getRemoved() != null) { @@ -1479,7 +1477,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { if (!status.equals(Status.Up) && !status.equals(Status.Connecting)) { return null; } - final Answer answer = send(hostId, cmd, timeout); + final Answer answer = send(hostId, cmd); if (answer == null) { s_logger.warn("send returns null answer"); return null; @@ -1503,14 +1501,18 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { } } - @Override - public Answer send(final Long hostId, final Command cmd) throws AgentUnavailableException, OperationTimedoutException { - return send(hostId, cmd, _wait); - } - @Override public Answer[] send(final Long hostId, Commands cmds) throws AgentUnavailableException, OperationTimedoutException { - return send(hostId, cmds, _wait); + int wait = 0; + for( Command cmd : cmds ) { + if ( cmd.getWait() > wait ) { + wait = cmd.getWait(); + } + } + if ( wait == 0 ) { + wait = _wait; + } + return send(hostId, cmds, wait); } @Override @@ -2134,7 +2136,7 @@ public class AgentManagerImpl implements AgentManager, HandlerFactory, Manager { host.setSpeed(scc.getSpeed()); HypervisorType hyType = scc.getHypervisorType(); host.setHypervisorType(hyType); - + host.setHypervisorVersion(scc.getHypervisorVersion()); } else if (startup instanceof StartupStorageCommand) { final 
StartupStorageCommand ssc = (StartupStorageCommand) startup; host.setParent(ssc.getParent()); diff --git a/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index bb9a3c0ae57..d34f16a8681 100755 --- a/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/server/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -878,6 +878,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // if the thread: // 1) timed out waiting for the host to reconnect // 2) recipient management server is not active any more + // 3) if the management server doesn't own the host any more // remove the host from re-balance list and delete from op_host_transfer DB // no need to do anything with the real attache as we haven't modified it yet Date cutTime = DateUtil.currentGMTTime(); @@ -886,7 +887,14 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust iterator.remove(); _hostTransferDao.completeAgentTransfer(hostId); continue; - } + } + + if (attache.forForward()) { + s_logger.debug("Management server " + _nodeId + " doesn't own host id=" + hostId + " any more, skipping rebalance for the host"); + iterator.remove(); + _hostTransferDao.completeAgentTransfer(hostId); + continue; + } HostTransferMapVO transferMap = _hostTransferDao.findByIdAndCurrentOwnerId(hostId, _nodeId); @@ -1017,9 +1025,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust logD(requestToTransfer.getBytes(), "Failed to route request to peer"); } - requestToTransfer = forwardAttache.getRequestToTransfer(); - } - s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance"); + requestToTransfer = forwardAttache.getRequestToTransfer(); + } + + s_logger.debug("Management server " + _nodeId + " completed agent " + hostId + " rebalance to " + futureOwnerId); } else { failRebalance(hostId); diff --git a/server/src/com/cloud/agent/manager/SynchronousListener.java b/server/src/com/cloud/agent/manager/SynchronousListener.java index de501be5c22..5834bb2d8c4 100755 --- a/server/src/com/cloud/agent/manager/SynchronousListener.java +++ b/server/src/com/cloud/agent/manager/SynchronousListener.java @@ -97,7 +97,7 @@ public class SynchronousListener implements Listener { return waitFor(-1); } - public synchronized Answer[] waitFor(int ms) throws InterruptedException { + public synchronized Answer[] waitFor(int s) throws InterruptedException { if (_disconnected) { return null; } @@ -108,9 +108,10 @@ public class SynchronousListener implements Listener { Profiler profiler = new Profiler(); profiler.start(); - if (ms <= 0) { + if (s <= 0) { wait(); } else { + int ms = s * 1000; wait(ms); } profiler.stop(); diff --git a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java index d5337fb3c76..201a8fc4b90 100755 --- a/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java +++ b/server/src/com/cloud/agent/manager/allocator/impl/FirstFitAllocator.java @@ -38,6 +38,8 @@ import com.cloud.host.Host.Type; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.offering.ServiceOffering; import com.cloud.service.dao.ServiceOfferingDao; import 
com.cloud.storage.GuestOSCategoryVO; @@ -55,6 +57,7 @@ import com.cloud.vm.dao.ConsoleProxyDao; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.SecondaryStorageVmDao; import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.VMInstanceDao; /** * An allocator that tries to find a fit on a computing host. This allocator does not care whether or not the host supports routing. @@ -73,6 +76,8 @@ public class FirstFitAllocator implements HostAllocator { @Inject ConfigurationDao _configDao = null; @Inject GuestOSDao _guestOSDao = null; @Inject GuestOSCategoryDao _guestOSCategoryDao = null; + @Inject HypervisorCapabilitiesDao _hypervisorCapabilitiesDao = null; + @Inject VMInstanceDao _vmInstanceDao = null; float _factor = 1; protected String _allocationAlgorithm = "random"; @Inject CapacityManager _capacityMgr; @@ -189,6 +194,16 @@ public class FirstFitAllocator implements HostAllocator { } continue; } + + //find number of guest VMs occupying capacity on this host. + Long vmCount = _vmInstanceDao.countRunningByHostId(host.getId()); + Long maxGuestLimit = getHostMaxGuestLimit(host); + if (vmCount.longValue() == maxGuestLimit.longValue()){ + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host name: " + host.getName() + ", hostId: "+ host.getId() +" already has max Running VMs(count includes system VMs), limit is: " + maxGuestLimit + " , skipping this and trying other available hosts"); + } + continue; + } boolean numCpusGood = host.getCpus().intValue() >= offering.getCpu(); int cpu_requested = offering.getCpu() * offering.getSpeed(); @@ -327,6 +342,14 @@ public class FirstFitAllocator implements HostAllocator { return guestOSCategory.getName(); } + protected Long getHostMaxGuestLimit(HostVO host) { + HypervisorType hypervisorType = host.getHypervisorType(); + String hypervisorVersion = host.getHypervisorVersion(); + + Long maxGuestLimit = _hypervisorCapabilitiesDao.getMaxGuestsLimit(hypervisorType, hypervisorVersion); + return maxGuestLimit; + } + @Override public boolean configure(String name, Map params) throws ConfigurationException { _name = name; diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index 7c2189092cf..9d17aa85cec 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -50,6 +50,7 @@ import com.cloud.api.response.ExtractResponse; import com.cloud.api.response.FirewallResponse; import com.cloud.api.response.FirewallRuleResponse; import com.cloud.api.response.HostResponse; +import com.cloud.api.response.HypervisorCapabilitiesResponse; import com.cloud.api.response.IPAddressResponse; import com.cloud.api.response.IngressRuleResponse; import com.cloud.api.response.IngressRuleResultObject; @@ -104,6 +105,7 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.host.Host; import com.cloud.host.HostStats; import com.cloud.host.HostVO; +import com.cloud.hypervisor.HypervisorCapabilities; import com.cloud.network.IPAddressVO; import com.cloud.network.IpAddress; import com.cloud.network.Network; @@ -584,6 +586,7 @@ public class ApiResponseHelper implements ResponseGenerator { hostResponse.setMemoryAllocated(mem); hostResponse.setMemoryUsed(mem); hostResponse.setHostTags(ApiDBUtils.getHostTags(host.getId())); + hostResponse.setHypervisorVersion(host.getHypervisorVersion()); } else if (host.getType().toString().equals("Storage")) { hostResponse.setDiskSizeTotal(host.getTotalSize()); hostResponse.setDiskSizeAllocated(0L); @@ 
-1147,7 +1150,7 @@ public class ApiResponseHelper implements ResponseGenerator { } else if (network.getTrafficType() == TrafficType.Control) { routerResponse.setLinkLocalIp(singleNicProfile.getIp4Address()); routerResponse.setLinkLocalMacAddress(singleNicProfile.getMacAddress()); - routerResponse.setLinkLocalMacAddress(singleNicProfile.getNetmask()); + routerResponse.setLinkLocalNetmask(singleNicProfile.getNetmask()); routerResponse.setLinkLocalNetworkId(singleNicProfile.getNetworkId()); } else if (network.getTrafficType() == TrafficType.Guest) { routerResponse.setGuestIpAddress(singleNicProfile.getIp4Address()); @@ -2419,5 +2422,15 @@ public class ApiResponseHelper implements ResponseGenerator { return userVmResponse; } + + public HypervisorCapabilitiesResponse createHypervisorCapabilitiesResponse(HypervisorCapabilities hpvCapabilities){ + HypervisorCapabilitiesResponse hpvCapabilitiesResponse = new HypervisorCapabilitiesResponse(); + hpvCapabilitiesResponse.setId(hpvCapabilities.getId()); + hpvCapabilitiesResponse.setHypervisor(hpvCapabilities.getHypervisorType()); + hpvCapabilitiesResponse.setHypervisorVersion(hpvCapabilities.getHypervisorVersion()); + hpvCapabilitiesResponse.setIsSecurityGroupEnabled(hpvCapabilities.isSecurityGroupEnabled()); + hpvCapabilitiesResponse.setMaxGuestsLimit(hpvCapabilities.getMaxGuestsLimit()); + return hpvCapabilitiesResponse; + } } diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index 1dd107fce07..a10f9ed0b9f 100755 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -32,6 +32,7 @@ import java.net.URLDecoder; import java.net.URLEncoder; import java.security.SecureRandom; import java.text.DateFormat; +import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collections; @@ -53,10 +54,6 @@ import javax.crypto.spec.SecretKeySpec; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; -import net.sf.ehcache.Cache; -import net.sf.ehcache.CacheManager; -import net.sf.ehcache.Element; - import org.apache.http.ConnectionClosedException; import org.apache.http.HttpException; import org.apache.http.HttpRequest; @@ -123,6 +120,7 @@ public class ApiServer implements HttpRequestHandler { public static final short RESOURCE_DOMAIN_ADMIN_COMMAND = 2; public static final short USER_COMMAND = 8; public static boolean encodeApiResponse = false; + public static String jsonContentType = "text/javascript"; private Properties _apiCommands = null; private ApiDispatcher _dispatcher; private ManagementServer _ms = null; @@ -158,7 +156,6 @@ public class ApiServer implements HttpRequestHandler { if (s_instance == null) { s_instance = new ApiServer(); s_instance.init(apiConfig); - s_instance.createCache(); } } @@ -237,6 +234,11 @@ public class ApiServer implements HttpRequestHandler { } encodeApiResponse = Boolean.valueOf(configDao.getValue(Config.EncodeApiResponse.key())); + + String jsonType = configDao.getValue(Config.JavaScriptDefaultContentType.key()); + if (jsonType != null) { + jsonContentType = jsonType; + } ListenerThread listenerThread = new ListenerThread(this, apiPort); listenerThread.start(); @@ -598,17 +600,18 @@ public class ApiServer implements HttpRequestHandler { return false; } synchronized (_dateFormat) { - expiresTS = _dateFormat.parse(expires); + try{ + expiresTS = _dateFormat.parse(expires); + } catch (ParseException pe){ + s_logger.info("Incorrect date format for Expires parameter", 
pe); + return false; + } } Date now = new Date(System.currentTimeMillis()); if(expiresTS.before(now)){ s_logger.info("Request expired -- ignoring ...sig: " + signature + ", apiKey: " + apiKey); return false; } - if(_cache.isKeyInCache(signature)){ - s_logger.info("Duplicate signature -- ignoring ...sig: " + signature + ", apiKey: " + apiKey); - return false; - } } Transaction txn = Transaction.open(Transaction.CLOUD_DB); @@ -655,11 +658,6 @@ public class ApiServer implements HttpRequestHandler { boolean equalSig = signature.equals(computedSignature); if (!equalSig) { s_logger.info("User signature: " + signature + " is not equaled to computed signature: " + computedSignature); - } else { - if("3".equals(signatureVersion)){ - //Add signature along with its time to live calculated based on expires timestamp - _cache.put(new Element(signature, "", false, 0, (int)(expiresTS.getTime() - System.currentTimeMillis())/1000)); - } } return equalSig; } catch (Exception ex) { @@ -731,7 +729,7 @@ public class ApiServer implements HttpRequestHandler { return; } - throw new CloudAuthenticationException("Unable to find user " + username + " in domain " + domainId); + throw new CloudAuthenticationException("Failed to authenticate user " + username + " in domain " + domainId + "; please provide valid credentials"); } public void logoutUser(long userId) { @@ -781,7 +779,7 @@ public class ApiServer implements HttpRequestHandler { BasicHttpEntity body = new BasicHttpEntity(); if (BaseCmd.RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { // JSON response - body.setContentType("text/javascript"); + body.setContentType(jsonContentType); if (responseText == null) { body.setContent(new ByteArrayInputStream("{ \"error\" : { \"description\" : \"Internal Server Error\" } }".getBytes("UTF-8"))); } @@ -933,16 +931,4 @@ public class ApiServer implements HttpRequestHandler { } return responseText; } - - protected Cache _cache; - protected void createCache() { - final CacheManager cm = CacheManager.create(); - //ToDo: Make following values configurable - final int maxElements = 100; - final int live = 300; - final int idle = 300; - _cache = new Cache("signaturesCache", maxElements, false, false, live, idle); - cm.addCache(_cache); - s_logger.info("Cache created: " + _cache.toString()); - } } diff --git a/server/src/com/cloud/api/ApiServlet.java b/server/src/com/cloud/api/ApiServlet.java index ec705e3a2fb..4e1fe904584 100755 --- a/server/src/com/cloud/api/ApiServlet.java +++ b/server/src/com/cloud/api/ApiServlet.java @@ -352,7 +352,7 @@ public class ApiServlet extends HttpServlet { private void writeResponse(HttpServletResponse resp, String response, int responseCode, String responseType) { try { if (BaseCmd.RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - resp.setContentType("text/javascript; charset=UTF-8"); + resp.setContentType(ApiServer.jsonContentType + "; charset=UTF-8"); } else { resp.setContentType("text/xml; charset=UTF-8"); } diff --git a/server/src/com/cloud/api/response/SecurityGroupResultObject.java b/server/src/com/cloud/api/response/SecurityGroupResultObject.java index 221955b4238..6b25382101f 100644 --- a/server/src/com/cloud/api/response/SecurityGroupResultObject.java +++ b/server/src/com/cloud/api/response/SecurityGroupResultObject.java @@ -175,11 +175,6 @@ public class SecurityGroupResultObject { currentGroup = groupResult; } - SecurityGroupRulesVO dummyIngressobj=new SecurityGroupRulesVO(); - SecurityGroupEgressRulesVO dummyEgressobj=new SecurityGroupEgressRulesVO() ; -String 
str=dummyIngressobj.getClass().getName(); - -String s1=netGroupRule.getClass().getSimpleName(); if (netGroupRule.getRuleId() != null && netGroupRule.getClass().getSimpleName().indexOf("SecurityGroupRulesVO") != -1) { // there's at least one ingress rule for this network group, add the ingress rule data @@ -246,6 +241,9 @@ String s1=netGroupRule.getClass().getSimpleName(); if (!ingressDataList.isEmpty()) { currentGroup.setIngressRules(ingressDataList); } + if (!egressDataList.isEmpty()) { + currentGroup.setEgressRules(egressDataList); + } resultObjects.add(currentGroup); } } diff --git a/server/src/com/cloud/async/AsyncJobManagerImpl.java b/server/src/com/cloud/async/AsyncJobManagerImpl.java index 037d08e993e..652b922c815 100644 --- a/server/src/com/cloud/async/AsyncJobManagerImpl.java +++ b/server/src/com/cloud/async/AsyncJobManagerImpl.java @@ -629,7 +629,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe if(blockItems != null && blockItems.size() > 0) { for(SyncQueueItemVO item : blockItems) { if(item.getContentType().equalsIgnoreCase("AsyncJob")) { - completeAsyncJob(item.getContentId(), 2, 0, getResetResultMessage("Job is cancelled as it has been blocking others for too long")); + completeAsyncJob(item.getContentId(), 2, 0, getResetResultResponse("Job is cancelled as it has been blocking others for too long")); } // purge the item and resume queue processing @@ -667,7 +667,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe Long jobId = item.getContentId(); if(jobId != null) { s_logger.warn("Mark job as failed as its correspoding queue-item has been discarded. job id: " + jobId); - completeAsyncJob(jobId, AsyncJobResult.STATUS_FAILED, 0, getResetResultMessage("Execution was cancelled because of server shutdown")); + completeAsyncJob(jobId, AsyncJobResult.STATUS_FAILED, 0, getResetResultResponse("Execution was cancelled because of server shutdown")); } } _queueMgr.purgeItem(item.getId()); @@ -754,7 +754,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe List items = _queueMgr.getActiveQueueItems(msHost.getId(), true); cleanupPendingJobs(items); _queueMgr.resetQueueProcess(msHost.getId()); - _jobDao.resetJobProcess(msHost.getId(), BaseCmd.INTERNAL_ERROR, getResetResultMessage("job cancelled because of management server restart")); + _jobDao.resetJobProcess(msHost.getId(), BaseCmd.INTERNAL_ERROR, getSerializedErrorMessage("job cancelled because of management server restart")); txn.commit(); } catch(Throwable e) { s_logger.warn("Unexpected exception ", e); @@ -775,7 +775,7 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe List l = _queueMgr.getActiveQueueItems(getMsid(), false); cleanupPendingJobs(l); _queueMgr.resetQueueProcess(getMsid()); - _jobDao.resetJobProcess(getMsid(), BaseCmd.INTERNAL_ERROR, getResetResultMessage("job cancelled because of management server restart")); + _jobDao.resetJobProcess(getMsid(), BaseCmd.INTERNAL_ERROR, getSerializedErrorMessage("job cancelled because of management server restart")); } catch(Throwable e) { s_logger.error("Unexpected exception " + e.getMessage(), e); } @@ -788,11 +788,15 @@ public class AsyncJobManagerImpl implements AsyncJobManager, ClusterManagerListe return true; } - private static String getResetResultMessage(String messageText) { + private static ExceptionResponse getResetResultResponse(String errorMessage) { ExceptionResponse resultObject = new ExceptionResponse(); 
resultObject.setErrorCode(BaseCmd.INTERNAL_ERROR); - resultObject.setErrorText(messageText); - return ApiSerializerHelper.toSerializedStringOld(resultObject); + resultObject.setErrorText(errorMessage); + return resultObject; + } + + private static String getSerializedErrorMessage(String errorMessage) { + return ApiSerializerHelper.toSerializedStringOld(getResetResultResponse(errorMessage)); } @Override diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 6a20c3e17aa..b84e3fb468c 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -33,6 +33,7 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.allocator.StoragePoolAllocator; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.template.TemplateManager; import com.cloud.vm.UserVmManager; public enum Config { @@ -56,7 +57,13 @@ public enum Config { TotalRetries("Storage", AgentManager.class, Integer.class, "total.retries", "4", "The number of times each command sent to a host should be retried in case of failure.", null), StoragePoolMaxWaitSeconds("Storage", ManagementServer.class, Integer.class, "storage.pool.max.waitseconds", "3600", "Timeout (in seconds) to synchronize storage pool operations.", null), StorageTemplateCleanupEnabled("Storage", ManagementServer.class, Boolean.class, "storage.template.cleanup.enabled", "true", "Enable/disable template cleanup activity, only take effect when overall storage cleanup is enabled", null), - + PrimaryStorageDownloadWait("Storage", TemplateManager.class, Integer.class, "primary.storage.download.wait", "10800", "In seconds, timeout for downloading a template to primary storage", null), + CreateVolumeFromSnapshotWait("Storage", StorageManager.class, Integer.class, "create.volume.from.snapshot.wait", "10800", "In seconds, timeout for creating a volume from a snapshot", null), + CopyVolumeWait("Storage", StorageManager.class, Integer.class, "copy.volume.wait", "10800", "In seconds, timeout for the copy volume command", null), + CreatePrivateTemplateFromVolumeWait("Storage", UserVmManager.class, Integer.class, "create.private.template.from.volume.wait", "10800", "In seconds, timeout for CreatePrivateTemplateFromVolumeCommand", null), + CreatePrivateTemplateFromSnapshotWait("Storage", UserVmManager.class, Integer.class, "create.private.template.from.snapshot.wait", "10800", "In seconds, timeout for CreatePrivateTemplateFromSnapshotCommand", null), + BackupSnapshotWait("Storage", StorageManager.class, Integer.class, "backup.snapshot.wait", "10800", "In seconds, timeout for BackupSnapshotCommand", null), + // Network NetworkLBHaproxyStatsVisbility("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.visibility", "global", "Load Balancer(haproxy) stats visibilty, it can take the following four parameters : global,guest-network,link-local,disabled", null), NetworkLBHaproxyStatsUri("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.uri","/admin?stats","Load Balancer(haproxy) uri.",null), @@ -87,7 +94,6 @@ public enum Config { RemoteAccessVpnUserLimit("Network", AgentManager.class, String.class, "remote.access.vpn.user.limit", "8", "The maximum number of VPN users that can be created per account", null), // Usage - CapacityCheckPeriod("Usage", ManagementServer.class, Integer.class, "capacity.check.period", "300000", "The interval in milliseconds
between capacity checks", null), StorageAllocatedCapacityThreshold("Usage", ManagementServer.class, Float.class, "storage.allocated.capacity.threshold", "0.85", "Percentage (as a value between 0 and 1) of allocated storage utilization above which alerts will be sent about low storage available.", null), StorageCapacityThreshold("Usage", ManagementServer.class, Float.class, "storage.capacity.threshold", "0.85", "Percentage (as a value between 0 and 1) of storage utilization above which alerts will be sent about low storage available.", null), @@ -153,7 +159,7 @@ public enum Config { UpdateWait("Advanced", AgentManager.class, Integer.class, "update.wait", "600", "Time to wait (in seconds) before alerting on a updating agent", null), Wait("Advanced", AgentManager.class, Integer.class, "wait", "1800", "Time in seconds to wait for control commands to return", null), XapiWait("Advanced", AgentManager.class, Integer.class, "xapiwait", "600", "Time (in seconds) to wait for XAPI to return", null), - CmdsWait("Advanced", AgentManager.class, Integer.class, "cmd.wait", "7200", "Time (in seconds) to wait for some heavy time-consuming commands", null), + MigrateWait("Advanced", AgentManager.class, Integer.class, "migratewait", "3600", "Time (in seconds) to wait for VM migration to finish", null), Workers("Advanced", AgentManager.class, Integer.class, "workers", "5", "Number of worker threads.", null), MountParent("Advanced", ManagementServer.class, String.class, "mount.parent", "/var/lib/cloud/mnt", "The mount point on the Management Server for Secondary Storage.", null), // UpgradeURL("Advanced", ManagementServer.class, String.class, "upgrade.url", "http://example.com:8080/client/agent/update.zip", "The upgrade URL is the URL of the management server that agents will connect to in order to automatically upgrade.", null), @@ -273,7 +279,9 @@ public enum Config { DnsBasicZoneUpdates("Advanced", NetworkManager.class, String.class, "network.dns.basiczone.updates", "all", "This parameter can take 2 values: all (default) and pod.
It defines if DHCP/DNS requests have to be send to all dhcp servers in cloudstack, or only to the one in the same pod", "all,pod"), ClusterMessageTimeOutSeconds("Advanced", ManagementServer.class, Integer.class, "cluster.message.timeout.seconds", "300", "Time (in seconds) to wait before a inter-management server message post times out.", null), - AgentLoadThreshold("Advanced", ManagementServer.class, Float.class, "agent.load.threshold", "0.7", "Percentage (as a value between 0 and 1) of connected agents after which agent load balancing will start happening", null); + AgentLoadThreshold("Advanced", ManagementServer.class, Float.class, "agent.load.threshold", "0.7", "Percentage (as a value between 0 and 1) of connected agents after which agent load balancing will start happening", null), + + JavaScriptDefaultContentType("Advanced", ManagementServer.class, String.class, "json.content.type", "text/javascript", "Http response content type for .js files (default is text/javascript)", null); private final String _category; private final Class _componentClass; diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 13c4d38a5c4..0ba3dbb0c8d 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -123,6 +123,7 @@ import com.cloud.user.UserContext; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; import com.cloud.utils.component.Adapters; import com.cloud.utils.component.ComponentLocator; @@ -1163,7 +1164,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura } - @Override + @Override @DB public DataCenter editZone(UpdateZoneCmd cmd) { // Parameter validation as from execute() method in V1 Long zoneId = cmd.getId(); @@ -1172,24 +1173,21 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura String dns2 = cmd.getDns2(); String internalDns1 = cmd.getInternalDns1(); String internalDns2 = cmd.getInternalDns2(); - String vnetRange = cmd.getVlan(); + String newVnetRangeString = cmd.getVlan(); String guestCidr = cmd.getGuestCidrAddress(); List dnsSearchOrder = cmd.getDnsSearchOrder(); - Long userId = UserContext.current().getCallerUserId(); - int startVnetRange = 0; - int stopVnetRange = 0; Boolean isPublic = cmd.isPublic(); String allocationStateStr = cmd.getAllocationState(); String dhcpProvider = cmd.getDhcpProvider(); - Map detailsMap = cmd.getDetails(); + Map detailsMap = cmd.getDetails(); String networkDomain = cmd.getDomain(); Map newDetails = new HashMap(); if (detailsMap != null) { - Collection zoneDetailsCollection = detailsMap.values(); - Iterator iter = zoneDetailsCollection.iterator(); + Collection zoneDetailsCollection = detailsMap.values(); + Iterator iter = zoneDetailsCollection.iterator(); while (iter.hasNext()) { - HashMap detail = (HashMap)iter.next(); + HashMap detail = (HashMap)iter.next(); String key = (String)detail.get("key"); String value = (String)detail.get("value"); if ((key == null) || (value == null)) { @@ -1215,10 +1213,6 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura newDetails.put(ZoneConfig.DnsSearchOrder.getName(), StringUtils.join(dnsSearchOrder, ",")); } - if (userId == null) { - userId = Long.valueOf(User.UID_SYSTEM); - } - DataCenterVO zone = 
_zoneDao.findById(zoneId); if (zone == null) { throw new InvalidParameterValueException("unable to find zone by id " + zoneId); @@ -1230,7 +1224,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura // if zone is of Basic type, don't allow to add vnet range and cidr if (zone.getNetworkType() == NetworkType.Basic) { - if (vnetRange != null) { + if (newVnetRangeString != null) { throw new InvalidParameterValueException("Can't add vnet range for the zone that supports " + zone.getNetworkType() + " network"); } else if (guestCidr != null) { throw new InvalidParameterValueException("Can't add cidr for the zone that supports " + zone.getNetworkType() + " network"); @@ -1246,37 +1240,60 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura throw new InvalidParameterValueException("A zone with ID: " + zoneId + " does not exist."); } - // If the Vnet range is being changed, make sure there are no allocated VNets - if (vnetRange != null) { - if (zoneHasAllocatedVnets(zoneId)) { - throw new CloudRuntimeException("The vlan range is not editable because there are allocated vlans."); - } + // Vnet range can be extended only + boolean replaceVnet = false; + ArrayList<Pair<Integer, Integer>> vnetsToAdd = new ArrayList<Pair<Integer, Integer>>(2); + + if (newVnetRangeString != null) { + Integer newStartVnet = 0; + Integer newEndVnet = 0; + String[] newVnetRange = newVnetRangeString.split("-"); - String[] startStopRange = new String[2]; - startStopRange = vnetRange.split("-"); - - if (startStopRange.length == 1) { + if (newVnetRange.length < 2) { throw new InvalidParameterValueException("Please provide valid vnet range between 0-4096"); } - if (startStopRange[0] == null || startStopRange[1] == null) { + if (newVnetRange[0] == null || newVnetRange[1] == null) { throw new InvalidParameterValueException("Please provide valid vnet range between 0-4096"); } try { - startVnetRange = Integer.parseInt(startStopRange[0]); - stopVnetRange = Integer.parseInt(startStopRange[1]); + newStartVnet = Integer.parseInt(newVnetRange[0]); + newEndVnet = Integer.parseInt(newVnetRange[1]); } catch (NumberFormatException e) { s_logger.warn("Unable to parse vnet range:", e); throw new InvalidParameterValueException("Please provide valid vnet range between 0-4096"); } - if (startVnetRange < 0 || stopVnetRange > 4096) { + if (newStartVnet < 0 || newEndVnet > 4096) { throw new InvalidParameterValueException("Vnet range has to be between 0-4096"); } - if (startVnetRange > stopVnetRange) { + if (newStartVnet > newEndVnet) { throw new InvalidParameterValueException("Vnet range has to be between 0-4096 and start range should be lesser than or equal to stop range"); + } + + if (zoneHasAllocatedVnets(zoneId)) { + String[] existingRange = zone.getVnet().split("-"); + int existingStartVnet = Integer.parseInt(existingRange[0]); + int existingEndVnet = Integer.parseInt(existingRange[1]); + + //check if vnet is being extended + if (!(newStartVnet.intValue() <= existingStartVnet && newEndVnet.intValue() >= existingEndVnet)) { + throw new InvalidParameterValueException("Can't shrink existing vnet range as the range has vnets allocated.
Only extending existing vnet is supported"); + } + + if (newStartVnet < existingStartVnet) { + vnetsToAdd.add(new Pair(newStartVnet, existingStartVnet - 1)); + } + + if (newEndVnet > existingEndVnet) { + vnetsToAdd.add(new Pair(existingEndVnet + 1, newEndVnet)); + } + + } else { + vnetsToAdd.add(new Pair(newStartVnet, newEndVnet)); + replaceVnet = true; } } @@ -1286,12 +1303,6 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura zoneName = oldZoneName; } - boolean dnsUpdate = false; - - if (dns1 != null || dns2 != null) { - dnsUpdate = true; - } - if (dns1 == null) { dns1 = zone.getDns1(); } @@ -1339,8 +1350,8 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura zone.setGuestNetworkCidr(guestCidr); zone.setDomain(networkDomain); - if (vnetRange != null) { - zone.setVnet(vnetRange); + if (newVnetRangeString != null) { + zone.setVnet(newVnetRangeString); } // update a private zone to public; not vice versa @@ -1349,6 +1360,9 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura zone.setDomain(null); } + Transaction txn = Transaction.currentTxn(); + txn.start(); + Map updatedDetails = new HashMap(); _zoneDao.loadDetails(zone); if(zone.getDetails() != null){ @@ -1365,20 +1379,22 @@ public class ConfigurationManagerImpl implements ConfigurationManager, Configura if(dhcpProvider != null){ zone.setDhcpProvider(dhcpProvider); } - + if (!_zoneDao.update(zoneId, zone)) { throw new CloudRuntimeException("Failed to edit zone. Please contact Cloud Support."); } - if (vnetRange != null) { - String[] tokens = vnetRange.split("-"); - int begin = Integer.parseInt(tokens[0]); - int end = tokens.length == 1 ? (begin) : Integer.parseInt(tokens[1]); - + if (replaceVnet) { + s_logger.debug("Deleting existing vnet range for the zone id=" + zoneId + " as a part of updateZone call"); _zoneDao.deleteVnet(zoneId); - _zoneDao.addVnet(zone.getId(), begin, end); } + for (Pair vnetToAdd : vnetsToAdd) { + s_logger.debug("Adding vnet range " + vnetToAdd.first() + "-" + vnetToAdd.second() + " for the zone id=" + zoneId + " as a part of updateZone call"); + _zoneDao.addVnet(zone.getId(), vnetToAdd.first(), vnetToAdd.second()); + } + + txn.commit(); return zone; } diff --git a/server/src/com/cloud/configuration/DefaultComponentLibrary.java b/server/src/com/cloud/configuration/DefaultComponentLibrary.java index de7610b2485..a1b3a52fc6a 100755 --- a/server/src/com/cloud/configuration/DefaultComponentLibrary.java +++ b/server/src/com/cloud/configuration/DefaultComponentLibrary.java @@ -64,6 +64,7 @@ import com.cloud.host.dao.HostDaoImpl; import com.cloud.host.dao.HostDetailsDaoImpl; import com.cloud.host.dao.HostTagsDaoImpl; import com.cloud.hypervisor.HypervisorGuruManagerImpl; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDaoImpl; import com.cloud.keystore.KeystoreDaoImpl; import com.cloud.keystore.KeystoreManagerImpl; import com.cloud.maint.UpgradeManagerImpl; @@ -100,8 +101,8 @@ import com.cloud.network.security.SecurityGroupManagerImpl; import com.cloud.network.security.dao.EgressRuleDaoImpl; import com.cloud.network.security.dao.IngressRuleDaoImpl; import com.cloud.network.security.dao.SecurityGroupDaoImpl; -import com.cloud.network.security.dao.SecurityGroupRulesDaoImpl; import com.cloud.network.security.dao.SecurityGroupEgressRulesDaoImpl; +import com.cloud.network.security.dao.SecurityGroupRulesDaoImpl; import com.cloud.network.security.dao.SecurityGroupVMMapDaoImpl; import 
com.cloud.network.security.dao.SecurityGroupWorkDaoImpl; import com.cloud.network.security.dao.VmRulesetLogDaoImpl; @@ -275,7 +276,9 @@ public class DefaultComponentLibrary extends ComponentLibraryBase implements Com addDao("ProjectDao", ProjectDaoImpl.class); addDao("InlineLoadBalancerNicMapDao", InlineLoadBalancerNicMapDaoImpl.class); addDao("ElasticLbVmMap", ElasticLbVmMapDaoImpl.class); - + info = addDao("HypervisorCapabilitiesDao",HypervisorCapabilitiesDaoImpl.class); + info.addParameter("cache.size", "100"); + info.addParameter("cache.time.to.live", "600"); } @Override diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 57aba765d8a..b155abf42b3 100644 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -121,6 +121,7 @@ import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; import com.cloud.vm.SystemVmLoadScanHandler; import com.cloud.vm.SystemVmLoadScanner; +import com.cloud.vm.UserVmVO; import com.cloud.vm.SystemVmLoadScanner.AfterScanAction; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -130,6 +131,7 @@ import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineName; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.ConsoleProxyDao; +import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; import com.google.gson.Gson; import com.google.gson.GsonBuilder; @@ -195,6 +197,8 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx NetworkOfferingDao _networkOfferingDao; @Inject StoragePoolDao _storagePoolDao; + @Inject + UserVmDetailsDao _vmDetailsDao; private ConsoleProxyListener _listener; @@ -1288,6 +1292,11 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx @Override public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { + + ConsoleProxyVO vm = profile.getVirtualMachine(); + Map details = _vmDetailsDao.findDetails(vm.getId()); + vm.setDetails(details); + StringBuilder buf = profile.getBootArgsBuilder(); buf.append(" template=domP type=consoleproxy"); buf.append(" host=").append(_mgmt_host); diff --git a/server/src/com/cloud/domain/dao/DomainDao.java b/server/src/com/cloud/domain/dao/DomainDao.java index 6daa75a0133..5ccd860d512 100644 --- a/server/src/com/cloud/domain/dao/DomainDao.java +++ b/server/src/com/cloud/domain/dao/DomainDao.java @@ -29,5 +29,6 @@ public interface DomainDao extends GenericDao { public boolean isChildDomain(Long parentId, Long childId); DomainVO findImmediateChildForParent(Long parentId); List findImmediateChildrenForParent(Long parentId); - List findAllChildren(String path, Long parentId); + List findAllChildren(String path, Long parentId); + List findInactiveDomains(); } diff --git a/server/src/com/cloud/domain/dao/DomainDaoImpl.java b/server/src/com/cloud/domain/dao/DomainDaoImpl.java index 69cc7b56634..51ef94de4e9 100644 --- a/server/src/com/cloud/domain/dao/DomainDaoImpl.java +++ b/server/src/com/cloud/domain/dao/DomainDaoImpl.java @@ -27,6 +27,7 @@ import javax.ejb.Local; import org.apache.log4j.Logger; +import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @@ -44,6 +45,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom protected 
SearchBuilder DomainPairSearch; protected SearchBuilder ImmediateChildDomainSearch; protected SearchBuilder FindAllChildrenSearch; + protected SearchBuilder AllFieldsSearch; public DomainDaoImpl () { DomainNameLikeSearch = createSearchBuilder(); @@ -67,6 +69,14 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom FindAllChildrenSearch.and("path", FindAllChildrenSearch.entity().getPath(), SearchCriteria.Op.LIKE); FindAllChildrenSearch.and("id", FindAllChildrenSearch.entity().getId(), SearchCriteria.Op.NEQ); FindAllChildrenSearch.done(); + + AllFieldsSearch = createSearchBuilder(); + AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("owner", AllFieldsSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("path", AllFieldsSearch.entity().getPath(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("parent", AllFieldsSearch.entity().getParent(), SearchCriteria.Op.EQ); + AllFieldsSearch.done(); } @@ -238,5 +248,12 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom } } return result; + } + + @Override + public List findInactiveDomains() { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("state", Domain.State.Inactive); + return listBy(sc); } } diff --git a/server/src/com/cloud/ha/AbstractInvestigatorImpl.java b/server/src/com/cloud/ha/AbstractInvestigatorImpl.java index dbafb8f1f22..238063bc406 100644 --- a/server/src/com/cloud/ha/AbstractInvestigatorImpl.java +++ b/server/src/com/cloud/ha/AbstractInvestigatorImpl.java @@ -79,7 +79,7 @@ public abstract class AbstractInvestigatorImpl implements Investigator { protected Status testIpAddress(Long hostId, String testHostIp) { try { - Answer pingTestAnswer = _agentMgr.send(hostId, new PingTestCommand(testHostIp), 30 * 1000); + Answer pingTestAnswer = _agentMgr.send(hostId, new PingTestCommand(testHostIp)); if(pingTestAnswer == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("host (" + testHostIp + ") returns null answer"); diff --git a/server/src/com/cloud/ha/CheckOnAgentInvestigator.java b/server/src/com/cloud/ha/CheckOnAgentInvestigator.java index 16c70425eb8..11c8e0fb513 100644 --- a/server/src/com/cloud/ha/CheckOnAgentInvestigator.java +++ b/server/src/com/cloud/ha/CheckOnAgentInvestigator.java @@ -51,7 +51,7 @@ public class CheckOnAgentInvestigator extends AdapterBase implements Investigato public Boolean isVmAlive(VMInstanceVO vm, HostVO host) { CheckVirtualMachineCommand cmd = new CheckVirtualMachineCommand(vm.getInstanceName()); try { - CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer)_agentMgr.send(vm.getHostId(), cmd, 10 * 1000); + CheckVirtualMachineAnswer answer = (CheckVirtualMachineAnswer)_agentMgr.send(vm.getHostId(), cmd); if (!answer.getResult()) { s_logger.debug("Unable to get vm state on " + vm.toString()); return null; diff --git a/server/src/com/cloud/ha/UserVmDomRInvestigator.java b/server/src/com/cloud/ha/UserVmDomRInvestigator.java index 24a7f0f8cf7..31944fc5537 100644 --- a/server/src/com/cloud/ha/UserVmDomRInvestigator.java +++ b/server/src/com/cloud/ha/UserVmDomRInvestigator.java @@ -183,21 +183,16 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { } for (Long hostId : otherHosts) { try { - Answer pingTestAnswer = _agentMgr.send(hostId, new PingTestCommand(routerPrivateIp, privateIp), 30 * 1000); + Answer pingTestAnswer = _agentMgr.easySend(hostId, new 
PingTestCommand(routerPrivateIp, privateIp)); if (pingTestAnswer.getResult()) { if (s_logger.isDebugEnabled()) { s_logger.debug("user vm " + vm.getHostName() + " has been successfully pinged, returning that it is alive"); } return Boolean.TRUE; } - } catch (AgentUnavailableException e) { + } catch (Exception e) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Couldn't reach " + e.getResourceId()); - } - continue; - } catch (OperationTimedoutException e) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Operation timed out: " + e.getMessage()); + s_logger.debug("Couldn't reach due to" + e.toString()); } continue; } diff --git a/server/src/com/cloud/ha/XenServerInvestigator.java b/server/src/com/cloud/ha/XenServerInvestigator.java index 23cabbf9ab7..57d70e1e06a 100644 --- a/server/src/com/cloud/ha/XenServerInvestigator.java +++ b/server/src/com/cloud/ha/XenServerInvestigator.java @@ -57,7 +57,7 @@ public class XenServerInvestigator extends AdapterBase implements Investigator { continue; } Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); - if (answer != null) { + if (answer != null && answer.getResult()) { CheckOnHostAnswer ans = (CheckOnHostAnswer)answer; if (!ans.isDetermined()) { s_logger.debug("Host " + neighbor + " couldn't determine the status of " + agent); diff --git a/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java b/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java index 67458afca8f..b8161a3a5a0 100644 --- a/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java +++ b/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java @@ -65,8 +65,6 @@ import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Storage; -import com.cloud.storage.StorageStats; -import com.cloud.storage.resource.DummySecondaryStorageResource; import com.cloud.utils.component.Inject; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.MacAddress; @@ -355,7 +353,8 @@ public class CloudZonesStartupProcessor implements StartupCommandProcessor { host.setSpeed(scc.getSpeed()); HypervisorType hyType = scc.getHypervisorType(); host.setHypervisorType(hyType); - + host.setHypervisorVersion(scc.getHypervisorVersion()); + } private boolean checkCIDR(Host.Type type, HostPodVO pod, diff --git a/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java b/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java new file mode 100644 index 00000000000..078e29fadb8 --- /dev/null +++ b/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java @@ -0,0 +1,17 @@ +package com.cloud.hypervisor.dao; + +import java.util.List; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorCapabilitiesVO; +import com.cloud.utils.db.GenericDao; + +public interface HypervisorCapabilitiesDao extends GenericDao { + + List listAllByHypervisorType(HypervisorType hypervisorType); + + HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hypervisorType, String hypervisorVersion); + + Long getMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion); + +} diff --git a/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java b/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java new file mode 100644 index 00000000000..a277547c137 --- /dev/null +++ b/server/src/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java @@ -0,0 +1,77 @@ +package 
com.cloud.hypervisor.dao; + +import java.util.List; + +import javax.ejb.Local; + +import org.apache.log4j.Logger; + +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorCapabilitiesVO; +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; + +@Local(value=HypervisorCapabilitiesDao.class) +public class HypervisorCapabilitiesDaoImpl extends GenericDaoBase implements HypervisorCapabilitiesDao { + + private static final Logger s_logger = Logger.getLogger(HypervisorCapabilitiesDaoImpl.class); + + protected final SearchBuilder HypervisorTypeSearch; + protected final SearchBuilder HypervisorTypeAndVersionSearch; + protected final GenericSearchBuilder MaxGuestLimitByHypervisorSearch; + + protected HypervisorCapabilitiesDaoImpl() { + HypervisorTypeSearch = createSearchBuilder(); + HypervisorTypeSearch.and("hypervisorType", HypervisorTypeSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); + HypervisorTypeSearch.done(); + + HypervisorTypeAndVersionSearch = createSearchBuilder(); + HypervisorTypeAndVersionSearch.and("hypervisorType", HypervisorTypeAndVersionSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); + HypervisorTypeAndVersionSearch.and("hypervisorVersion", HypervisorTypeAndVersionSearch.entity().getHypervisorVersion(), SearchCriteria.Op.EQ); + HypervisorTypeAndVersionSearch.done(); + + MaxGuestLimitByHypervisorSearch = createSearchBuilder(Long.class); + MaxGuestLimitByHypervisorSearch.selectField(MaxGuestLimitByHypervisorSearch.entity().getMaxGuestsLimit()); + MaxGuestLimitByHypervisorSearch.and("hypervisorType", MaxGuestLimitByHypervisorSearch.entity().getHypervisorType(), SearchCriteria.Op.EQ); + MaxGuestLimitByHypervisorSearch.and("hypervisorVersion", MaxGuestLimitByHypervisorSearch.entity().getHypervisorVersion(), SearchCriteria.Op.EQ); + MaxGuestLimitByHypervisorSearch.done(); + } + + @Override + public List listAllByHypervisorType(HypervisorType hypervisorType){ + SearchCriteria sc = HypervisorTypeSearch.create(); + sc.setParameters("hypervisorType", hypervisorType); + return search(sc, null); + } + + @Override + public HypervisorCapabilitiesVO findByHypervisorTypeAndVersion(HypervisorType hypervisorType, String hypervisorVersion){ + SearchCriteria sc = HypervisorTypeAndVersionSearch.create(); + sc.setParameters("hypervisorType", hypervisorType); + sc.setParameters("hypervisorVersion", hypervisorVersion); + return findOneBy(sc); + } + + @Override + public Long getMaxGuestsLimit(HypervisorType hypervisorType, String hypervisorVersion){ + Long defaultLimit = new Long(50); + Long result = null; + if(hypervisorVersion != null){ + SearchCriteria sc = MaxGuestLimitByHypervisorSearch.create(); + sc.setParameters("hypervisorType", hypervisorType); + sc.setParameters("hypervisorVersion", hypervisorVersion); + result = customSearch(sc, null).get(0); + }else{ + List capabilities = listAllByHypervisorType(hypervisorType); + if(!capabilities.isEmpty()){ + result = capabilities.get(0).getMaxGuestsLimit(); + } + } + if(result == null){ + return defaultLimit; + } + return result; + } +} \ No newline at end of file diff --git a/server/src/com/cloud/hypervisor/guru/VMwareGuru.java b/server/src/com/cloud/hypervisor/guru/VMwareGuru.java index 19f92ee5b0d..8f5dac30392 100644 --- a/server/src/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/server/src/com/cloud/hypervisor/guru/VMwareGuru.java @@ -3,6 +3,7 @@ */ package 
com.cloud.hypervisor.guru; +import java.util.HashMap; import java.util.Map; import javax.ejb.Local; @@ -29,6 +30,7 @@ import com.cloud.hypervisor.HypervisorGuru; import com.cloud.hypervisor.HypervisorGuruBase; import com.cloud.hypervisor.vmware.VmwareCleanupMaid; import com.cloud.hypervisor.vmware.manager.VmwareManager; +import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; import com.cloud.secstorage.CommandExecLogDao; import com.cloud.secstorage.CommandExecLogVO; import com.cloud.storage.GuestOSVO; @@ -39,9 +41,12 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.Inject; import com.cloud.utils.db.DB; import com.cloud.utils.net.NetUtils; +import com.cloud.vm.ConsoleProxyVO; +import com.cloud.vm.DomainRouterVO; import com.cloud.vm.SecondaryStorageVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; +import com.cloud.vm.VmDetailConstants; @Local(value=HypervisorGuru.class) public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru { @@ -66,9 +71,43 @@ public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru { } @Override - public VirtualMachineTO implement(VirtualMachineProfile vm) { + public VirtualMachineTO implement(VirtualMachineProfile vm) { VirtualMachineTO to = toVirtualMachineTO(vm); - to.setBootloader(BootloaderType.HVM); + to.setBootloader(BootloaderType.HVM); + + Map details = to.getDetails(); + if(details == null) + details = new HashMap(); + + String nicDeviceType = details.get(VmDetailConstants.NIC_ADAPTER); + if(vm.getVirtualMachine() instanceof DomainRouterVO || vm.getVirtualMachine() instanceof ConsoleProxyVO + || vm.getVirtualMachine() instanceof SecondaryStorageVmVO) { + + // for system VMs, use Vmxnet3 as default + if(nicDeviceType == null) { + details.put(VmDetailConstants.NIC_ADAPTER, VirtualEthernetCardType.Vmxnet3.toString()); + } else { + try { + VirtualEthernetCardType.valueOf(nicDeviceType); + } catch (Exception e) { + s_logger.warn("Invalid NIC device type " + nicDeviceType + " is specified in VM details, switch to default E1000"); + details.put(VmDetailConstants.NIC_ADAPTER, VirtualEthernetCardType.E1000.toString()); + } + } + } else { + // for user-VM, use E1000 as default + if(nicDeviceType == null) { + details.put(VmDetailConstants.NIC_ADAPTER, VirtualEthernetCardType.E1000.toString()); + } else { + try { + VirtualEthernetCardType.valueOf(nicDeviceType); + } catch (Exception e) { + s_logger.warn("Invalid NIC device type " + nicDeviceType + " is specified in VM details, switch to default E1000"); + details.put(VmDetailConstants.NIC_ADAPTER, VirtualEthernetCardType.E1000.toString()); + } + } + } + to.setDetails(details); // Determine the VM's OS description GuestOSVO guestOS = _guestOsDao.findById(vm.getVirtualMachine().getGuestOSId()); diff --git a/server/src/com/cloud/hypervisor/vmware/VmwareManagerImpl.java b/server/src/com/cloud/hypervisor/vmware/VmwareManagerImpl.java index 4dc5d632ae7..a1ba9c9ff72 100755 --- a/server/src/com/cloud/hypervisor/vmware/VmwareManagerImpl.java +++ b/server/src/com/cloud/hypervisor/vmware/VmwareManagerImpl.java @@ -39,9 +39,7 @@ import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.ClusterDetailsDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; -import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.DiscoveredWithErrorException; -import com.cloud.exception.ResourceUnavailableException; import com.cloud.host.HostVO; import com.cloud.host.Status; import 
com.cloud.host.dao.HostDao; @@ -64,7 +62,6 @@ import com.cloud.org.Cluster.ClusterType; import com.cloud.secstorage.CommandExecLogDao; import com.cloud.serializer.GsonHelper; import com.cloud.storage.StorageLayer; -import com.cloud.user.UserContext; import com.cloud.utils.FileUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; @@ -77,8 +74,6 @@ import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.vm.DomainRouterVO; -import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.dao.DomainRouterDao; import com.google.gson.Gson; import com.vmware.apputils.vim25.ServiceUtil; import com.vmware.vim25.HostConnectSpec; @@ -106,7 +101,6 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis @Inject ClusterManager _clusterMgr; @Inject CheckPointManager _checkPointMgr; @Inject VirtualNetworkApplianceManager _routerMgr; - @Inject DomainRouterDao _routerDao; String _mountParent; StorageLayer _storage; @@ -805,29 +799,7 @@ public class VmwareManagerImpl implements VmwareManager, VmwareStorageMount, Lis @Override public boolean processDisconnect(long agentId, Status state) { - UserContext context = UserContext.current(); - context.setAccountId(1); - /* Stopped VMware Host's virtual routers */ - HostVO host = _hostDao.findById(agentId); - if (host.getHypervisorType() != HypervisorType.VMware) { - return true; - } - List routers = _routerDao.listByHostId(agentId); - for (DomainRouterVO router : routers) { - try { - State oldState = router.getState(); - _routerMgr.stopRouter(router.getId(), true); - //In case only vCenter is disconnected, we want to shut down router directly - if (oldState == State.Running) { - shutdownRouterVM(router); - } - } catch (ResourceUnavailableException e) { - s_logger.warn("Fail to stop router " + router.getInstanceName() + " when host disconnected!", e); - } catch (ConcurrentOperationException e) { - s_logger.warn("Fail to stop router " + router.getInstanceName() + " when host disconnected!", e); - } - } - return true; + return false; } @Override diff --git a/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java b/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java index a5d3a0ad528..d916bb51265 100755 --- a/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java +++ b/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java @@ -45,6 +45,7 @@ import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; import com.cloud.alert.AlertManager; import com.cloud.configuration.Config; +import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.AgentUnavailableException; @@ -106,6 +107,7 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L @Inject VMTemplateDao _tmpltDao; @Inject VMTemplateHostDao _vmTemplateHostDao; @Inject ClusterDao _clusterDao; + @Inject protected ConfigurationDao _configDao; protected XcpServerDiscoverer() { } @@ -277,8 +279,9 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L params.put("storage.network.device2", _storageNic2); details.put("storage.network.device2", _storageNic2); } - params.put(Config.Wait.toString().toLowerCase(), Integer.toString(_wait)); - details.put(Config.Wait.toString().toLowerCase(), Integer.toString(_wait)); + params.put("wait", 
Integer.toString(_wait)); + details.put("wait", Integer.toString(_wait)); + params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString())); params.put(Config.InstanceName.toString().toLowerCase(), _instance); details.put(Config.InstanceName.toString().toLowerCase(), _instance); try { diff --git a/server/src/com/cloud/network/ExternalNetworkManager.java b/server/src/com/cloud/network/ExternalNetworkManager.java index 6e62bba7f5a..83eab3939f8 100644 --- a/server/src/com/cloud/network/ExternalNetworkManager.java +++ b/server/src/com/cloud/network/ExternalNetworkManager.java @@ -44,6 +44,7 @@ public interface ExternalNetworkManager extends Manager { public static final ExternalNetworkDeviceType F5BigIP = new ExternalNetworkDeviceType("F5BigIP"); public static final ExternalNetworkDeviceType JuniperSRX = new ExternalNetworkDeviceType("JuniperSRX"); + public static final ExternalNetworkDeviceType NetscalerMPX = new ExternalNetworkDeviceType("NetscalerMPX"); public ExternalNetworkDeviceType(String name) { _name = name; diff --git a/server/src/com/cloud/network/ExternalNetworkManagerImpl.java b/server/src/com/cloud/network/ExternalNetworkManagerImpl.java index 14ed6f9b28d..c44ebe6a969 100644 --- a/server/src/com/cloud/network/ExternalNetworkManagerImpl.java +++ b/server/src/com/cloud/network/ExternalNetworkManagerImpl.java @@ -81,6 +81,7 @@ import com.cloud.network.lb.LoadBalancingRule; import com.cloud.network.lb.LoadBalancingRule.LbDestination; import com.cloud.network.resource.F5BigIpResource; import com.cloud.network.resource.JuniperSrxResource; +import com.cloud.network.resource.NetscalerMPXResource; import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRuleVO; @@ -123,7 +124,8 @@ import com.cloud.vm.dao.NicDao; public class ExternalNetworkManagerImpl implements ExternalNetworkManager { public enum ExternalNetworkResourceName { JuniperSrx, - F5BigIp; + F5BigIp, + NetscalerMPX; } @Inject AgentManager _agentMgr; @@ -251,12 +253,15 @@ public class ExternalNetworkManagerImpl implements ExternalNetworkManager { deviceType = cmd.getType(); if (deviceType ==null) { - deviceType = ExternalNetworkDeviceType.F5BigIP.getName(); //default it to F5 for now + deviceType = ExternalNetworkDeviceType.NetscalerMPX.getName(); //TODO: default it to NetscalerMPX for now, till UI support Netscaler & F5 } if (deviceType.equalsIgnoreCase(ExternalNetworkDeviceType.F5BigIP.getName())) { resource = new F5BigIpResource(); - guid = getExternalNetworkResourceGuid(zoneId, ExternalNetworkResourceName.F5BigIp, ipAddress); + guid = getExternalNetworkResourceGuid(zoneId, ExternalNetworkResourceName.F5BigIp, ipAddress); + } else if (deviceType.equalsIgnoreCase(ExternalNetworkDeviceType.NetscalerMPX.getName())) { + resource = new NetscalerMPXResource(); + guid = getExternalNetworkResourceGuid(zoneId, ExternalNetworkResourceName.NetscalerMPX, ipAddress); } else { throw new CloudRuntimeException("An unsupported networt device type is added as external load balancer."); } @@ -284,6 +289,8 @@ public class ExternalNetworkManagerImpl implements ExternalNetworkManager { if (host != null) { if (deviceType.equalsIgnoreCase(ExternalNetworkDeviceType.F5BigIP.getName())) { zone.setLoadBalancerProvider(Network.Provider.F5BigIp.getName()); + } else if (deviceType.equalsIgnoreCase(ExternalNetworkDeviceType.NetscalerMPX.getName())) { + zone.setLoadBalancerProvider(Network.Provider.NetscalerMPX.getName()); } _dcDao.update(zone.getId(), zone); return 
host; @@ -504,7 +511,7 @@ public class ExternalNetworkManagerImpl implements ExternalNetworkManager { Answer answer = _agentMgr.easySend(externalLoadBalancer.getId(), cmd); if (answer == null || !answer.getResult()) { String details = (answer != null) ? answer.getDetails() : "details unavailable"; - String msg = "Unable to apply load balancer rules to the F5 BigIp appliance in zone " + zone.getName() + " due to: " + details + "."; + String msg = "Unable to apply load balancer rules to the external load balancer appliance in zone " + zone.getName() + " due to: " + details + "."; s_logger.error(msg); throw new ResourceUnavailableException(msg, DataCenter.class, network.getDataCenterId()); } diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 5f4090ce7ae..3b058c46517 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -2809,8 +2809,15 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag DataCenterVO zone = _dcDao.findById(zoneId); if (zone.getNetworkType() == NetworkType.Advanced) { - return (zone.getGatewayProvider() != null && zone.getGatewayProvider().equals(Network.Provider.JuniperSRX.getName()) && - zone.getFirewallProvider() != null && zone.getFirewallProvider().equals(Network.Provider.JuniperSRX.getName())); + + if (zone.getGatewayProvider() != null && zone.getGatewayProvider().equals(Network.Provider.JuniperSRX.getName()) && + zone.getFirewallProvider() != null && zone.getFirewallProvider().equals(Network.Provider.JuniperSRX.getName())) { + return true; + } else if (zone.getGatewayProvider() != null && zone.getLoadBalancerProvider() != null && zone.getLoadBalancerProvider().equals(Network.Provider.NetscalerMPX.getName())) { + return true; + } else { + return false; + } } else { return (zone.getFirewallProvider() != null && zone.getFirewallProvider().equals(Network.Provider.JuniperSRX.getName())); } @@ -3121,7 +3128,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag vm = _vmDao.findById(vmId); } Network network = getNetwork(networkId); - NetworkOffering networkOffering = _configMgr.getNetworkOffering(network.getNetworkOfferingId()); + NetworkOffering ntwkOff = _configMgr.getNetworkOffering(network.getNetworkOfferingId()); // For default userVm Default network and domR guest/public network, get rate information from the service offering; for other situations get information // from the network offering @@ -3130,14 +3137,14 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag if (vm != null) { if (vm.getType() == Type.User && network.isDefault()) { isUserVmsDefaultNetwork = true; - } else if (vm.getType() == Type.DomainRouter && networkOffering.getTrafficType() == TrafficType.Public && networkOffering.getGuestType() == null) { + } else if (vm.getType() == Type.DomainRouter && ((ntwkOff.getTrafficType() == TrafficType.Public && ntwkOff.getGuestType() == null) || (ntwkOff.getGuestType() != null && ntwkOff.getTrafficType() == TrafficType.Guest))) { isDomRGuestOrPublicNetwork = true; } } if (isUserVmsDefaultNetwork || isDomRGuestOrPublicNetwork) { return _configMgr.getServiceOfferingNetworkRate(vm.getServiceOfferingId()); } else { - return _configMgr.getNetworkOfferingNetworkRate(networkOffering.getId()); + return _configMgr.getNetworkOfferingNetworkRate(ntwkOff.getId()); } } diff --git 
a/server/src/com/cloud/network/element/ExternalLoadBalancerElement.java b/server/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java similarity index 97% rename from server/src/com/cloud/network/element/ExternalLoadBalancerElement.java rename to server/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java index 2e1bd0ae683..ff4d51a28be 100644 --- a/server/src/com/cloud/network/element/ExternalLoadBalancerElement.java +++ b/server/src/com/cloud/network/element/F5ExternalLoadBalancerElement.java @@ -53,9 +53,9 @@ import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @Local(value=NetworkElement.class) -public class ExternalLoadBalancerElement extends AdapterBase implements NetworkElement { +public class F5ExternalLoadBalancerElement extends AdapterBase implements NetworkElement { - private static final Logger s_logger = Logger.getLogger(ExternalLoadBalancerElement.class); + private static final Logger s_logger = Logger.getLogger(F5ExternalLoadBalancerElement.class); @Inject NetworkManager _networkManager; @Inject ExternalNetworkManager _externalNetworkManager; diff --git a/server/src/com/cloud/network/element/NetscalerExternalLoadBalancerElement.java b/server/src/com/cloud/network/element/NetscalerExternalLoadBalancerElement.java new file mode 100644 index 00000000000..1b618d2463c --- /dev/null +++ b/server/src/com/cloud/network/element/NetscalerExternalLoadBalancerElement.java @@ -0,0 +1,162 @@ +/** + * * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved +* + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ + +package com.cloud.network.element; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; + +import org.apache.log4j.Logger; + +import com.cloud.configuration.ConfigurationManager; +import com.cloud.dc.DataCenter; +import com.cloud.deploy.DeployDestination; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InsufficientNetworkCapacityException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.network.ExternalNetworkManager; +import com.cloud.network.Network; +import com.cloud.network.Network.Capability; +import com.cloud.network.Network.Provider; +import com.cloud.network.Network.Service; +import com.cloud.network.NetworkManager; +import com.cloud.network.Networks.TrafficType; +import com.cloud.network.PublicIpAddress; +import com.cloud.network.rules.FirewallRule; +import com.cloud.network.rules.StaticNat; +import com.cloud.offering.NetworkOffering; +import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.component.Inject; +import com.cloud.vm.NicProfile; +import com.cloud.vm.ReservationContext; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +@Local(value=NetworkElement.class) +public class NetscalerExternalLoadBalancerElement extends AdapterBase implements NetworkElement { + + private static final Logger s_logger = Logger.getLogger(NetscalerExternalLoadBalancerElement.class); + + @Inject NetworkManager _networkManager; + @Inject ExternalNetworkManager _externalNetworkManager; + @Inject ConfigurationManager _configMgr; + + private boolean canHandle(Network config) { + DataCenter zone = _configMgr.getZone(config.getDataCenterId()); + if (config.getGuestType() != Network.GuestIpType.Virtual || config.getTrafficType() != TrafficType.Guest) { + s_logger.trace("Not handling network with guest Type " + config.getGuestType() + " and traffic type " + config.getTrafficType()); + return false; + } + + return (_networkManager.zoneIsConfiguredForExternalNetworking(zone.getId()) && + zone.getLoadBalancerProvider() != null && zone.getLoadBalancerProvider().equals(Network.Provider.NetscalerMPX.getName())); + } + + @Override + public boolean implement(Network guestConfig, NetworkOffering offering, DeployDestination dest, ReservationContext context) throws ResourceUnavailableException, ConcurrentOperationException, InsufficientNetworkCapacityException { + + if (!canHandle(guestConfig)) { + return false; + } + + return _externalNetworkManager.manageGuestNetworkWithExternalLoadBalancer(true, guestConfig); + } + + @Override + public boolean prepare(Network config, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, InsufficientNetworkCapacityException, ResourceUnavailableException { + return true; + } + + @Override + public boolean release(Network config, NicProfile nic, VirtualMachineProfile vm, ReservationContext context) { + return true; + } + + @Override + public boolean shutdown(Network guestConfig, ReservationContext context) throws ResourceUnavailableException, ConcurrentOperationException { + if (!canHandle(guestConfig)) { + return false; + } + + return _externalNetworkManager.manageGuestNetworkWithExternalLoadBalancer(false, guestConfig); + } + + @Override + public boolean destroy(Network config) { + return true; + } + + @Override + public boolean applyIps(Network network, List ipAddress) throws 
ResourceUnavailableException { + return true; + } + + @Override + public boolean applyRules(Network config, List rules) throws ResourceUnavailableException { + if (!canHandle(config)) { + return false; + } + + return _externalNetworkManager.applyLoadBalancerRules(config, rules); + } + + @Override + public Map> getCapabilities() { + Map> capabilities = new HashMap>(); + + // Set capabilities for LB service + Map lbCapabilities = new HashMap(); + + // Specifies that the RoundRobin and Leastconn algorithms are supported for load balancing rules + lbCapabilities.put(Capability.SupportedLBAlgorithms, "roundrobin,leastconn"); + + // Specifies that load balancing rules can be made for either TCP or UDP traffic + lbCapabilities.put(Capability.SupportedProtocols, "tcp,udp"); + + // Specifies that this element can measure network usage on a per public IP basis + lbCapabilities.put(Capability.TrafficStatistics, "per public ip"); + + // Specifies that load balancing rules can only be made with public IPs that aren't source NAT IPs + lbCapabilities.put(Capability.LoadBalancingSupportedIps, "additional"); + + capabilities.put(Service.Lb, lbCapabilities); + + return capabilities; + } + + @Override + public Provider getProvider() { + return Provider.NetscalerMPX; + } + + @Override + public boolean restart(Network network, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException{ + return true; + } + + @Override + public boolean applyStaticNats(Network config, List rules) throws ResourceUnavailableException { + return false; + } + +} diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 0e8fee0dd55..a7b987b71e7 100755 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -176,6 +176,7 @@ import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.MacAddress; import com.cloud.utils.net.NetUtils; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicProfile; @@ -191,9 +192,11 @@ import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineName; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.VirtualMachineProfile.Param; +import com.cloud.vm.VmDetailConstants; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; /** @@ -300,6 +303,8 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian VolumeDao _volumeDao = null; @Inject FirewallRulesCidrsDao _firewallCidrsDao; + @Inject + UserVmDetailsDao _vmDetailsDao; int _routerRamSize; int _routerCpuMHz; @@ -315,7 +320,8 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian private String _dnsBasicZoneUpdates = "all"; private boolean _disable_rp_filter = false; - + private long mgmtSrvrId = MacAddress.getMacAddress().toLong(); + ScheduledExecutorService _executor; ScheduledExecutorService _checkExecutor; @@ -704,13 +710,14 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian @Override public void run() { - final List routers = 
_routerDao.listByStateAndNetworkType(State.Running, GuestIpType.Virtual); + final List routers = _routerDao.listByStateAndNetworkType(State.Running, GuestIpType.Virtual, mgmtSrvrId); s_logger.debug("Found " + routers.size() + " running routers. "); for (DomainRouterVO router : routers) { String privateIP = router.getPrivateIpAddress(); if (privateIP != null) { final NetworkUsageCommand usageCmd = new NetworkUsageCommand(privateIP, router.getHostName()); + UserStatisticsVO previousStats = _statsDao.findBy(router.getAccountId(), router.getDataCenterIdToDeployIn(), router.getNetworkId(), null, router.getId(), router.getType().toString()); final NetworkUsageAnswer answer = (NetworkUsageAnswer) _agentMgr.easySend(router.getHostId(), usageCmd); if (answer != null) { Transaction txn = Transaction.open(Transaction.CLOUD_DB); @@ -725,9 +732,16 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian s_logger.warn("unable to find stats for account: " + router.getAccountId()); continue; } + + if(previousStats != null + && ((previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived()) || (previousStats.getCurrentBytesSent() != stats.getCurrentBytesSent()))){ + s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. Ignoring current answer. Router: "+answer.getRouterName()+" Rcvd: " + answer.getBytesReceived()+ "Sent: " +answer.getBytesSent()); + continue; + } + if (stats.getCurrentBytesReceived() > answer.getBytesReceived()) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. Assuming something went wrong and persisting it. Reported: " + answer.getBytesReceived() + s_logger.debug("Received # of bytes that's less than the last one. Assuming something went wrong and persisting it. Router: "+answer.getRouterName()+" Reported: " + answer.getBytesReceived() + " Stored: " + stats.getCurrentBytesReceived()); } stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); @@ -735,7 +749,7 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian stats.setCurrentBytesReceived(answer.getBytesReceived()); if (stats.getCurrentBytesSent() > answer.getBytesSent()) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. Assuming something went wrong and persisting it. Reported: " + answer.getBytesSent() + s_logger.debug("Received # of bytes that's less than the last one. Assuming something went wrong and persisting it. 
Router: "+answer.getRouterName()+" Reported: " + answer.getBytesSent() + " Stored: " + stats.getCurrentBytesSent()); } stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); @@ -1063,14 +1077,14 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian for (DomainRouterVO router : routers) { boolean skip = false; - if (router.getHostId() != null) { + State state = router.getState(); + if (router.getHostId() != null && state != State.Running) { HostVO host = _hostDao.findById(router.getHostId()); if (host == null || host.getStatus() != Status.Up) { skip = true; } } if (!skip) { - State state = router.getState(); if (state != State.Running) { router = startVirtualRouter(router, _accountService.getSystemUser(), _accountService.getSystemAccount(), params); } @@ -1202,6 +1216,8 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { DomainRouterVO router = profile.getVirtualMachine(); + Map details = _vmDetailsDao.findDetails(router.getId()); + router.setDetails(details); NetworkVO network = _networkDao.findById(router.getNetworkId()); String type = null; @@ -1662,6 +1678,8 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian String zoneName = _dcDao.findById(network.getDataCenterId()).getName(); boolean isZoneBasic = (dc.getNetworkType() == NetworkType.Basic); + List connectedRouters = new ArrayList(); + List disconnectedRouters = new ArrayList(); for (DomainRouterVO router : routers) { if (router.getState() != State.Running) { s_logger.warn("Unable to add virtual machine " + profile.getVirtualMachine() + " to the router " + router + " as the router is not in Running state"); @@ -1737,9 +1755,16 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian } try { _agentMgr.send(router.getHostId(), cmds); + } catch (AgentUnavailableException e){ + s_logger.warn("Unable to reach the agent " + router.getHostId(), e); + disconnectedRouters.add(router); + continue; } catch (OperationTimedoutException e) { - throw new AgentUnavailableException("Unable to reach the agent ", router.getHostId(), e); + s_logger.warn("Connection timeout on host " + router.getHostId(), e); + disconnectedRouters.add(router); + continue; } + connectedRouters.add(router); Answer answer = cmds.getAnswer("dhcp"); if (!answer.getResult()) { @@ -1771,6 +1796,20 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian rets.add(router); } + + String msg = "Unable to add new VM into network on disconnected router "; + if (!connectedRouters.isEmpty()) { + // These disconnected ones are out of sync now, stop them for synchronization + stopDisconnectedRouters(disconnectedRouters, true, msg); + } else if (!disconnectedRouters.isEmpty()) { + for (VirtualRouter router : disconnectedRouters) { + if (s_logger.isDebugEnabled()) { + s_logger.debug(msg + router.getInstanceName() + "(" + router.getId() + ")"); + } + } + throw new ResourceUnavailableException(msg, VirtualRouter.class, disconnectedRouters.get(0).getId()); + } + return rets; } @@ -1780,6 +1819,7 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian } @Override + //FIXME add partial success and STOP state support public String[] applyVpnUsers(Network network, List users, List routers) throws ResourceUnavailableException { if (routers == null || routers.isEmpty()) { 
s_logger.warn("Failed to add/remove VPN users: no router found for account and zone"); @@ -2143,6 +2183,31 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian } return true; } + + protected void stopDisconnectedRouters(List routers, boolean force, String reason) + { + if (routers.isEmpty()) { + return; + } + for (VirtualRouter router : routers) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("About to stop the router " + router.getInstanceName() + " due to: " + reason); + } + String title = "Virtual router " + router.getInstanceName() + " would be stopped, due to " + reason; + String context = "Virtual router (name: " + router.getInstanceName() + ", id: " + router.getId() + ") would be stopped, due to: " + reason; + _alertMgr.sendAlert(AlertManager.ALERT_TYPE_DOMAIN_ROUTER, + router.getDataCenterIdToDeployIn(), router.getPodIdToDeployIn(), title, context); + if (router.getIsRedundantRouter()) { + try { + stopRouter(router.getId(), force); + } catch (ConcurrentOperationException e) { + s_logger.warn("Fail to stop router " + router.getInstanceName(), e); + } catch (ResourceUnavailableException e) { + s_logger.warn("Fail to stop router " + router.getInstanceName(), e); + } + } + } + } @Override public boolean associateIP(Network network, List ipAddress, List routers) throws ResourceUnavailableException { @@ -2151,23 +2216,48 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian throw new ResourceUnavailableException("Unable to assign ip addresses", DataCenter.class, network.getDataCenterId()); } + List connectedRouters = new ArrayList(); + List disconnectedRouters = new ArrayList(); boolean result = true; + String msg = "Unable to associate ip addresses on disconnected router "; for (VirtualRouter router : routers) { if (router.getState() == State.Running) { Commands cmds = new Commands(OnError.Continue); // Have to resend all already associated ip addresses createAssociateIPCommands(router, ipAddress, cmds, 0); - result = result && sendCommandsToRouter(router, cmds); - } else if (router.getState() != State.Stopped) { + try{ + result = sendCommandsToRouter(router, cmds); + connectedRouters.add(router); + } catch (AgentUnavailableException e) { + s_logger.warn(msg + router.getInstanceName(), e); + disconnectedRouters.add(router); + } + + //If rules fail to apply on one domR, no need to proceed with the rest + if (!result) { + throw new ResourceUnavailableException("Unable to apply firewall rules on router ", VirtualRouter.class, router.getId()); + } + + } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { + s_logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + + ", so not sending associate ip address commands to the backend"); + } else { s_logger.warn("Unable to associate ip addresses, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to assign ip addresses, domR is not in right state " + router.getState(), DataCenter.class, network.getDataCenterId()); } + } - //If rules fail to apply on one domR, no need to proceed with the rest - if (!result) { - throw new ResourceUnavailableException("Unable to apply firewall rules on router ", VirtualRouter.class, router.getId()); + if (!connectedRouters.isEmpty()) { + // These disconnected ones are out of sync now, stop them for synchronization + stopDisconnectedRouters(disconnectedRouters, true, msg); + } else if (!disconnectedRouters.isEmpty()) { + for 
(VirtualRouter router : disconnectedRouters) { + if (s_logger.isDebugEnabled()) { + s_logger.debug(msg + router.getInstanceName() + "(" + router.getId() + ")"); + } } + throw new ResourceUnavailableException(msg, VirtualRouter.class, disconnectedRouters.get(0).getId()); } return result; } @@ -2179,44 +2269,67 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian throw new ResourceUnavailableException("Unable to apply firewall rules", DataCenter.class, network.getDataCenterId()); } + List connectedRouters = new ArrayList(); + List disconnectedRouters = new ArrayList(); + String msg = "Unable to apply firewall rules on disconnected router "; boolean result = true; for (VirtualRouter router : routers) { if (router.getState() == State.Running) { if (rules != null && !rules.isEmpty()) { - if (rules.get(0).getPurpose() == Purpose.LoadBalancing) { - // for load balancer we have to resend all lb rules for the network - List lbs = _loadBalancerDao.listByNetworkId(network.getId()); - List lbRules = new ArrayList(); - for (LoadBalancerVO lb : lbs) { - List dstList = _lbMgr.getExistingDestinations(lb.getId()); - LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList); - lbRules.add(loadBalancing); + try { + if (rules.get(0).getPurpose() == Purpose.LoadBalancing) { + // for load balancer we have to resend all lb rules for the network + List lbs = _loadBalancerDao.listByNetworkId(network.getId()); + List lbRules = new ArrayList(); + for (LoadBalancerVO lb : lbs) { + List dstList = _lbMgr.getExistingDestinations(lb.getId()); + LoadBalancingRule loadBalancing = new LoadBalancingRule(lb, dstList); + lbRules.add(loadBalancing); + } + result = result && applyLBRules(router, lbRules); + } else if (rules.get(0).getPurpose() == Purpose.PortForwarding) { + result = result && applyPortForwardingRules(router, (List) rules); + } else if (rules.get(0).getPurpose() == Purpose.StaticNat) { + result = result && applyStaticNatRules(router, (List) rules); + } else if (rules.get(0).getPurpose() == Purpose.Firewall) { + result = result && applyFirewallRules(router, (List) rules); + } else { + s_logger.warn("Unable to apply rules of purpose: " + rules.get(0).getPurpose()); + result = false; } - result = result && applyLBRules(router, lbRules); - } else if (rules.get(0).getPurpose() == Purpose.PortForwarding) { - result = result && applyPortForwardingRules(router, (List) rules); - } else if (rules.get(0).getPurpose() == Purpose.StaticNat) { - result = result && applyStaticNatRules(router, (List) rules); - } else if (rules.get(0).getPurpose() == Purpose.Firewall) { - result = result && applyFirewallRules(router, (List) rules); - }else { - s_logger.warn("Unable to apply rules of purpose: " + rules.get(0).getPurpose()); - result = false; + connectedRouters.add(router); + } catch (AgentUnavailableException e) { + s_logger.warn(msg + router.getInstanceName(), e); + disconnectedRouters.add(router); } } - - //If rules fail to apply on one domR, no need to proceed with the rest + + //If rules fail to apply on one domR and not due to disconnection, no need to proceed with the rest if (!result) { throw new ResourceUnavailableException("Unable to apply firewall rules on router ", VirtualRouter.class, router.getId()); } } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Router is in " + router.getState() + ", so not sending apply firewall rules commands to the backend"); + s_logger.debug("Router " + router.getInstanceName() + " is in " + 
router.getState() + + ", so not sending apply firewall rules commands to the backend"); } else { s_logger.warn("Unable to apply firewall rules, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply firewall rules, virtual router is not in the right state", VirtualRouter.class, router.getId()); } } - return result; + + if (!connectedRouters.isEmpty()) { + // These disconnected ones are out of sync now, stop them for synchronization + stopDisconnectedRouters(disconnectedRouters, true, msg); + } else if (!disconnectedRouters.isEmpty()) { + for (VirtualRouter router : disconnectedRouters) { + if (s_logger.isDebugEnabled()) { + s_logger.debug(msg + router.getInstanceName() + "(" + router.getId() + ")"); + } + } + throw new ResourceUnavailableException(msg, VirtualRouter.class, disconnectedRouters.get(0).getId()); + } + + return true; } protected boolean applyLBRules(VirtualRouter router, List rules) throws ResourceUnavailableException { @@ -2291,26 +2404,47 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian throw new ResourceUnavailableException("Unable to create static nat", DataCenter.class, network.getDataCenterId()); } + List connectedRouters = new ArrayList(); + List disconnectedRouters = new ArrayList(); boolean result = true; + String msg = "Unable to apply static nat on disconnected router "; for (VirtualRouter router : routers) { if (router.getState() == State.Running) { s_logger.debug("Applying " + rules.size() + " static nat in network " + network); - result = applyStaticNat(router, rules); - - //If rules fail to apply on one domR, no need to proceed with the rest + try { + result = applyStaticNat(router, rules); + connectedRouters.add(router); + } catch (AgentUnavailableException e) { + s_logger.warn(msg + router.getInstanceName(), e); + disconnectedRouters.add(router); + } + + //If rules fail to apply on one domR and not due to disconnection, no need to proceed with the rest if (!result) { throw new ResourceUnavailableException("Unable to apply static nat on router ", VirtualRouter.class, router.getId()); } - + } else if (router.getState() == State.Stopped || router.getState() == State.Stopping) { - s_logger.debug("Router is in " + router.getState() + ", so not sending apply firewall rules commands to the backend"); + s_logger.debug("Router " + router.getInstanceName() + " is in " + router.getState() + ", so not sending apply static nat commands to the backend"); } else { s_logger.warn("Unable to apply static nat, virtual router is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply static nat, virtual router is not in the right state", VirtualRouter.class, router.getId()); } } - return result; + if (!connectedRouters.isEmpty()) { + // These disconnected ones are out of sync now, stop them for synchronization + stopDisconnectedRouters(disconnectedRouters, true, msg); + } else if (!disconnectedRouters.isEmpty()) { + for (VirtualRouter router : disconnectedRouters) { + if (s_logger.isDebugEnabled()) { + s_logger.debug(msg + router.getInstanceName() + "(" + router.getId() + ")"); + } + } + throw new ResourceUnavailableException(msg, VirtualRouter.class, disconnectedRouters.get(0).getId()); + } + + return true; } diff --git a/server/src/com/cloud/network/security/SecurityGroupListener.java b/server/src/com/cloud/network/security/SecurityGroupListener.java index f2cd095e61c..46b50657bfe 100755 --- 
a/server/src/com/cloud/network/security/SecurityGroupListener.java +++ b/server/src/com/cloud/network/security/SecurityGroupListener.java @@ -132,7 +132,6 @@ public class SecurityGroupListener implements Listener { } catch (AgentUnavailableException e) { s_logger.warn("Unable to schedule network rules cleanup"); } - } } diff --git a/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java b/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java index 789d6e99639..90a6de68893 100755 --- a/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java +++ b/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java @@ -988,10 +988,10 @@ public class SecurityGroupManagerImpl implements SecurityGroupManager, SecurityG Account caller = UserContext.current().getCaller(); Long id = cmd.getId(); - IngressRuleVO rule = _ingressRuleDao.findById(id); + EgressRuleVO rule = _egressRuleDao.findById(id); if (rule == null) { - s_logger.debug("Unable to find ingress rule with id " + id); - throw new InvalidParameterValueException("Unable to find ingress rule with id " + id); + s_logger.debug("Unable to find egress rule with id " + id); + throw new InvalidParameterValueException("Unable to find egress rule with id " + id); } // Check permissions @@ -1010,8 +1010,8 @@ public class SecurityGroupManagerImpl implements SecurityGroupManager, SecurityG return false; } - _ingressRuleDao.remove(id); - s_logger.debug("revokeSecurityGroupIngress succeeded for ingress rule id: " + id); + _egressRuleDao.remove(id); + s_logger.debug("revokeSecurityGroupEgress succeeded for ingress rule id: " + id); final Set affectedVms = new HashSet(); affectedVms.addAll(_securityGroupVMMapDao.listVmIdsBySecurityGroup(groupHandle.getId())); diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 1e7bd894984..561469ef596 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -174,6 +174,9 @@ import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDetailsDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.HypervisorCapabilities; +import com.cloud.hypervisor.HypervisorCapabilitiesVO; +import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.info.ConsoleProxyInfo; import com.cloud.keystore.KeystoreManager; import com.cloud.network.IPAddressVO; @@ -331,7 +334,8 @@ public class ManagementServerImpl implements ManagementServer { private final UploadDao _uploadDao; private final SSHKeyPairDao _sshKeyPairDao; private final LoadBalancerDao _loadbalancerDao; - + private final HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; + private final KeystoreManager _ksMgr; private final ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); @@ -401,6 +405,7 @@ public class ManagementServerImpl implements ManagementServer { _itMgr = locator.getManager(VirtualMachineManager.class); _ksMgr = locator.getManager(KeystoreManager.class); _userAuthenticators = locator.getAdapters(UserAuthenticator.class); + _hypervisorCapabilitiesDao = locator.getDao(HypervisorCapabilitiesDao.class); if (_userAuthenticators == null || !_userAuthenticators.isSet()) { s_logger.error("Unable to find an user authenticator."); } @@ -2947,7 +2952,7 @@ public class ManagementServerImpl implements ManagementServer { String name = 
cmd.getDomainName(); Long parentId = cmd.getParentDomainId(); Long ownerId = UserContext.current().getCaller().getId(); - Account account = UserContext.current().getCaller(); + Account caller = UserContext.current().getCaller(); String networkDomain = cmd.getNetworkDomain(); if (ownerId == null) { @@ -2962,11 +2967,13 @@ public class ManagementServerImpl implements ManagementServer { if (parentDomain == null) { throw new InvalidParameterValueException("Unable to create domain " + name + ", parent domain " + parentId + " not found."); } - - if ((account != null) && !_domainDao.isChildDomain(account.getDomainId(), parentId)) { - throw new PermissionDeniedException("Unable to create domain " + name + ", permission denied."); + + if (parentDomain.getState().equals(Domain.State.Inactive)) { + throw new CloudRuntimeException("The domain cannot be created as the parent domain " + parentDomain.getName() + " is being deleted"); } + _accountMgr.checkAccess(caller, parentDomain); + if (networkDomain != null) { if (!NetUtils.verifyDomainName(networkDomain)) { throw new InvalidParameterValueException( @@ -2995,41 +3002,48 @@ public class ManagementServerImpl implements ManagementServer { @Override @ActionEvent(eventType = EventTypes.EVENT_DOMAIN_DELETE, eventDescription = "deleting Domain", async = true) public boolean deleteDomain(DeleteDomainCmd cmd) { - Account account = UserContext.current().getCaller(); + Account caller = UserContext.current().getCaller(); Long domainId = cmd.getId(); Boolean cleanup = cmd.getCleanup(); - - if ((domainId == DomainVO.ROOT_DOMAIN) || ((account != null) && !_domainDao.isChildDomain(account.getDomainId(), domainId))) { - throw new PermissionDeniedException("Unable to delete domain " + domainId + ", permission denied."); + + DomainVO domain = _domainDao.findById(domainId); + + if (domain == null) { + throw new InvalidParameterValueException("Failed to delete domain " + domainId + ", domain not found"); + } else if (domainId == DomainVO.ROOT_DOMAIN) { + throw new PermissionDeniedException("Can't delete ROOT domain"); } + + _accountMgr.checkAccess(caller, domain); + + //mark domain as inactive + s_logger.debug("Marking domain id=" + domainId + " as " + Domain.State.Inactive + " before actually deleting it"); + domain.setState(Domain.State.Inactive); + _domainDao.update(domainId, domain); try { - DomainVO domain = _domainDao.findById(domainId); - if (domain != null) { - long ownerId = domain.getAccountId(); - if ((cleanup != null) && cleanup.booleanValue()) { - boolean success = cleanupDomain(domainId, ownerId); - if (!success) { - s_logger.error("Failed to clean up domain resources and sub domains, delete failed on domain " + domain.getName() + " (id: " + domainId + ")."); - return false; - } - } else { + long ownerId = domain.getAccountId(); + if ((cleanup != null) && cleanup.booleanValue()) { + if (!cleanupDomain(domainId, ownerId)) { + s_logger.error("Failed to clean up domain resources and sub domains, delete failed on domain " + domain.getName() + " (id: " + domainId + ")."); + return false; + } + } else { + List accountsForCleanup = _accountDao.findCleanupsForRemovedAccounts(domainId); + if (accountsForCleanup.isEmpty()) { if (!_domainDao.remove(domainId)) { s_logger.error("Delete failed on domain " + domain.getName() + " (id: " + domainId + "); please make sure all users and sub domains have been removed from the domain before deleting"); return false; - } else { - domain.setState(Domain.State.Inactive); - _domainDao.update(domainId, domain); - } + } + } else { + 
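(Editorial sketch, for orientation only.) The deleteDomain rework in this hunk amounts to a soft delete: the domain row is flagged Inactive up front (which is why createDomain now refuses an Inactive parent), and the row itself is removed only once no accounts inside the domain are still waiting for background cleanup. A minimal, self-contained sketch of that ordering follows; the store interface and names are illustrative, not the actual DAO API.

import java.util.List;

// Two-phase domain delete: mark inactive first, remove only when nothing is pending cleanup.
class DomainDeleteSketch {
    enum State { ACTIVE, INACTIVE }
    static class Domain { long id; State state = State.ACTIVE; }

    interface DomainStore {
        void update(Domain d);                           // persist the state change
        boolean remove(long domainId);                   // drop the row
        List<Long> accountsPendingCleanup(long domainId);
    }

    static boolean delete(Domain d, DomainStore store) {
        // Phase 1: make the domain invisible to new sub-domains and accounts.
        d.state = State.INACTIVE;
        store.update(d);

        // Phase 2: defer the actual removal while accounts are still being cleaned up.
        if (!store.accountsPendingCleanup(d.id).isEmpty()) {
            return false; // caller retries after the cleanup workers have finished
        }
        return store.remove(d.id);
    }
}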
s_logger.warn("Can't delete the domain yet because it has " + accountsForCleanup.size() + "accounts that need a cleanup"); + return false; } - } else { - throw new InvalidParameterValueException("Failed to delete domain " + domainId + ", domain not found"); } + cleanupDomainOfferings(domainId); return true; - } catch (InvalidParameterValueException ex) { - throw ex; } catch (Exception ex) { s_logger.error("Exception deleting domain with id " + domainId, ex); return false; @@ -3079,21 +3093,25 @@ public class ManagementServerImpl implements ManagementServer { } } - { - // delete users which will also delete accounts and release resources for those accounts - SearchCriteria sc = _accountDao.createSearchCriteria(); - sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); - List accounts = _accountDao.search(sc, null); - for (AccountVO account : accounts) { - success = (success && _accountMgr.deleteAccount(account, UserContext.current().getCallerUserId(), UserContext.current().getCaller())); - if (!success) { - s_logger.warn("Failed to cleanup account id=" + account.getId() + " as a part of domain cleanup"); - } + // delete users which will also delete accounts and release resources for those accounts + SearchCriteria sc = _accountDao.createSearchCriteria(); + sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); + List accounts = _accountDao.search(sc, null); + for (AccountVO account : accounts) { + success = (success && _accountMgr.deleteAccount(account, UserContext.current().getCallerUserId(), UserContext.current().getCaller())); + if (!success) { + s_logger.warn("Failed to cleanup account id=" + account.getId() + " as a part of domain cleanup"); } } - - // delete the domain itself - boolean deleteDomainSuccess = _domainDao.remove(domainId); + + //don't remove the domain if there are accounts required cleanup + boolean deleteDomainSuccess = true; + List accountsForCleanup = _accountDao.findCleanupsForRemovedAccounts(domainId); + if (accountsForCleanup.isEmpty()) { + deleteDomainSuccess = _domainDao.remove(domainId); + } else { + s_logger.debug("Can't delete the domain yet because it has " + accountsForCleanup.size() + "accounts that need a cleanup"); + } return success && deleteDomainSuccess; } @@ -4375,9 +4393,10 @@ public class ManagementServerImpl implements ManagementServer { _asyncMgr.updateAsyncJobAttachment(job.getId(), Upload.Type.VOLUME.toString(), volumeId); _asyncMgr.updateAsyncJobStatus(job.getId(), AsyncJobResult.STATUS_IN_PROGRESS, resultObj); } - + String value = _configs.get(Config.CopyVolumeWait.toString()); + int copyvolumewait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); // Copy the volume from the source storage pool to secondary storage - CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volume.getPath(), srcPool, secondaryStorageURL, true); + CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volume.getPath(), srcPool, secondaryStorageURL, true, copyvolumewait); CopyVolumeAnswer cvAnswer = null; try { cvAnswer = (CopyVolumeAnswer) _storageMgr.sendToPool(srcPool, cvCmd); @@ -4848,4 +4867,54 @@ public class ManagementServerImpl implements ManagementServer { } return null; } + + @Override + public List listHypervisorCapabilities(Long id, HypervisorType hypervisorType, Long startIndex, Long pageSizeVal){ + Filter searchFilter = new Filter(HypervisorCapabilitiesVO.class, "id", true, startIndex, pageSizeVal); + SearchCriteria sc = _hypervisorCapabilitiesDao.createSearchCriteria(); + + if (id != null) { 
+ sc.addAnd("id", SearchCriteria.Op.EQ, id); + } + + if (hypervisorType != null) { + sc.addAnd("hypervisorType", SearchCriteria.Op.EQ, hypervisorType); + } + + return _hypervisorCapabilitiesDao.search(sc, searchFilter); + + } + + @Override + public HypervisorCapabilities updateHypervisorCapabilities(Long id, Long maxGuestsLimit, Boolean securityGroupEnabled){ + HypervisorCapabilitiesVO hpvCapabilities = _hypervisorCapabilitiesDao.findById(id, true); + + if(hpvCapabilities == null){ + throw new InvalidParameterValueException("unable to find the hypervisor capabilities " + id); + } + + boolean updateNeeded = (maxGuestsLimit != null || securityGroupEnabled != null); + if (!updateNeeded) { + return hpvCapabilities; + } + + + hpvCapabilities = _hypervisorCapabilitiesDao.createForUpdate(id); + + if(maxGuestsLimit != null){ + hpvCapabilities.setMaxGuestsLimit(maxGuestsLimit); + } + + if(securityGroupEnabled != null){ + hpvCapabilities.setSecurityGroupEnabled(securityGroupEnabled); + } + + if(_hypervisorCapabilitiesDao.update(id, hpvCapabilities)){ + hpvCapabilities = _hypervisorCapabilitiesDao.findById(id); + UserContext.current().setEventDetails("Hypervisor Capabilities id=" + hpvCapabilities.getId()); + return hpvCapabilities; + }else{ + return null; + } + } } diff --git a/server/src/com/cloud/servlet/ConsoleProxyServlet.java b/server/src/com/cloud/servlet/ConsoleProxyServlet.java index b7d6ce13425..4b2f6391ca2 100644 --- a/server/src/com/cloud/servlet/ConsoleProxyServlet.java +++ b/server/src/com/cloud/servlet/ConsoleProxyServlet.java @@ -39,6 +39,7 @@ import org.apache.log4j.Logger; import com.cloud.host.HostVO; import com.cloud.server.ManagementServer; +import com.cloud.storage.GuestOSVO; import com.cloud.user.Account; import com.cloud.user.User; import com.cloud.utils.Pair; @@ -324,6 +325,12 @@ public class ConsoleProxyServlet extends HttpServlet { sb.append("&tag=").append(tag); sb.append("&ticket=").append(ticket); + // for console access, we need guest OS type to help implement keyboard + long guestOs = vm.getGuestOSId(); + GuestOSVO guestOsVo = _ms.getGuestOs(guestOs); + if(guestOsVo.getCategoryId() == 6) + sb.append("&guest=windows"); + if(s_logger.isDebugEnabled()) { s_logger.debug("Compose console url: " + sb.toString()); } diff --git a/server/src/com/cloud/storage/OCFS2ManagerImpl.java b/server/src/com/cloud/storage/OCFS2ManagerImpl.java index b7bb6635b1d..81f611f3e12 100755 --- a/server/src/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/com/cloud/storage/OCFS2ManagerImpl.java @@ -65,7 +65,10 @@ public class OCFS2ManagerImpl implements OCFS2Manager, ResourceListener { Integer i = 0; List> lst = new ArrayList>(); for (HostVO h : hosts) { - String nodeName = "node_" + h.getPrivateIpAddress().replace(".", "_"); + /** + * Don't show "node" in node name otherwise OVM's utils/config_o2cb.sh will be going crazy + */ + String nodeName = "ovm_" + h.getPrivateIpAddress().replace(".", "_"); Ternary node = new Ternary(i, h.getPrivateIpAddress(), nodeName); lst.add(node); i ++; @@ -103,6 +106,16 @@ public class OCFS2ManagerImpl implements OCFS2Manager, ResourceListener { } return clusterName; + + + /** + * right now let's use "ocfs2" that is default cluster name of OVM OCFS2 service. + * Using another name is fine but requires extra effort to modify OVM's "utils/config_o2cb.sh", + * currently it doesn't receive parameter specifying which cluster to start. 
+ * And I don't see the benefit of a cluster name rather than "ocfs2" + */ + + //return "ocfs2"; } @Override diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 65bc04d1c2f..bf0341100b0 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -299,6 +299,8 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag boolean _storageCleanupEnabled; boolean _templateCleanupEnabled = true; int _storageCleanupInterval; + private int _createVolumeFromSnapshotWait; + private int _copyvolumewait; int _storagePoolAcquisitionWaitSeconds = 1800; // 30 minutes protected int _retry = 2; protected int _pingInterval = 60; // seconds @@ -307,7 +309,6 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag private long _maxVolumeSizeInGb; private long _serverId; - private int _snapshotTimeout; public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException { @@ -634,26 +635,29 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag if( snapshot.getSwiftName() != null ) { _snapshotMgr.downloadSnapshotsFromSwift(snapshot); } - CreateVolumeFromSnapshotCommand createVolumeFromSnapshotCommand = new CreateVolumeFromSnapshotCommand(primaryStoragePoolNameLabel, secondaryStoragePoolUrl, dcId, accountId, volumeId, - backedUpSnapshotUuid, snapshot.getName()); - String basicErrMsg = "Failed to create volume from " + snapshot.getName(); + CreateVolumeFromSnapshotCommand createVolumeFromSnapshotCommand = new CreateVolumeFromSnapshotCommand(primaryStoragePoolNameLabel, secondaryStoragePoolUrl, dcId, accountId, volumeId, + backedUpSnapshotUuid, snapshot.getName(), _createVolumeFromSnapshotWait); + CreateVolumeFromSnapshotAnswer answer; if (!_snapshotDao.lockInLockTable(snapshotId.toString(), 10)) { throw new CloudRuntimeException("failed to create volume from " + snapshotId + " due to this snapshot is being used, try it later "); } + String basicErrMsg = "Failed to create volume from " + snapshot.getName() + " on pool " + pool; try { answer = (CreateVolumeFromSnapshotAnswer) sendToPool(pool, createVolumeFromSnapshotCommand); if (answer != null && answer.getResult()) { vdiUUID = answer.getVdi(); } else { - s_logger.error(basicErrMsg + " due to " + answer.getDetails()); + s_logger.error(basicErrMsg + " due to " + ((answer == null)?"null":answer.getDetails())); + throw new CloudRuntimeException(basicErrMsg); } } catch (StorageUnavailableException e) { s_logger.error(basicErrMsg); } finally { _snapshotDao.unlockFromLockTable(snapshotId.toString()); } + return new Pair(vdiUUID, basicErrMsg); } @@ -800,11 +804,10 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag if (overProvisioningFactorStr != null) { _overProvisioningFactor = Float.parseFloat(overProvisioningFactorStr); } - + _retry = NumbersUtil.parseInt(configs.get(Config.StartRetry.key()), 10); _pingInterval = NumbersUtil.parseInt(configs.get("ping.interval"), 60); _hostRetry = NumbersUtil.parseInt(configs.get("host.retry"), 2); - _snapshotTimeout = NumbersUtil.parseInt(Config.CmdsWait.key(), 2 * 60 * 60 * 1000); _storagePoolAcquisitionWaitSeconds = NumbersUtil.parseInt(configs.get("pool.acquisition.wait.seconds"), 1800); s_logger.info("pool.acquisition.wait.seconds is configured as " + _storagePoolAcquisitionWaitSeconds + " seconds"); @@ -813,7 +816,13 @@ 
public class StorageManagerImpl implements StorageManager, StorageService, Manag String storageCleanupEnabled = configs.get("storage.cleanup.enabled"); _storageCleanupEnabled = (storageCleanupEnabled == null) ? true : Boolean.parseBoolean(storageCleanupEnabled); - String value = configDao.getValue(Config.StorageTemplateCleanupEnabled.key()); + String value = configDao.getValue(Config.CreateVolumeFromSnapshotWait.toString()); + _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CreateVolumeFromSnapshotWait.getDefaultValue())); + + value = configDao.getValue(Config.CopyVolumeWait.toString()); + _copyvolumewait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CopyVolumeWait.getDefaultValue())); + + value = configDao.getValue(Config.StorageTemplateCleanupEnabled.key()); _templateCleanupEnabled = (value == null ? true : Boolean.parseBoolean(value)); String time = configs.get("storage.cleanup.interval"); @@ -1533,7 +1542,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId()); // Copy the volume from the source storage pool to secondary storage - CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volume.getPath(), srcPool, secondaryStorageURL, true); + CopyVolumeCommand cvCmd = new CopyVolumeCommand(volume.getId(), volume.getPath(), srcPool, secondaryStorageURL, true, _copyvolumewait); CopyVolumeAnswer cvAnswer; try { cvAnswer = (CopyVolumeAnswer) sendToPool(srcPool, cvCmd); @@ -1549,7 +1558,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag // Copy the volume from secondary storage to the destination storage // pool - cvCmd = new CopyVolumeCommand(volume.getId(), secondaryStorageVolumePath, destPool, secondaryStorageURL, false); + cvCmd = new CopyVolumeCommand(volume.getId(), secondaryStorageVolumePath, destPool, secondaryStorageURL, false, _copyvolumewait); try { cvAnswer = (CopyVolumeAnswer) sendToPool(destPool, cvCmd); } catch (StorageUnavailableException e1) { @@ -1874,6 +1883,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag + storagePool.getDataCenterId() + ", HostOrPoolId - " + storagePool.getId() + ", PodId " + storagePool.getPodId()); } + @Override public Pair sendToPool(StoragePool pool, long[] hostIdsToTryFirst, List hostIdsToAvoid, Commands cmds) throws StorageUnavailableException { SearchCriteria sc = UpHostsInPoolSearch.create(); @@ -1903,11 +1913,7 @@ public class StorageManagerImpl implements StorageManager, StorageService, Manag for (Command cmd : cmdArray) { long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostId, cmd); - if (cmd instanceof BackupSnapshotCommand) { - answers.add(_agentMgr.send(targetHostId, cmd, _snapshotTimeout)); - } else { - answers.add(_agentMgr.send(targetHostId, cmd)); - } + answers.add(_agentMgr.send(targetHostId, cmd)); } return new Pair(hostId, answers.toArray(new Answer[answers.size()])); } catch (AgentUnavailableException e) { diff --git a/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java b/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java index 0bf743b92d3..163314f382f 100644 --- a/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java +++ b/server/src/com/cloud/storage/dao/SnapshotDaoImpl.java @@ -26,19 +26,16 @@ import javax.ejb.Local; import org.apache.log4j.Logger; -import com.cloud.storage.SnapshotVO; import com.cloud.storage.Snapshot.Type; +import com.cloud.storage.SnapshotVO; import 
com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.VirtualMachine.State; +import com.cloud.utils.db.Transaction; @Local (value={SnapshotDao.class}) public class SnapshotDaoImpl extends GenericDaoBase implements SnapshotDao { diff --git a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java index d2c022f1abe..d18aecf8c28 100644 --- a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java +++ b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java @@ -108,6 +108,7 @@ import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineName; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.SecondaryStorageVmDao; +import com.cloud.vm.dao.UserVmDetailsDao; import com.cloud.vm.dao.VMInstanceDao; // @@ -186,6 +187,8 @@ public class SecondaryStorageManagerImpl implements SecondaryStorageVmManager, V protected VMInstanceDao _vmDao; @Inject protected CapacityDao _capacityDao; + @Inject + UserVmDetailsDao _vmDetailsDao; private long _capacityScanInterval = DEFAULT_CAPACITY_SCAN_INTERVAL; @@ -940,6 +943,10 @@ public class SecondaryStorageManagerImpl implements SecondaryStorageVmManager, V @Override public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { + SecondaryStorageVmVO vm = profile.getVirtualMachine(); + Map details = _vmDetailsDao.findDetails(vm.getId()); + vm.setDetails(details); + HostVO secHost = _hostDao.findSecondaryStorageHost(dest.getDataCenter().getId()); assert (secHost != null); diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index fbb7cde393b..0a40ebf9565 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -46,6 +46,7 @@ import com.cloud.api.commands.ListRecurringSnapshotScheduleCmd; import com.cloud.api.commands.ListSnapshotPoliciesCmd; import com.cloud.api.commands.ListSnapshotsCmd; import com.cloud.async.AsyncJobManager; +import com.cloud.configuration.Config; import com.cloud.configuration.ResourceCount.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.dc.ClusterVO; @@ -63,8 +64,8 @@ import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDetailsDao; import com.cloud.host.dao.HostDao; +import com.cloud.host.dao.HostDetailsDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.Snapshot; import com.cloud.storage.Snapshot.Status; @@ -167,6 +168,7 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma private int _totalRetries; private int _pauseInterval; private int _deltaSnapshotMax; + private int _backupsnapshotwait; protected SearchBuilder PolicySnapshotSearch; protected SearchBuilder PoliciesForSnapSearch; @@ -345,7 +347,7 
@@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma VolumeVO volume = null; boolean backedUp = false; // does the caller have the authority to act on this volume - checkAccountPermissions(v.getAccountId(), v.getDomainId(), "volume", volumeId); + _accountMgr.checkAccess(UserContext.current().getCaller(), null, v); try { if (v != null && _volsDao.getHypervisorType(v.getId()).equals(HypervisorType.KVM)) { /* KVM needs to lock on the vm of volume, because it takes snapshot on behalf of vm, not volume */ @@ -561,7 +563,7 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma String vmName = _storageMgr.getVmNameOnVolume(volume); StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId()); BackupSnapshotCommand backupSnapshotCommand = new BackupSnapshotCommand(primaryStoragePoolNameLabel, secondaryStoragePoolUrl, dcId, accountId, volumeId, snapshot.getId(), volume.getPath(), srcPool, snapshotUuid, - snapshot.getName(), prevSnapshotUuid, prevBackupUuid, isVolumeInactive, vmName); + snapshot.getName(), prevSnapshotUuid, prevBackupUuid, isVolumeInactive, vmName, _backupsnapshotwait); if ( swift != null ) { backupSnapshotCommand.setSwift(toSwiftTO(swift)); @@ -666,34 +668,12 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma } } - private Long checkAccountPermissions(long targetAccountId, long targetDomainId, String targetDesc, long targetId) { - Long accountId = null; - - Account account = UserContext.current().getCaller(); - if (account != null) { - - /* - * if (!isAdmin(account.getType())) { if (account.getId() != targetAccountId) { throw new - * InvalidParameterValueException("Unable to find a " + targetDesc + " with id " + targetId + " for this account"); - * } } else if (!_domainDao.isChildDomain(account.getDomainId(), targetDomainId)) { throw new - * PermissionDeniedException("Unable to perform operation for " + targetDesc + " with id " + targetId + - * ", permission denied."); } accountId = account.getId(); - */ - _accountMgr.checkAccess(account, _domainDao.findById(targetDomainId)); - } - - return accountId; - } - - private static boolean isAdmin(short accountType) { - return ((accountType == Account.ACCOUNT_TYPE_ADMIN) || (accountType == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) || (accountType == Account.ACCOUNT_TYPE_READ_ONLY_ADMIN)); - } - @Override @DB @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_DELETE, eventDescription = "deleting snapshot", async = true) public boolean deleteSnapshot(DeleteSnapshotCmd cmd) { Long snapshotId = cmd.getId(); + Account caller = UserContext.current().getCaller(); // Verify parameters Snapshot snapshotCheck = _snapshotDao.findByIdIncludingRemoved(snapshotId.longValue()); @@ -701,20 +681,9 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma throw new InvalidParameterValueException("unable to find a snapshot with id " + snapshotId); } - // If an account was passed in, make sure that it matches the account of the snapshot - Account snapshotOwner = _accountDao.findById(snapshotCheck.getAccountId()); - if (snapshotOwner == null) { - throw new InvalidParameterValueException("Snapshot id " + snapshotId + " does not have a valid account"); - } - checkAccountPermissions(snapshotOwner.getId(), snapshotOwner.getDomainId(), "snapshot", snapshotId); + _accountMgr.checkAccess(caller, null, snapshotCheck); - boolean status = deleteSnapshotInternal(snapshotId); - if (!status) { 
- s_logger.warn("Failed to delete snapshot"); - throw new CloudRuntimeException("Failed to delete snapshot:" + snapshotId); - } - - return status; + return deleteSnapshotInternal(snapshotId); } @DB @@ -726,7 +695,7 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma SnapshotVO snapshot = _snapshotDao.findById(snapshotId); if (snapshot.getBackupSnapshotId() != null) { List snaps = _snapshotDao.listByBackupUuid(snapshot.getVolumeId(), snapshot.getBackupSnapshotId()); - if (snaps != null && snaps.size() > 1) { + if (!snaps.isEmpty()) { snapshot.setBackupSnapshotId(null); _snapshotDao.update(snapshot.getId(), snapshot); } @@ -849,16 +818,15 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma if (volumeId != null) { VolumeVO volume = _volsDao.findById(volumeId); if (volume != null) { - checkAccountPermissions(volume.getAccountId(), volume.getDomainId(), "volume", volumeId); + _accountMgr.checkAccess(UserContext.current().getCaller(), null, volume); } - } Account account = UserContext.current().getCaller(); Long domainId = cmd.getDomainId(); String accountName = cmd.getAccountName(); Long accountId = null; - if ((account == null) || isAdmin(account.getType())) { + if ((account == null) || _accountMgr.isAdmin(account.getType())) { if (domainId != null) { if ((account != null) && !_domainDao.isChildDomain(account.getDomainId(), domainId)) { throw new PermissionDeniedException("Unable to list templates for domain " + domainId + ", permission denied."); @@ -1059,9 +1027,8 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma AccountVO owner = _accountDao.findById(volume.getAccountId()); DomainVO domain = _domainDao.findById(owner.getDomainId()); - - // If an account was passed in, make sure that it matches the account of the volume - checkAccountPermissions(volume.getAccountId(), volume.getDomainId(), "volume", volumeId); + + _accountMgr.checkAccess(UserContext.current().getCaller(), null, volume); Long instanceId = volume.getInstanceId(); if (instanceId != null) { @@ -1143,7 +1110,7 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma if (volume == null) { throw new InvalidParameterValueException("Unable to find a volume with id " + volumeId); } - checkAccountPermissions(volume.getAccountId(), volume.getDomainId(), "volume", volumeId); + _accountMgr.checkAccess(UserContext.current().getCaller(), null, volume); return listPoliciesforVolume(cmd.getVolumeId()); } @@ -1207,7 +1174,7 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma if (account != null) { long volAcctId = volume.getAccountId(); - if (isAdmin(account.getType())) { + if (_accountMgr.isAdmin(account.getType())) { Account userAccount = _accountDao.findById(Long.valueOf(volAcctId)); if (!_domainDao.isChildDomain(account.getDomainId(), userAccount.getDomainId())) { throw new PermissionDeniedException("Unable to list snapshot schedule for volume " + volumeId + ", permission denied."); @@ -1328,6 +1295,9 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma if (configDao == null) { throw new ConfigurationException("Unable to get the configuration dao."); } + + String value = configDao.getValue(Config.BackupSnapshotWait.toString()); + _backupsnapshotwait = NumbersUtil.parseInt(value, Integer.parseInt(Config.BackupSnapshotWait.getDefaultValue())); Type.HOURLY.setMax(NumbersUtil.parseInt(configDao.getValue("snapshot.max.hourly"), HOURLYMAX)); 
Type.DAILY.setMax(NumbersUtil.parseInt(configDao.getValue("snapshot.max.daily"), DAILYMAX)); @@ -1385,8 +1355,7 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma throw new InvalidParameterValueException("Policy id given: " + policy + " does not belong to a valid volume"); } - // If an account was passed in, make sure that it matches the account of the volume - checkAccountPermissions(volume.getAccountId(), volume.getDomainId(), "volume", volume.getId()); + _accountMgr.checkAccess(UserContext.current().getCaller(), null, volume); } boolean success = true; diff --git a/server/src/com/cloud/template/HyervisorTemplateAdapter.java b/server/src/com/cloud/template/HyervisorTemplateAdapter.java old mode 100644 new mode 100755 index d6e3e9424d2..2d7244c2beb --- a/server/src/com/cloud/template/HyervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HyervisorTemplateAdapter.java @@ -100,6 +100,13 @@ public class HyervisorTemplateAdapter extends TemplateAdapterBase implements Tem throw new InvalidParameterValueException("Please specify a valid "+ cmd.getFormat().toLowerCase()); } + if ((cmd.getFormat().equalsIgnoreCase("vhd") && (!url.toLowerCase().endsWith("vhd") && !url.toLowerCase().endsWith("vhd.zip") && !url.toLowerCase().endsWith("vhd.bz2") && !url.toLowerCase().endsWith("vhd.gz") )) + || (cmd.getFormat().equalsIgnoreCase("qcow2") && (!url.toLowerCase().endsWith("qcow2") && !url.toLowerCase().endsWith("qcow2.zip") && !url.toLowerCase().endsWith("qcow2.bz2") && !url.toLowerCase().endsWith("qcow2.gz") )) + || (cmd.getFormat().equalsIgnoreCase("ova") && (!url.toLowerCase().endsWith("ova") && !url.toLowerCase().endsWith("ova.zip") && !url.toLowerCase().endsWith("ova.bz2") && !url.toLowerCase().endsWith("ova.gz"))) + || (cmd.getFormat().equalsIgnoreCase("raw") && (!url.toLowerCase().endsWith("img") && !url.toLowerCase().endsWith("raw")))) { + throw new InvalidParameterValueException("Please specify a valid URL. 
URL:" + url + " is an invalid for the format " + cmd.getFormat().toLowerCase()); + } + profile.setUrl(validateUrl(url)); return profile; } diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index ff7952d24c3..2ee263b7840 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -161,10 +161,10 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe @Inject StorageManager _storageMgr; @Inject AsyncJobManager _asyncMgr; @Inject UserVmManager _vmMgr; - @Inject ConfigurationDao _configDao; @Inject UsageEventDao _usageEventDao; @Inject HypervisorGuruManager _hvGuruMgr; - @Inject AccountService _accountService; + @Inject AccountService _accountService; + int _primaryStorageDownloadWait; protected SearchBuilder HostTemplateStatesSearch; int _storagePoolMaxWaitSeconds = 3600; @@ -280,7 +280,7 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe throw new IllegalArgumentException("Please specify a valid zone."); } - if (!template.isExtractable()) { + if (!_accountMgr.isRootAdmin(caller.getType()) && !template.isExtractable()) { throw new InvalidParameterValueException("Unable to extract template id=" + templateId + " as it's not extractable"); } @@ -451,7 +451,7 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe } String url = origUrl + "/" + templateHostRef.getInstallPath(); PrimaryStorageDownloadCommand dcmd = new PrimaryStorageDownloadCommand(template.getUniqueName(), url, template.getFormat(), - template.getAccountId(), pool.getId(), pool.getUuid()); + template.getAccountId(), pool.getId(), pool.getUuid(), _primaryStorageDownloadWait); HostVO secondaryStorageHost = _hostDao.findById(templateHostRef.getHostId()); assert(secondaryStorageHost != null); dcmd.setSecondaryStorageUrl(secondaryStorageHost.getStorageUrl()); @@ -466,7 +466,7 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe // set 120 min timeout for this command PrimaryStorageDownloadAnswer answer = (PrimaryStorageDownloadAnswer)_agentMgr.easySend( - _hvGuruMgr.getGuruProcessedCommandTargetHost(vo.getHostId(), dcmd), dcmd, 120*60*1000); + _hvGuruMgr.getGuruProcessedCommandTargetHost(vo.getHostId(), dcmd), dcmd); if (answer != null && answer.getResult() ) { templateStoragePoolRef.setDownloadPercent(100); templateStoragePoolRef.setDownloadState(Status.DOWNLOADED); @@ -733,7 +733,10 @@ public class TemplateManagerImpl implements TemplateManager, Manager, TemplateSe final Map configs = configDao.getConfiguration("AgentManager", params); _routerTemplateId = NumbersUtil.parseInt(configs.get("router.template.id"), 1); - + + String value = configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); + _primaryStorageDownloadWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); + HostTemplateStatesSearch = _tmpltHostDao.createSearchBuilder(); HostTemplateStatesSearch.and("id", HostTemplateStatesSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); HostTemplateStatesSearch.and("state", HostTemplateStatesSearch.entity().getDownloadState(), SearchCriteria.Op.EQ); diff --git a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index c4111be6035..a85b1cc01fe 100755 --- a/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ 
b/server/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -42,6 +42,7 @@ import com.cloud.upgrade.dao.Upgrade217to218; import com.cloud.upgrade.dao.Upgrade218to22; import com.cloud.upgrade.dao.Upgrade218to224DomainVlans; import com.cloud.upgrade.dao.Upgrade2210to2211; +import com.cloud.upgrade.dao.Upgrade2211to2212; import com.cloud.upgrade.dao.Upgrade221to222; import com.cloud.upgrade.dao.Upgrade222to224; import com.cloud.upgrade.dao.Upgrade224to225; @@ -73,19 +74,20 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { public DatabaseUpgradeChecker() { _dao = ComponentLocator.inject(VersionDaoImpl.class); - _upgradeMap.put("2.1.7", new DbUpgrade[] { new Upgrade217to218(), new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.1.8", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.1.9", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.1", new DbUpgrade[] { new Upgrade221to222(), new UpgradeSnapshot223to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.2", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.3", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.4", new DbUpgrade[] { new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.5", new DbUpgrade[] { new Upgrade225to226(), new Upgrade227to228(),new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211() }); - _upgradeMap.put("2.2.6", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.7", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.8", new DbUpgrade[] { new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.9", new DbUpgrade[] { new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.10", new DbUpgrade[] { new Upgrade2210to2211()}); + _upgradeMap.put("2.1.7", new DbUpgrade[] { new Upgrade217to218(), new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new 
Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.1.8", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.1.9", new DbUpgrade[] { new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.1", new DbUpgrade[] { new Upgrade221to222(), new UpgradeSnapshot223to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.2", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.3", new DbUpgrade[] { new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.4", new DbUpgrade[] { new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.5", new DbUpgrade[] { new Upgrade225to226(), new Upgrade227to228(),new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212() }); + _upgradeMap.put("2.2.6", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.7", new DbUpgrade[] { new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.8", new DbUpgrade[] { new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.9", new DbUpgrade[] { new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.10", new DbUpgrade[] { new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.11", new DbUpgrade[] { new Upgrade2211to2212()}); } protected void runScript(Connection conn, File file) { diff --git a/server/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java b/server/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java index fcc79bb87fc..e6bb45bd3df 100755 --- a/server/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java +++ b/server/src/com/cloud/upgrade/PremiumDatabaseUpgradeChecker.java @@ -25,6 +25,7 @@ import com.cloud.upgrade.dao.Upgrade217to218; import com.cloud.upgrade.dao.Upgrade218to224DomainVlans; import com.cloud.upgrade.dao.Upgrade218to22Premium; import com.cloud.upgrade.dao.Upgrade2210to2211; +import com.cloud.upgrade.dao.Upgrade2211to2212; import com.cloud.upgrade.dao.Upgrade221to222Premium; import com.cloud.upgrade.dao.Upgrade222to224Premium; import com.cloud.upgrade.dao.Upgrade224to225; @@ -42,21 +43,22 
@@ import com.cloud.utils.component.SystemIntegrityChecker; public class PremiumDatabaseUpgradeChecker extends DatabaseUpgradeChecker { public PremiumDatabaseUpgradeChecker() { _dao = ComponentLocator.inject(VersionDaoImpl.class); - _dao = ComponentLocator.inject(VersionDaoImpl.class); - _upgradeMap.put("2.1.7", new DbUpgrade[] { new Upgrade217to218(), new Upgrade218to22Premium(), new Upgrade221to222Premium(), new UpgradeSnapshot217to224(), new Upgrade222to224Premium(),new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); + _upgradeMap.put("2.1.7", new DbUpgrade[] { new Upgrade217to218(), new Upgrade218to22Premium(), new Upgrade221to222Premium(), new UpgradeSnapshot217to224(), new Upgrade222to224Premium(), + new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); _upgradeMap.put("2.1.8", new DbUpgrade[] { new Upgrade218to22Premium(), new Upgrade221to222Premium(), new UpgradeSnapshot217to224(), new Upgrade222to224Premium(), - new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211() }); + new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212() }); _upgradeMap.put("2.1.9", new DbUpgrade[] { new Upgrade218to22Premium(), new Upgrade221to222Premium(), new UpgradeSnapshot217to224(), new Upgrade222to224Premium(), - new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.1", new DbUpgrade[] { new Upgrade221to222Premium(), new Upgrade222to224Premium(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.2", new DbUpgrade[] { new Upgrade222to224Premium(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.3", new DbUpgrade[] { new Upgrade222to224Premium(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.4", new DbUpgrade[] { new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.5", new DbUpgrade[] { new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.6", new DbUpgrade[] { new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.7", new DbUpgrade[] { new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.8", new DbUpgrade[] { new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211()}); - _upgradeMap.put("2.2.9", new DbUpgrade[] { new Upgrade229to2210(), new 
Upgrade2210to2211()}); - _upgradeMap.put("2.2.10", new DbUpgrade[] { new Upgrade2210to2211()}); + new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.1", new DbUpgrade[] { new Upgrade221to222Premium(), new Upgrade222to224Premium(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.2", new DbUpgrade[] { new Upgrade222to224Premium(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.3", new DbUpgrade[] { new Upgrade222to224Premium(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.4", new DbUpgrade[] { new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.5", new DbUpgrade[] { new Upgrade225to226(), new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.6", new DbUpgrade[] { new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.7", new DbUpgrade[] { new Upgrade227to228Premium(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.8", new DbUpgrade[] { new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.9", new DbUpgrade[] { new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.10", new DbUpgrade[] { new Upgrade2210to2211(), new Upgrade2211to2212()}); + _upgradeMap.put("2.2.11", new DbUpgrade[] { new Upgrade2211to2212()}); } } diff --git a/server/src/com/cloud/upgrade/dao/Upgrade2211to2212.java b/server/src/com/cloud/upgrade/dao/Upgrade2211to2212.java new file mode 100644 index 00000000000..81c6cc9baa3 --- /dev/null +++ b/server/src/com/cloud/upgrade/dao/Upgrade2211to2212.java @@ -0,0 +1,65 @@ +/** + * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ * + */ +package com.cloud.upgrade.dao; + +import java.io.File; +import java.sql.Connection; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; + +public class Upgrade2211to2212 implements DbUpgrade { + final static Logger s_logger = Logger.getLogger(Upgrade2211to2212.class); + + @Override + public String[] getUpgradableVersionRange() { + return new String[] { "2.2.11", "2.2.11"}; + } + + @Override + public String getUpgradedVersion() { + return "2.2.12"; + } + + @Override + public boolean supportsRollingUpgrade() { + return true; + } + + @Override + public File[] getPrepareScripts() { + String script = Script.findScript("", "db/schema-2211to2212.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-2211to2212.sql"); + } + + return new File[] { new File(script) }; + } + + @Override + public void performDataMigration(Connection conn) { + } + + @Override + public File[] getCleanupScripts() { + return null; + } + +} diff --git a/server/src/com/cloud/upgrade/dao/Upgrade229to2210.java b/server/src/com/cloud/upgrade/dao/Upgrade229to2210.java index 5ce4468083c..7d5e4a8f6ee 100644 --- a/server/src/com/cloud/upgrade/dao/Upgrade229to2210.java +++ b/server/src/com/cloud/upgrade/dao/Upgrade229to2210.java @@ -60,13 +60,52 @@ public class Upgrade229to2210 implements DbUpgrade { @Override public void performDataMigration(Connection conn) { updateFirewallRules(conn); + updateSnapshots(conn); } @Override public File[] getCleanupScripts() { return null; } - + + private void updateSnapshots(Connection conn) { + PreparedStatement pstmt = null; + ResultSet rs = null; + long currentSnapshotId = 0; + try { + pstmt = conn.prepareStatement("select id, prev_snap_id from snapshots where sechost_id is NULL and prev_snap_id is not NULL and status=\"BackedUp\" and removed is NULL order by id"); + rs = pstmt.executeQuery(); + while (rs.next()) { + long id = rs.getLong(1); + long preSnapId = rs.getLong(2); + currentSnapshotId = id; + pstmt = conn.prepareStatement("select sechost_id from snapshots where id=? and sechost_id is not NULL"); + pstmt.setLong(1, preSnapId); + ResultSet sechost = pstmt.executeQuery(); + if (sechost.next()) { + long secHostId = sechost.getLong(1); + pstmt = conn.prepareStatement("update snapshots set sechost_id=? 
where id=?"); + pstmt.setLong(1, secHostId); + pstmt.setLong(2, id); + pstmt.executeUpdate(); + } + } + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to update snapshots id=" + currentSnapshotId, e); + } finally { + try { + if (rs != null) { + rs.close(); + } + + if (pstmt != null) { + pstmt.close(); + } + } catch (SQLException e) { + } + } + } + private void updateFirewallRules(Connection conn) { PreparedStatement pstmt = null; ResultSet rs = null; diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index 11eb0d7a6b9..1d2742a68eb 100755 --- a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -1069,11 +1069,13 @@ public class AccountManagerImpl implements AccountManager, AccountService, Manag List userTemplates = _templateDao.listByAccountId(accountId); boolean allTemplatesDeleted = true; for (VMTemplateVO template : userTemplates) { - try { - allTemplatesDeleted = _tmpltMgr.delete(callerUserId, template.getId(), null); - } catch (Exception e) { - s_logger.warn("Failed to delete template while removing account: " + template.getName() + " due to: ", e); - allTemplatesDeleted = false; + if (template.getRemoved() == null) { + try { + allTemplatesDeleted = _tmpltMgr.delete(callerUserId, template.getId(), null); + } catch (Exception e) { + s_logger.warn("Failed to delete template while removing account: " + template.getName() + " due to: ", e); + allTemplatesDeleted = false; + } } } @@ -1846,12 +1848,15 @@ public class AccountManagerImpl implements AccountManager, AccountService, Manag txn = Transaction.open(Transaction.CLOUD_DB); //Cleanup removed accounts - List removedAccounts = _accountDao.findCleanupsForRemovedAccounts(); + List removedAccounts = _accountDao.findCleanupsForRemovedAccounts(null); s_logger.info("Found " + removedAccounts.size() + " removed accounts to cleanup"); for (AccountVO account : removedAccounts) { s_logger.debug("Cleaning up " + account.getId()); try { - cleanupAccount(account, getSystemUser().getId(), getSystemAccount()); + if (cleanupAccount(account, getSystemUser().getId(), getSystemAccount())) { + account.setNeedsCleanup(false); + _accountDao.update(account.getId(), account); + } } catch (Exception e) { s_logger.error("Skipping due to error on account " + account.getId(), e); } @@ -1872,6 +1877,23 @@ public class AccountManagerImpl implements AccountManager, AccountService, Manag } } + //cleanup inactive domains + List inactiveDomains = _domainDao.findInactiveDomains(); + s_logger.info("Found " + inactiveDomains.size() + " inactive domains to cleanup"); + for (DomainVO inactiveDomain : inactiveDomains) { + long domainId = inactiveDomain.getId(); + try { + List accountsForCleanupInDomain = _accountDao.findCleanupsForRemovedAccounts(domainId); + if (accountsForCleanupInDomain.isEmpty()) { + s_logger.debug("Removing inactive domain id=" + domainId); + _domainDao.remove(domainId); + } else { + s_logger.debug("Can't remove inactive domain id=" + domainId + " as it has accounts that need clenaup"); + } + } catch (Exception e) { + s_logger.error("Skipping due to error on domain " + domainId, e); + } + } } catch (Exception e) { s_logger.error("Exception ", e); diff --git a/server/src/com/cloud/user/dao/AccountDao.java b/server/src/com/cloud/user/dao/AccountDao.java index 2ff13f983ff..12465b27257 100644 --- a/server/src/com/cloud/user/dao/AccountDao.java +++ b/server/src/com/cloud/user/dao/AccountDao.java @@ -37,7 +37,7 @@ public 
interface AccountDao extends GenericDao { List findActiveAccounts(Long maxAccountId, Filter filter); List findRecentlyDeletedAccounts(Long maxAccountId, Date earliestRemovedDate, Filter filter); List findNewAccounts(Long minAccountId, Filter filter); - List findCleanupsForRemovedAccounts(); + List findCleanupsForRemovedAccounts(Long domainId); List findAdminAccountsForDomain(Long domainId); List findActiveAccountsForDomain(Long domain); void markForCleanup(long accountId); diff --git a/server/src/com/cloud/user/dao/AccountDaoImpl.java b/server/src/com/cloud/user/dao/AccountDaoImpl.java index 1d000b9b92a..57ad47018ce 100755 --- a/server/src/com/cloud/user/dao/AccountDaoImpl.java +++ b/server/src/com/cloud/user/dao/AccountDaoImpl.java @@ -71,7 +71,8 @@ public class AccountDaoImpl extends GenericDaoBase implements A CleanupForRemovedAccountsSearch = createSearchBuilder(); CleanupForRemovedAccountsSearch.and("cleanup", CleanupForRemovedAccountsSearch.entity().getNeedsCleanup(), SearchCriteria.Op.EQ); - CleanupForRemovedAccountsSearch.and("removed", CleanupForRemovedAccountsSearch.entity().getRemoved(), SearchCriteria.Op.NNULL); + CleanupForRemovedAccountsSearch.and("removed", CleanupForRemovedAccountsSearch.entity().getRemoved(), SearchCriteria.Op.NNULL); + CleanupForRemovedAccountsSearch.and("domainid", CleanupForRemovedAccountsSearch.entity().getDomainId(), SearchCriteria.Op.EQ); CleanupForRemovedAccountsSearch.done(); CleanupForDisabledAccountsSearch = createSearchBuilder(); @@ -82,9 +83,13 @@ public class AccountDaoImpl extends GenericDaoBase implements A } @Override - public List findCleanupsForRemovedAccounts() { + public List findCleanupsForRemovedAccounts(Long domainId) { SearchCriteria sc = CleanupForRemovedAccountsSearch.create(); - sc.setParameters("cleanup", true); + sc.setParameters("cleanup", true); + + if (domainId != null) { + sc.setParameters("domainid", domainId); + } return searchIncludingRemoved(sc, null, null, false); } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index 232f28a4744..12af7d4a275 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -75,6 +75,7 @@ import com.cloud.async.AsyncJobManager; import com.cloud.async.AsyncJobVO; import com.cloud.async.BaseAsyncJobExecutor; import com.cloud.capacity.dao.CapacityDao; +import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.ResourceCount.ResourceType; import com.cloud.configuration.dao.ConfigurationDao; @@ -340,7 +341,8 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager protected String _zone; private ConfigurationDao _configDao; - + private int _createprivatetemplatefromvolumewait; + private int _createprivatetemplatefromsnapshotwait; @Override public UserVmVO getVirtualMachine(long vmId) { return _vmDao.findById(vmId); @@ -1131,6 +1133,12 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager if (_instance == null) { _instance = "DEFAULT"; } + + String value = _configDao.getValue(Config.CreatePrivateTemplateFromVolumeWait.toString()); + _createprivatetemplatefromvolumewait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CreatePrivateTemplateFromVolumeWait.getDefaultValue())); + + value = _configDao.getValue(Config.CreatePrivateTemplateFromSnapshotWait.toString()); + _createprivatetemplatefromsnapshotwait = NumbersUtil.parseInt(value, 
Integer.parseInt(Config.CreatePrivateTemplateFromSnapshotWait.getDefaultValue())); String workers = configs.get("expunge.workers"); int wrks = NumbersUtil.parseInt(workers, 10); @@ -1525,7 +1533,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager _snapshotMgr.downloadSnapshotsFromSwift(snapshot); } cmd = new CreatePrivateTemplateFromSnapshotCommand(pool.getUuid(), secondaryStorageURL, dcId, accountId, snapshot.getVolumeId(), backupSnapshotUUID, snapshot.getName(), - origTemplateInstallPath, templateId, name); + origTemplateInstallPath, templateId, name, _createprivatetemplatefromsnapshotwait); } else if (volumeId != null) { VolumeVO volume = _volsDao.findById(volumeId); if (volume == null) { @@ -1546,7 +1554,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager String secondaryStorageURL = secondaryStorageHost.getStorageUrl(); pool = _storagePoolDao.findById(volume.getPoolId()); - cmd = new CreatePrivateTemplateFromVolumeCommand(secondaryStorageURL, templateId, accountId, command.getTemplateName(), uniqueName, volume.getPath(), vmName); + cmd = new CreatePrivateTemplateFromVolumeCommand(secondaryStorageURL, templateId, accountId, command.getTemplateName(), uniqueName, volume.getPath(), vmName, _createprivatetemplatefromvolumewait); } else { throw new CloudRuntimeException("Creating private Template need to specify snapshotId or volumeId"); @@ -2522,7 +2530,7 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager } if(keyboard != null && !keyboard.isEmpty()) - vm.setDetail(VirtualMachine.PARAM_KEY_KEYBOARD, keyboard); + vm.setDetail(VmDetailConstants.KEYBOARD, keyboard); if (isIso) { vm.setIsoId(template.getId()); diff --git a/server/src/com/cloud/vm/dao/DomainRouterDao.java b/server/src/com/cloud/vm/dao/DomainRouterDao.java index 76bd8d18627..564d1e50cc3 100755 --- a/server/src/com/cloud/vm/dao/DomainRouterDao.java +++ b/server/src/com/cloud/vm/dao/DomainRouterDao.java @@ -94,10 +94,10 @@ public interface DomainRouterDao extends GenericDao { List listActive(long networkId); /** - * List domain routers by state and network type + * List domain routers by state and network type which reside on Host managed by the specified management server * @return */ - List listByStateAndNetworkType(State state, GuestIpType ipType); + List listByStateAndNetworkType(State state, GuestIpType ipType, long mgmtSrvrId); List findByNetworkOutsideThePod(long networkId, long podId, State state, Role role); diff --git a/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java b/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java index f9a69246ae1..31405a04eeb 100755 --- a/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java +++ b/server/src/com/cloud/vm/dao/DomainRouterDaoImpl.java @@ -23,6 +23,8 @@ import javax.ejb.Local; import org.apache.log4j.Logger; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDaoImpl; import com.cloud.network.Network.GuestIpType; import com.cloud.network.NetworkVO; import com.cloud.network.dao.NetworkDaoImpl; @@ -48,7 +50,8 @@ public class DomainRouterDaoImpl extends GenericDaoBase im protected final SearchBuilder StateNetworkTypeSearch; protected final SearchBuilder OutsidePodSearch; NetworkDaoImpl _networksDao = ComponentLocator.inject(NetworkDaoImpl.class); - + HostDaoImpl _hostsDao = ComponentLocator.inject(HostDaoImpl.class); + protected DomainRouterDaoImpl() { AllFieldsSearch = createSearchBuilder(); AllFieldsSearch.and("dc", AllFieldsSearch.entity().getDataCenterIdToDeployIn(), 
Op.EQ); @@ -81,6 +84,9 @@ public class DomainRouterDaoImpl extends GenericDaoBase im SearchBuilder joinStateNetwork = _networksDao.createSearchBuilder(); joinStateNetwork.and("guestType", joinStateNetwork.entity().getGuestType(), Op.EQ); StateNetworkTypeSearch.join("network", joinStateNetwork, joinStateNetwork.entity().getId(), StateNetworkTypeSearch.entity().getNetworkId(), JoinType.INNER); + SearchBuilder joinHost = _hostsDao.createSearchBuilder(); + joinHost.and("mgmtServerId", joinHost.entity().getManagementServerId(), Op.EQ); + StateNetworkTypeSearch.join("host", joinHost, joinHost.entity().getId(), StateNetworkTypeSearch.entity().getHostId(), JoinType.INNER); StateNetworkTypeSearch.done(); OutsidePodSearch = createSearchBuilder(); @@ -198,10 +204,11 @@ public class DomainRouterDaoImpl extends GenericDaoBase im } @Override - public List listByStateAndNetworkType(State state, GuestIpType ipType) { + public List listByStateAndNetworkType(State state, GuestIpType ipType, long mgmtSrvrId) { SearchCriteria sc = StateNetworkTypeSearch.create(); sc.setParameters("state", state); sc.setJoinParameters("network", "guestType", ipType); + sc.setJoinParameters("host", "mgmtServerId", mgmtSrvrId); return listBy(sc); } diff --git a/server/src/com/cloud/vm/dao/UserVmDaoImpl.java b/server/src/com/cloud/vm/dao/UserVmDaoImpl.java index f5fa0516ebd..306040af3a5 100755 --- a/server/src/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/server/src/com/cloud/vm/dao/UserVmDaoImpl.java @@ -22,19 +22,16 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; import java.util.Date; -import java.util.Enumeration; -import java.util.HashSet; import java.util.Hashtable; import java.util.List; import java.util.Map; -import java.util.Set; import javax.ejb.Local; import org.apache.log4j.Logger; +import com.cloud.host.dao.HostDaoImpl; import com.cloud.user.Account; -import com.cloud.uservm.UserVm; import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Attribute; import com.cloud.utils.db.GenericDaoBase; @@ -67,7 +64,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use protected final SearchBuilder AccountHostSearch; protected final SearchBuilder DestroySearch; - protected SearchBuilder AccountDataCenterVirtualSearch = null; + protected SearchBuilder AccountDataCenterVirtualSearch; protected GenericSearchBuilder CountByAccountPod; protected GenericSearchBuilder CountByAccount; protected GenericSearchBuilder PodsHavingVmsForAccount; @@ -107,6 +104,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use "where vm_instance.id in ("; protected final UserVmDetailsDaoImpl _detailsDao = ComponentLocator.inject(UserVmDetailsDaoImpl.class); + protected final NicDaoImpl _nicDao = ComponentLocator.inject(NicDaoImpl.class); protected UserVmDaoImpl() { AccountSearch = createSearchBuilder(); @@ -171,6 +169,18 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use CountByAccount.and("type", CountByAccount.entity().getType(), SearchCriteria.Op.EQ); CountByAccount.and("state", CountByAccount.entity().getState(), SearchCriteria.Op.NIN); CountByAccount.done(); + + + SearchBuilder nicSearch = _nicDao.createSearchBuilder(); + nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); + nicSearch.and("ip4Address", nicSearch.entity().getIp4Address(), SearchCriteria.Op.NNULL); + + AccountDataCenterVirtualSearch = createSearchBuilder(); + AccountDataCenterVirtualSearch.and("account", 
AccountDataCenterVirtualSearch.entity().getAccountId(), SearchCriteria.Op.EQ); + AccountDataCenterVirtualSearch.and("dc", AccountDataCenterVirtualSearch.entity().getDataCenterIdToDeployIn(), SearchCriteria.Op.EQ); + AccountDataCenterVirtualSearch.join("nicSearch", nicSearch, AccountDataCenterVirtualSearch.entity().getId(), nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); + AccountDataCenterVirtualSearch.done(); + _updateTimeAttr = _allAttributes.get("updateTime"); assert _updateTimeAttr != null : "Couldn't get this updateTime attribute"; @@ -247,18 +257,6 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use @Override public List listVirtualNetworkInstancesByAcctAndZone(long accountId, long dcId, long networkId) { - if (AccountDataCenterVirtualSearch == null) { - NicDao _nicDao = ComponentLocator.getLocator("management-server").getDao(NicDao.class); - SearchBuilder nicSearch = _nicDao.createSearchBuilder(); - nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); - nicSearch.and("ip4Address", nicSearch.entity().getIp4Address(), SearchCriteria.Op.NNULL); - - AccountDataCenterVirtualSearch = createSearchBuilder(); - AccountDataCenterVirtualSearch.and("account", AccountDataCenterVirtualSearch.entity().getAccountId(), SearchCriteria.Op.EQ); - AccountDataCenterVirtualSearch.and("dc", AccountDataCenterVirtualSearch.entity().getDataCenterIdToDeployIn(), SearchCriteria.Op.EQ); - AccountDataCenterVirtualSearch.join("nicSearch", nicSearch, AccountDataCenterVirtualSearch.entity().getId(), nicSearch.entity().getInstanceId(), JoinBuilder.JoinType.INNER); - AccountDataCenterVirtualSearch.done(); - } SearchCriteria sc = AccountDataCenterVirtualSearch.create(); sc.setParameters("account", accountId); diff --git a/server/src/com/cloud/vm/dao/VMInstanceDao.java b/server/src/com/cloud/vm/dao/VMInstanceDao.java index 80440bc15bf..c4e7f33c8c4 100644 --- a/server/src/com/cloud/vm/dao/VMInstanceDao.java +++ b/server/src/com/cloud/vm/dao/VMInstanceDao.java @@ -83,4 +83,6 @@ public interface VMInstanceDao extends GenericDao, StateDao< List listByClusterId(long clusterId); List listVmsMigratingFromHost(Long hostId); + + public Long countRunningByHostId(long hostId); } diff --git a/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index ef721783433..336e23b9a4c 100644 --- a/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/server/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -26,18 +26,15 @@ import javax.ejb.Local; import org.apache.log4j.Logger; -import com.cloud.cluster.agentlb.HostTransferMapVO; -import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDaoImpl; -import com.cloud.org.Managed; import com.cloud.utils.component.ComponentLocator; import com.cloud.utils.db.Attribute; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.UpdateBuilder; @@ -65,6 +62,8 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem protected final SearchBuilder HostIdUpTypesSearch; protected final SearchBuilder HostUpSearch; protected final GenericSearchBuilder CountVirtualRoutersByAccount; + protected 
GenericSearchBuilder CountRunningByHost; + protected final Attribute _updateTimeAttr; @@ -143,6 +142,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem CountVirtualRoutersByAccount.and("type", CountVirtualRoutersByAccount.entity().getType(), SearchCriteria.Op.EQ); CountVirtualRoutersByAccount.and("state", CountVirtualRoutersByAccount.entity().getState(), SearchCriteria.Op.NIN); CountVirtualRoutersByAccount.done(); + + CountRunningByHost = createSearchBuilder(Long.class); + CountRunningByHost.select(null, Func.COUNT, null); + CountRunningByHost.and("host", CountRunningByHost.entity().getHostId(), SearchCriteria.Op.EQ); + CountRunningByHost.and("state", CountRunningByHost.entity().getState(), SearchCriteria.Op.EQ); + CountRunningByHost.done(); _updateTimeAttr = _allAttributes.get("updateTime"); assert _updateTimeAttr != null : "Couldn't get this updateTime attribute"; @@ -303,12 +308,19 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem int result = update(vmi, sc); if (result == 0 && s_logger.isDebugEnabled()) { + VMInstanceVO vo = findByIdIncludingRemoved(vm.getId()); - StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); - str.append(": DB Data={Host=").append(vo.getHostId()).append("; State=").append(vo.getState().toString()).append("; updated=").append(vo.getUpdated()).append("; time=").append(vo.getUpdateTime()); - str.append("} New Data: {Host=").append(vm.getHostId()).append("; State=").append(vm.getState().toString()).append("; updated=").append(vmi.getUpdated()).append("; time=").append(vo.getUpdateTime()); - str.append("} Stale Data: {Host=").append(oldHostId).append("; State=").append(oldState).append("; updated=").append(oldUpdated).append("; time=").append(oldUpdateDate).append("}"); - s_logger.debug(str.toString()); + + if (vo != null) { + StringBuilder str = new StringBuilder("Unable to update ").append(vo.toString()); + str.append(": DB Data={Host=").append(vo.getHostId()).append("; State=").append(vo.getState().toString()).append("; updated=").append(vo.getUpdated()).append("; time=").append(vo.getUpdateTime()); + str.append("} New Data: {Host=").append(vm.getHostId()).append("; State=").append(vm.getState().toString()).append("; updated=").append(vmi.getUpdated()).append("; time=").append(vo.getUpdateTime()); + str.append("} Stale Data: {Host=").append(oldHostId).append("; State=").append(oldState).append("; updated=").append(oldUpdated).append("; time=").append(oldUpdateDate).append("}"); + s_logger.debug(str.toString()); + + } else { + s_logger.debug("Unable to update the vm id=" + vm.getId() + "; the vm either doesn't exist or already removed"); + } } return result > 0; } @@ -337,4 +349,12 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem sc.setParameters("state", State.Migrating); return listBy(sc); } + + @Override + public Long countRunningByHostId(long hostId){ + SearchCriteria sc = CountRunningByHost.create(); + sc.setParameters("host", hostId); + sc.setParameters("state", State.Running); + return customSearch(sc, null).get(0); + } } diff --git a/setup/bindir/cloud-sysvmadm.in b/setup/bindir/cloud-sysvmadm.in index 64006e3ae7f..411f89e4882 100755 --- a/setup/bindir/cloud-sysvmadm.in +++ b/setup/bindir/cloud-sysvmadm.in @@ -5,7 +5,7 @@ #set -x usage() { - printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-e]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud 
DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -e - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 5.\n -l - log file location. Default is cloud.log under current directory.\n\n" $(basename $0) >&2 + printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-e]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -e - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. Default is cloud.log under current directory.\n\n" $(basename $0) >&2 } @@ -17,7 +17,7 @@ ms=localhost user=root password= help= -maxthreads=5 +maxthreads=10 LOGFILE=cloud.log @@ -58,14 +58,16 @@ length_secondary=(${#secondary[@]}) length_console=(${#console[@]}) -echo -e "\nStopping and starting secondary storage vms..." -echo -e "Stopping and starting secondary storage vms..." >>$LOGFILE +echo -e "\nStopping and starting $length_secondary secondary storage vm(s)..." +echo -e "Stopping and starting $length_secondary secondary storage vm(s)..." >>$LOGFILE for d in "${secondary[@]}"; do + echo "INFO: Stopping secondary storage vm with id $d" >>$LOGFILE jobresult=$(send_request stopSystemVm $d) if [ "$jobresult" != "1" ]; then echo "ERROR: Failed to stop secondary storage vm with id $d" >>$LOGFILE else + echo "INFO: Starting secondary storage vm with id $d" >>$LOGFILE jobresult=$(send_request startSystemVm $d SSVM) if [ "$jobresult" != "1" ]; then echo "ERROR: Failed to start secondary storage vm with id $d" >>$LOGFILE @@ -76,27 +78,32 @@ done if [ "$length_secondary" == "0" ];then echo -e "No running secondary storage vms found \n" else - echo -e "Done stopping and starting secondary storage vms" - echo -e "Done stopping and starting secondary storage vms." >>$LOGFILE + echo -e "Done stopping and starting secondary storage vm(s)" + echo -e "Done stopping and starting secondary storage vm(s)." >>$LOGFILE fi -echo -e "\nStopping and starting console proxy vms..." -echo -e "Stopping and starting console proxy vms..." >>$LOGFILE +echo -e "\nStopping and starting $length_console console proxy vm(s)..." +echo -e "Stopping and starting $length_console console proxy vm(s)..." 
>>$LOGFILE for d in "${console[@]}"; do + echo "INFO: Stopping console proxy with id $d" >>$LOGFILE jobresult=$(send_request stopSystemVm $d) if [ "$jobresult" != "1" ]; then - echo "ERROR: Failed to stop console proxy vm with id $d" >>$LOGFILE + echo "ERROR: Failed to stop console proxy vm with id $d" >>$LOGFILE else - jobresult=$(send_request startSystemVm $d consoleProxy) + echo "INFO: Starting console proxy vm with id $d" >>$LOGFILE + jobresult=$(send_request startSystemVm $d consoleProxy) + if [ "$jobresult" != "1" ]; then + echo "ERROR: Failed to start console proxy vm with id $d" >>$LOGFILE + fi fi done if [ "$length_console" == "0" ];then echo -e "No running console proxy vms found \n" else - echo "Done stopping and starting console proxy vms." - echo "Done stopping and starting console proxy vms." >>$LOGFILE + echo "Done stopping and starting console proxy vm(s)." + echo "Done stopping and starting console proxy vm(s)." >>$LOGFILE fi } @@ -104,8 +111,8 @@ stop_start_router() { router=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vm_instance where state=\"Running\" and type=\"DomainRouter\""`) length_router=(${#router[@]}) - echo -e "\nStopping and starting running routing vms... " - echo -e "Stopping and starting running routing vms... " >>$LOGFILE + echo -e "\nStopping and starting $length_router running routing vm(s)... " + echo -e "Stopping and starting $length_router running routing vm(s)... " >>$LOGFILE #Spawn reboot router in parallel - run commands in chunks - number of threads is configurable @@ -148,7 +155,6 @@ stop_start_router() { fi done - if [ "$length_router" == "0" ];then echo -e "No running router vms found \n" >>$LOGFILE @@ -157,8 +163,8 @@ stop_start_router() { sleep 10 done - echo -e "Done restarting routers. \n" - echo -e "Done restarting routers. \n" >>$LOGFILE + echo -e "Done restarting router(s). \n" + echo -e "Done restarting router(s). 
\n" >>$LOGFILE fi } @@ -180,6 +186,7 @@ send_request(){ reboot_router(){ + echo "INFO: Restarting router with id $1" >>$LOGFILE jobid=`curl -sS "http://$ms:8096/?command=rebootRouter&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}` if [ "$jobid" == "" ]; then echo "ERROR: Failed to restart domainRouter with id $1" >>$LOGFILE @@ -191,8 +198,10 @@ reboot_router(){ if [ "$jobresult" != "1" ]; then echo "ERROR: Failed to restart domainRouter with id $1" >>$LOGFILE + exit 0 else echo "INFO: Successfully restarted domainRouter with id $1" >>$LOGFILE + exit 0 fi } diff --git a/setup/db/create-schema.sql b/setup/db/create-schema.sql index 264d698d7b0..ae84655c427 100755 --- a/setup/db/create-schema.sql +++ b/setup/db/create-schema.sql @@ -682,6 +682,7 @@ CREATE TABLE `cloud`.`host` ( `url` varchar(255) COMMENT 'iqn for the servers', `fs_type` varchar(32), `hypervisor_type` varchar(32) COMMENT 'hypervisor type, can be NONE for storage', + `hypervisor_version` varchar(32) COMMENT 'hypervisor version', `ram` bigint unsigned, `resource` varchar(255) DEFAULT NULL COMMENT 'If it is a local resource, this is the class name', `version` varchar(40) NOT NULL, @@ -918,13 +919,14 @@ CREATE TABLE `cloud`.`user_vm` ( CONSTRAINT `fk_user_vm__id` FOREIGN KEY `fk_user_vm__id` (`id`) REFERENCES `vm_instance`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +-- note, uer_vm_details is now used for all VMs (not just for user vms) CREATE TABLE `cloud`.`user_vm_details` ( `id` bigint unsigned NOT NULL auto_increment, `vm_id` bigint unsigned NOT NULL COMMENT 'vm id', `name` varchar(255) NOT NULL, `value` varchar(1024) NOT NULL, PRIMARY KEY (`id`), - CONSTRAINT `fk_user_vm_details__vm_id` FOREIGN KEY `fk_user_vm_details__vm_id`(`vm_id`) REFERENCES `user_vm`(`id`) ON DELETE CASCADE + CONSTRAINT `fk_user_vm_details__vm_id` FOREIGN KEY `fk_user_vm_details__vm_id`(`vm_id`) REFERENCES `vm_instance`(`id`) ON DELETE CASCADE ) ENGINE=InnoDB DEFAULT CHARSET=utf8; @@ -1347,6 +1349,15 @@ CREATE TABLE `cloud`.`guest_os_category` ( PRIMARY KEY (`id`) ) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; +CREATE TABLE `cloud`.`hypervisor_capabilities` ( + `id` bigint unsigned NOT NULL auto_increment, + `hypervisor_type` varchar(32) NOT NULL, + `hypervisor_version` varchar(32), + `max_guests_limit` bigint unsigned DEFAULT 50, + `security_group_enabled` int(1) unsigned DEFAULT 1 COMMENT 'Is security group supported', + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; + CREATE TABLE `cloud`.`launch_permission` ( `id` bigint unsigned NOT NULL auto_increment, `template_id` bigint unsigned NOT NULL, diff --git a/setup/db/db/schema-2210to2211.sql b/setup/db/db/schema-2210to2211.sql index cd7d30c6cd0..e69de29bb2d 100644 --- a/setup/db/db/schema-2210to2211.sql +++ b/setup/db/db/schema-2210to2211.sql @@ -1,6 +0,0 @@ ---; --- Schema upgrade from 2.2.10 to 2.2.11; ---; - -ALTER TABLE `cloud`.`vm_template` ADD COLUMN `template_tag` varchar(255) COMMENT 'template tag'; - diff --git a/setup/db/db/schema-2210to30.sql b/setup/db/db/schema-2210to30.sql deleted file mode 100755 index d2911c7ebf7..00000000000 --- a/setup/db/db/schema-2210to30.sql +++ /dev/null @@ -1,6 +0,0 @@ ---; --- Schema upgrade from 2.2.10 to 3.0; ---; - -ALTER TABLE `cloud`.`template_host_ref` DROP COLUMN `pool_id`; - diff --git a/setup/db/db/schema-2211to2212.sql b/setup/db/db/schema-2211to2212.sql new file mode 100644 index 00000000000..6eafa4b9ab5 --- /dev/null +++ 
b/setup/db/db/schema-2211to2212.sql @@ -0,0 +1,12 @@ +--; +-- Schema upgrade from 2.2.11 to 2.2.12; +--; + +ALTER TABLE `cloud`.`vm_template` ADD COLUMN `template_tag` varchar(255) COMMENT 'template tag'; + +UPDATE vm_instance SET state='Error' WHERE state='Creating' AND removed IS null; + +INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'json.content.type', 'text/javascript', 'Http response content type for .js files (default is text/javascript)'); + +ALTER IGNORE TABLE `cloud`.`user_vm_details` DROP FOREIGN KEY `fk_user_vm_details__vm_id`; +ALTER TABLE `cloud`.`user_vm_details` ADD CONSTRAINT `fk_user_vm_details__vm_id` FOREIGN KEY `fk_user_vm_details__vm_id`(`vm_id`) REFERENCES `vm_instance`(`id`) ON DELETE CASCADE; diff --git a/setup/db/db/schema-2212to30.sql b/setup/db/db/schema-2212to30.sql new file mode 100644 index 00000000000..7ff5417806d --- /dev/null +++ b/setup/db/db/schema-2212to30.sql @@ -0,0 +1,25 @@ +--; +-- Schema upgrade from 2.2.12 to 3.0; +--; + +ALTER TABLE `cloud`.`template_host_ref` DROP COLUMN `pool_id`; + +ALTER TABLE `cloud`.`host` ADD COLUMN `hypervisor_version` varchar(32) COMMENT 'hypervisor version' AFTER hypervisor_type; + +CREATE TABLE `cloud`.`hypervisor_capabilities` ( + `id` bigint unsigned NOT NULL auto_increment, + `hypervisor_type` varchar(32) NOT NULL, + `hypervisor_version` varchar(32), + `max_guests_limit` bigint unsigned DEFAULT 50, + `security_group_enabled` int(1) unsigned DEFAULT 1 COMMENT 'Is security group supported', + PRIMARY KEY (`id`) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8; + +INSERT INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', 'XCP 1.0', 50, 1); +INSERT INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '5.6', 50, 1); +INSERT INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '5.6 FP1', 50, 1); +INSERT INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '5.6 SP2', 50, 1); +INSERT INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('XenServer', '6.0 beta', 50, 1); +INSERT INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('VMware', null, 128, 0); +INSERT INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('KVM', null, 50, 1); +INSERT INTO `cloud`.`hypervisor_capabilities`(hypervisor_type, hypervisor_version, max_guests_limit, security_group_enabled) VALUES ('Ovm', '2.3', 25, 1); \ No newline at end of file diff --git a/setup/db/db/schema-229to2210.sql b/setup/db/db/schema-229to2210.sql index eb6d6f3671d..8e7aaec5445 100644 --- a/setup/db/db/schema-229to2210.sql +++ b/setup/db/db/schema-229to2210.sql @@ -47,6 +47,9 @@ INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-serv INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'vmware.reserve.mem', 'false', 'Specify whether or not to reserve memory based on memory overprovisioning factor'); INSERT IGNORE INTO configuration VALUES ('Advanced', 'DEFAULT', 'management-server', 'mem.overprovisioning.factor', '1', 'Used 
for memory overprovisioning calculation'); +INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'AgentManager', 'remote.access.vpn.psk.length', '24', 'The length of the ipsec preshared key (minimum 8, maximum 256)'); +INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'AgentManager', 'remote.access.vpn.client.iprange', '10.1.2.1-10.1.2.8', 'The range of ips to be allocated to remote access vpn clients. The first ip in the range is used by the VPN server'); +INSERT IGNORE INTO configuration VALUES ('Network', 'DEFAULT', 'AgentManager', 'remote.access.vpn.user.limit', '8', 'The maximum number of VPN users that can be created per account'); CREATE TABLE IF NOT exists `cloud`.`elastic_lb_vm_map` ( `id` bigint unsigned NOT NULL auto_increment, @@ -60,14 +63,3 @@ CREATE TABLE IF NOT exists `cloud`.`elastic_lb_vm_map` ( ) ENGINE=InnoDB DEFAULT CHARSET=utf8; UPDATE `cloud`.`network_offerings` SET lb_service=1 where unique_name='System-Guest-Network'; - -UPDATE `cloud`.`vm_template` SET type='SYSTEM' WHERE name='systemvm-xenserver-2.2.10'; -UPDATE `cloud`.`vm_template` SET type='SYSTEM' WHERE name='systemvm-kvm-2.2.10'; -UPDATE `cloud`.`vm_template` SET type='SYSTEM' WHERE name='systemvm-vSphere-2.2.10'; - -UPDATE vm_instance SET vm_template_id=(SELECT id FROM vm_template WHERE name='systemvm-xenserver-2.2.10' AND removed IS NULL) where vm_template_id=1; -UPDATE vm_instance SET vm_template_id=(SELECT id FROM vm_template WHERE name='systemvm-kvm-2.2.10' AND removed IS NULL) where vm_template_id=3; -UPDATE vm_instance SET vm_template_id=(SELECT id FROM vm_template WHERE name='systemvm-vSphere-2.2.10' AND removed IS NULL) where vm_template_id=8; - --- Update system Vms using systemvm-xenserver-2.2.4 template; -UPDATE vm_instance SET vm_template_id=(SELECT id FROM vm_template WHERE name='systemvm-xenserver-2.2.10' AND removed IS NULL) where vm_template_id=(SELECT id FROM vm_template WHERE name='systemvm-xenserver-2.2.4' AND removed IS NULL); diff --git a/test/.classpath b/test/.classpath index 7dd07c2f699..688081a40f4 100644 --- a/test/.classpath +++ b/test/.classpath @@ -4,14 +4,10 @@ - - - - - + diff --git a/tools/.classpath b/tools/.classpath index f497797e014..072d10b8fe2 100644 --- a/tools/.classpath +++ b/tools/.classpath @@ -30,5 +30,7 @@ + + diff --git a/tools/testClient/cloudstackTestCase.py b/tools/testClient/cloudstackTestCase.py index 39696b2ca9f..8625d683cb5 100644 --- a/tools/testClient/cloudstackTestCase.py +++ b/tools/testClient/cloudstackTestCase.py @@ -1,5 +1,8 @@ from cloudstackAPI import * -import unittest +try: + import unittest2 as unittest +except ImportError: + import unittest import cloudstackTestClient class cloudstackTestCase(unittest.case.TestCase): def __init__(self, args): diff --git a/deps/cloud-selenium-java-client-driver.jar b/tools/tooljars/cloud-selenium-java-client-driver.jar similarity index 100% rename from deps/cloud-selenium-java-client-driver.jar rename to tools/tooljars/cloud-selenium-java-client-driver.jar diff --git a/deps/cloud-selenium-server.jar b/tools/tooljars/cloud-selenium-server.jar similarity index 100% rename from deps/cloud-selenium-server.jar rename to tools/tooljars/cloud-selenium-server.jar diff --git a/ui/index.jsp b/ui/index.jsp index 8286c509ff0..6118cae3ca3 100644 --- a/ui/index.jsp +++ b/ui/index.jsp @@ -91,6 +91,7 @@ 'label.path': '', 'label.SR.name': '', 'label.nfs': '', + 'label.ocfs2': '', 'label.SharedMountPoint': '', 'label.PreSetup': '', 'label.iscsi': '', diff --git a/ui/scripts/cloud.core.pod.js 
b/ui/scripts/cloud.core.pod.js index ebcf7a54f08..14a79edf02c 100644 --- a/ui/scripts/cloud.core.pod.js +++ b/ui/scripts/cloud.core.pod.js @@ -808,6 +808,8 @@ function bindAddPrimaryStorageButton($leftmenuItem1) { if (protocol == "nfs" || protocol == "PreSetup" || protocol == "SharedMountPoint") { isValid &= validateString("Server", $thisDialog.find("#add_pool_nfs_server"), $thisDialog.find("#add_pool_nfs_server_errormsg")); isValid &= validateString("Path", $thisDialog.find("#add_pool_path"), $thisDialog.find("#add_pool_path_errormsg")); + } else if(protocol == "ocfs2") { + isValid &= validateString("Path", $thisDialog.find("#add_pool_path"), $thisDialog.find("#add_pool_path_errormsg")); } else if(protocol == "iscsi") { isValid &= validateString("Server", $thisDialog.find("#add_pool_nfs_server"), $thisDialog.find("#add_pool_nfs_server_errormsg")); isValid &= validateString("Target IQN", $thisDialog.find("#add_pool_iqn"), $thisDialog.find("#add_pool_iqn_errormsg")); @@ -846,7 +848,13 @@ function bindAddPrimaryStorageButton($leftmenuItem1) { if(path.substring(0,1)!="/") path = "/" + path; url = presetupURL(server, path); - } + } + else if (protocol == "ocfs2") { + var path = trim($thisDialog.find("#add_pool_path").val()); + if(path.substring(0,1)!="/") + path = "/" + path; + url = ocfs2URL(server, path); + } else if (protocol == "SharedMountPoint") { var path = trim($thisDialog.find("#add_pool_path").val()); if(path.substring(0,1)!="/") @@ -1103,6 +1111,15 @@ function presetupURL(server, path) { return url; } +function ocfs2URL(server, path) { + var url; + if(server.indexOf("://")==-1) + url = "ocfs2://" + server + path; + else + url = server + path; + return url; +} + function SharedMountPointURL(server, path) { var url; if(server.indexOf("://")==-1) diff --git a/ui/scripts/cloud.core.resource.js b/ui/scripts/cloud.core.resource.js index 4c94ab520ff..287dd38fcda 100644 --- a/ui/scripts/cloud.core.resource.js +++ b/ui/scripts/cloud.core.resource.js @@ -1818,6 +1818,8 @@ function initAddPrimaryStorageShortcut($midmenuAddLink2, currentPageInRightPanel if (protocol == "nfs" || protocol == "PreSetup" || protocol == "SharedMountPoint") { isValid &= validateString("Server", $thisDialog.find("#add_pool_nfs_server"), $thisDialog.find("#add_pool_nfs_server_errormsg")); isValid &= validateString("Path", $thisDialog.find("#add_pool_path"), $thisDialog.find("#add_pool_path_errormsg")); + } else if(protocol == "ocfs2") { + isValid &= validateString("Path", $thisDialog.find("#add_pool_path"), $thisDialog.find("#add_pool_path_errormsg")); } else if(protocol == "iscsi") { isValid &= validateString("Server", $thisDialog.find("#add_pool_nfs_server"), $thisDialog.find("#add_pool_nfs_server_errormsg")); isValid &= validateString("Target IQN", $thisDialog.find("#add_pool_iqn"), $thisDialog.find("#add_pool_iqn_errormsg")); @@ -1865,7 +1867,13 @@ function initAddPrimaryStorageShortcut($midmenuAddLink2, currentPageInRightPanel if(path.substring(0,1)!="/") path = "/" + path; url = presetupURL(server, path); - } + } + else if (protocol == "ocfs2") { + var path = trim($thisDialog.find("#add_pool_path").val()); + if(path.substring(0,1)!="/") + path = "/" + path; + url = ocfs2URL(server, path); + } else if (protocol == "SharedMountPoint") { var path = trim($thisDialog.find("#add_pool_path").val()); if(path.substring(0,1)!="/") @@ -1958,7 +1966,8 @@ function bindEventHandlerToDialogAddPool($dialogAddPool) { else if(clusterObj.hypervisortype == "Ovm") { $protocolSelector.empty(); $protocolSelector.append(''); - } + 
$protocolSelector.append(''); + } else { $protocolSelector.empty(); } @@ -1976,8 +1985,17 @@ function bindEventHandlerToDialogAddPool($dialogAddPool) { $dialogAddPool.find("#add_pool_nfs_server").val(""); $dialogAddPool.find("#add_pool_server_container").show(); - } - if($(this).val() == "PreSetup") { + } + else if($(this).val() == "ocfs2") {//ocfs2 is the same as nfs, except no server field. + $('li[input_group="nfs"]', $dialogAddPool).show(); + $('li[input_group="iscsi"]', $dialogAddPool).hide(); + $('li[input_group="vmfs"]', $dialogAddPool).hide(); + $dialogAddPool.find("#add_pool_path_container").find("label").text(g_dictionary["label.path"]+":"); + + $dialogAddPool.find("#add_pool_nfs_server").val(""); + $dialogAddPool.find("#add_pool_server_container").hide(); + } + else if($(this).val() == "PreSetup") { $("#add_pool_server_container", $dialogAddPool).show(); $('li[input_group="nfs"]', $dialogAddPool).show(); $('li[input_group="iscsi"]', $dialogAddPool).hide(); diff --git a/ui/scripts/cloud.core.zone.js b/ui/scripts/cloud.core.zone.js index b8919de9973..202b01f7fa5 100644 --- a/ui/scripts/cloud.core.zone.js +++ b/ui/scripts/cloud.core.zone.js @@ -985,6 +985,8 @@ function bindAddPrimaryStorageButtonOnZonePage($button, zoneId, zoneName) { if (protocol == "nfs" || protocol == "PreSetup" || protocol == "SharedMountPoint") { isValid &= validateString("Server", $thisDialog.find("#add_pool_nfs_server"), $thisDialog.find("#add_pool_nfs_server_errormsg")); isValid &= validateString("Path", $thisDialog.find("#add_pool_path"), $thisDialog.find("#add_pool_path_errormsg")); + } else if(protocol == "ocfs2") { + isValid &= validateString("Path", $thisDialog.find("#add_pool_path"), $thisDialog.find("#add_pool_path_errormsg")); } else if(protocol == "iscsi") { isValid &= validateString("Server", $thisDialog.find("#add_pool_nfs_server"), $thisDialog.find("#add_pool_nfs_server_errormsg")); isValid &= validateString("Target IQN", $thisDialog.find("#add_pool_iqn"), $thisDialog.find("#add_pool_iqn_errormsg")); @@ -1032,6 +1034,12 @@ function bindAddPrimaryStorageButtonOnZonePage($button, zoneId, zoneName) { path = "/" + path; url = presetupURL(server, path); } + else if (protocol == "ocfs2") { + var path = trim($thisDialog.find("#add_pool_path").val()); + if(path.substring(0,1)!="/") + path = "/" + path; + url = ocfs2URL(server, path); + } else if (protocol == "SharedMountPoint") { var path = trim($thisDialog.find("#add_pool_path").val()); if(path.substring(0,1)!="/") diff --git a/usage/.classpath b/usage/.classpath new file mode 100644 index 00000000000..4246dc6a6e3 --- /dev/null +++ b/usage/.classpath @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/usage/.project b/usage/.project new file mode 100644 index 00000000000..9870136a749 --- /dev/null +++ b/usage/.project @@ -0,0 +1,17 @@ + + + usage + + + + + + org.eclipse.jdt.core.javabuilder + + + + + + org.eclipse.jdt.core.javanature + + diff --git a/usage/conf/log4j-cloud_usage.xml.in b/usage/conf/log4j-cloud_usage.xml.in new file mode 100644 index 00000000000..02f5cf588a0 --- /dev/null +++ b/usage/conf/log4j-cloud_usage.xml.in @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/usage/conf/usage-components.xml.in b/usage/conf/usage-components.xml.in new file mode 100644 index 00000000000..bda902fe88f --- /dev/null +++ b/usage/conf/usage-components.xml.in @@ -0,0 +1,57 @@ + + + + + + + + 50 + -1 + + + + + + + + + + + + + + + + + + + + + + 
DAILY + + + + + diff --git a/usage/distro/centos/SYSCONFDIR/rc.d/init.d/cloud-usage.in b/usage/distro/centos/SYSCONFDIR/rc.d/init.d/cloud-usage.in new file mode 100755 index 00000000000..168a8b64b06 --- /dev/null +++ b/usage/distro/centos/SYSCONFDIR/rc.d/init.d/cloud-usage.in @@ -0,0 +1,82 @@ +#!/bin/bash + +# chkconfig: 35 99 10 +# description: CloudStack Usage Monitor + +# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well + +. /etc/rc.d/init.d/functions + +whatami=cloud-usage + +# set environment variables + +SHORTNAME="$whatami" +PIDFILE=@PIDDIR@/"$whatami".pid +LOCKFILE=@LOCKDIR@/"$SHORTNAME" +LOGFILE=@USAGELOG@ +PROGNAME="CloudStack Usage Monitor" +USER=@MSUSER@ + +unset OPTIONS +[ -r @SYSCONFDIR@/sysconfig/"$SHORTNAME" ] && source @SYSCONFDIR@/sysconfig/"$SHORTNAME" +DAEMONIZE=@BINDIR@/@PACKAGE@-daemonize +PROG=@LIBEXECDIR@/usage-runner + +start() { + echo -n $"Starting $PROGNAME: " + if hostname --fqdn >/dev/null 2>&1 ; then + daemon --check=$SHORTNAME --pidfile=${PIDFILE} "$DAEMONIZE" \ + -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" -u "$USER" "$PROG" $OPTIONS + RETVAL=$? + echo + else + failure + echo + echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr + RETVAL=9 + fi + [ $RETVAL = 0 ] && touch ${LOCKFILE} + return $RETVAL +} + +stop() { + echo -n $"Stopping $PROGNAME: " + killproc -p ${PIDFILE} $SHORTNAME # -d 10 $SHORTNAME + RETVAL=$? + echo + [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE} +} + + +# See how we were called. +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status -p ${PIDFILE} $SHORTNAME + RETVAL=$? + ;; + restart) + stop + sleep 3 + start + ;; + condrestart) + if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then + stop + sleep 3 + start + fi + ;; + *) + echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}" + RETVAL=3 +esac + +exit $RETVAL + diff --git a/usage/distro/fedora/SYSCONFDIR/rc.d/init.d/cloud-usage.in b/usage/distro/fedora/SYSCONFDIR/rc.d/init.d/cloud-usage.in new file mode 100755 index 00000000000..168a8b64b06 --- /dev/null +++ b/usage/distro/fedora/SYSCONFDIR/rc.d/init.d/cloud-usage.in @@ -0,0 +1,82 @@ +#!/bin/bash + +# chkconfig: 35 99 10 +# description: CloudStack Usage Monitor + +# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well + +. /etc/rc.d/init.d/functions + +whatami=cloud-usage + +# set environment variables + +SHORTNAME="$whatami" +PIDFILE=@PIDDIR@/"$whatami".pid +LOCKFILE=@LOCKDIR@/"$SHORTNAME" +LOGFILE=@USAGELOG@ +PROGNAME="CloudStack Usage Monitor" +USER=@MSUSER@ + +unset OPTIONS +[ -r @SYSCONFDIR@/sysconfig/"$SHORTNAME" ] && source @SYSCONFDIR@/sysconfig/"$SHORTNAME" +DAEMONIZE=@BINDIR@/@PACKAGE@-daemonize +PROG=@LIBEXECDIR@/usage-runner + +start() { + echo -n $"Starting $PROGNAME: " + if hostname --fqdn >/dev/null 2>&1 ; then + daemon --check=$SHORTNAME --pidfile=${PIDFILE} "$DAEMONIZE" \ + -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" -u "$USER" "$PROG" $OPTIONS + RETVAL=$? + echo + else + failure + echo + echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr + RETVAL=9 + fi + [ $RETVAL = 0 ] && touch ${LOCKFILE} + return $RETVAL +} + +stop() { + echo -n $"Stopping $PROGNAME: " + killproc -p ${PIDFILE} $SHORTNAME # -d 10 $SHORTNAME + RETVAL=$? + echo + [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE} +} + + +# See how we were called. 
+case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status -p ${PIDFILE} $SHORTNAME + RETVAL=$? + ;; + restart) + stop + sleep 3 + start + ;; + condrestart) + if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then + stop + sleep 3 + start + fi + ;; + *) + echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}" + RETVAL=3 +esac + +exit $RETVAL + diff --git a/usage/distro/rhel/SYSCONFDIR/rc.d/init.d/cloud-usage.in b/usage/distro/rhel/SYSCONFDIR/rc.d/init.d/cloud-usage.in new file mode 100644 index 00000000000..168a8b64b06 --- /dev/null +++ b/usage/distro/rhel/SYSCONFDIR/rc.d/init.d/cloud-usage.in @@ -0,0 +1,82 @@ +#!/bin/bash + +# chkconfig: 35 99 10 +# description: CloudStack Usage Monitor + +# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well + +. /etc/rc.d/init.d/functions + +whatami=cloud-usage + +# set environment variables + +SHORTNAME="$whatami" +PIDFILE=@PIDDIR@/"$whatami".pid +LOCKFILE=@LOCKDIR@/"$SHORTNAME" +LOGFILE=@USAGELOG@ +PROGNAME="CloudStack Usage Monitor" +USER=@MSUSER@ + +unset OPTIONS +[ -r @SYSCONFDIR@/sysconfig/"$SHORTNAME" ] && source @SYSCONFDIR@/sysconfig/"$SHORTNAME" +DAEMONIZE=@BINDIR@/@PACKAGE@-daemonize +PROG=@LIBEXECDIR@/usage-runner + +start() { + echo -n $"Starting $PROGNAME: " + if hostname --fqdn >/dev/null 2>&1 ; then + daemon --check=$SHORTNAME --pidfile=${PIDFILE} "$DAEMONIZE" \ + -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" -u "$USER" "$PROG" $OPTIONS + RETVAL=$? + echo + else + failure + echo + echo The host name does not resolve properly to an IP address. Cannot start "$PROGNAME". > /dev/stderr + RETVAL=9 + fi + [ $RETVAL = 0 ] && touch ${LOCKFILE} + return $RETVAL +} + +stop() { + echo -n $"Stopping $PROGNAME: " + killproc -p ${PIDFILE} $SHORTNAME # -d 10 $SHORTNAME + RETVAL=$? + echo + [ $RETVAL = 0 ] && rm -f ${LOCKFILE} ${PIDFILE} +} + + +# See how we were called. +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status -p ${PIDFILE} $SHORTNAME + RETVAL=$? + ;; + restart) + stop + sleep 3 + start + ;; + condrestart) + if status -p ${PIDFILE} $SHORTNAME >&/dev/null; then + stop + sleep 3 + start + fi + ;; + *) + echo $"Usage: $whatami {start|stop|restart|condrestart|status|help}" + RETVAL=3 +esac + +exit $RETVAL + diff --git a/usage/distro/ubuntu/SYSCONFDIR/init.d/cloud-usage.in b/usage/distro/ubuntu/SYSCONFDIR/init.d/cloud-usage.in new file mode 100755 index 00000000000..e59fc2acecb --- /dev/null +++ b/usage/distro/ubuntu/SYSCONFDIR/init.d/cloud-usage.in @@ -0,0 +1,96 @@ +#!/bin/bash + +# chkconfig: 35 99 10 +# description: CloudStack Usage Monitor + +# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well + +. /lib/lsb/init-functions +. /etc/default/rcS + +whatami=cloud-usage + +# set environment variables + +SHORTNAME="$whatami" +PIDFILE=@PIDDIR@/"$whatami".pid +LOCKFILE=@LOCKDIR@/"$SHORTNAME" +LOGFILE=@USAGELOG@ +PROGNAME="CloudStack Usage Monitor" +USER=@MSUSER@ + +unset OPTIONS +[ -r @SYSCONFDIR@/default/"$SHORTNAME" ] && source @SYSCONFDIR@/default/"$SHORTNAME" +DAEMONIZE=@BINDIR@/@PACKAGE@-daemonize +PROG=@LIBEXECDIR@/usage-runner + +start() { + log_daemon_msg $"Starting $PROGNAME" "$SHORTNAME" + if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then + log_progress_msg "apparently already running" + log_end_msg 0 + exit 0 + fi + if hostname --fqdn >/dev/null 2>&1 ; then + true + else + log_failure_msg "The host name does not resolve properly to an IP address. 
Cannot start $PROGNAME" + log_end_msg 1 + exit 1 + fi + + if start-stop-daemon --start --quiet \ + --pidfile "$PIDFILE" \ + --exec "$DAEMONIZE" -- -n "$SHORTNAME" -p "$PIDFILE" -l "$LOGFILE" -u "$USER" "$PROG" $OPTIONS + RETVAL=$? + then + rc=0 + sleep 1 + if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then + log_failure_msg "$PROG failed to start" + rc=1 + fi + else + rc=1 + fi + + if [ $rc -eq 0 ]; then + log_end_msg 0 + else + log_end_msg 1 + rm -f "$PIDFILE" + fi +} + +stop() { + echo -n $"Stopping $PROGNAME" "$SHORTNAME" + start-stop-daemon --stop --quiet --oknodo --pidfile "$PIDFILE" + log_end_msg $? + rm -f "$PIDFILE" +} + + +# See how we were called. +case "$1" in + start) + start + ;; + stop) + stop + ;; + status) + status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME" + RETVAL=$? + ;; + restart) + stop + sleep 3 + start + ;; + *) + echo $"Usage: $whatami {start|stop|restart|status|help}" + RETVAL=3 +esac + +exit $RETVAL + diff --git a/usage/libexec/usage-runner.in b/usage/libexec/usage-runner.in new file mode 100755 index 00000000000..3eb948383b7 --- /dev/null +++ b/usage/libexec/usage-runner.in @@ -0,0 +1,21 @@ +#!/bin/bash + +SYSTEMJARS="@SYSTEMJARS@" +SCP=$(build-classpath $SYSTEMJARS) ; if [ $? != 0 ] ; then SCP="@SYSTEMCLASSPATH@" ; fi +DCP="@DEPSCLASSPATH@" +ACP="@USAGECLASSPATH@" +export CLASSPATH=$SCP:$DCP:$ACP:@USAGESYSCONFDIR@ +for jarfile in "@PREMIUMJAVADIR@"/* ; do + if [ ! -e "$jarfile" ] ; then continue ; fi + CLASSPATH=$jarfile:$CLASSPATH +done +for plugin in "@PLUGINJAVADIR@"/* ; do + if [ ! -e "$plugin" ] ; then continue ; fi + CLASSPATH=$plugin:$CLASSPATH +done +export CLASSPATH + +set -e +echo Current directory is "$PWD" +echo CLASSPATH to run the usage server: "$CLASSPATH" +exec java -cp "$CLASSPATH" -Dpid=$$ -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@USAGELOGDIR@ "$@" com.cloud.usage.UsageServer diff --git a/usage/scripts/usageserver.sh b/usage/scripts/usageserver.sh new file mode 100755 index 00000000000..ddc9fee5d37 --- /dev/null +++ b/usage/scripts/usageserver.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + + + + # + # Copyright (C) 2011 Cloud.com, Inc. All rights reserved. + # + + +# run the usage server + +PATHSEP=':' +if [[ $OSTYPE == "cygwin" ]]; then + PATHSEP=';' + export CATALINA_HOME=`cygpath -m $CATALINA_HOME` +fi + +CP=./$PATHSEP$CATALINA_HOME/webapps/client/WEB-INF/lib/vmops-server.jar +CP=${CP}$PATHSEP$CATALINA_HOME/webapps/client/WEB-INF/lib/vmops-server-extras.jar +CP=${CP}$PATHSEP$CATALINA_HOME/webapps/client/WEB-INF/lib/vmops-utils.jar +CP=${CP}$PATHSEP$CATALINA_HOME/webapps/client/WEB-INF/lib/vmops-core.jar +CP=${CP}$PATHSEP$CATALINA_HOME/webapps/client/WEB-INF/lib/vmops-usage.jar +CP=${CP}$PATHSEP$CATALINA_HOME/conf + +for file in $CATALINA_HOME/lib/*.jar; do + CP=${CP}$PATHSEP$file +done + +#echo CP is $CP +DEBUG_OPTS= +#DEBUG_OPTS=-Xrunjdwp:transport=dt_socket,address=$1,server=y,suspend=n + +java -cp $CP $DEBUG_OPTS -Dcatalina.home=${CATALINA_HOME} -Dpid=$$ com.vmops.usage.UsageServer $* diff --git a/usage/src/com/cloud/usage/UsageAlertManagerImpl.java b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java new file mode 100644 index 00000000000..4779363fc55 --- /dev/null +++ b/usage/src/com/cloud/usage/UsageAlertManagerImpl.java @@ -0,0 +1,276 @@ +/** + * * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. 
+ * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + + +* + * + */ + +package com.cloud.usage; + +import java.io.UnsupportedEncodingException; +import java.util.Date; +import java.util.Map; +import java.util.Properties; + +import javax.ejb.Local; +import javax.mail.Authenticator; +import javax.mail.MessagingException; +import javax.mail.PasswordAuthentication; +import javax.mail.Session; +import javax.mail.URLName; +import javax.mail.Message.RecipientType; +import javax.mail.internet.InternetAddress; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; + +import com.cloud.alert.AlertManager; +import com.cloud.alert.AlertVO; +import com.cloud.alert.dao.AlertDao; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.component.ComponentLocator; +import com.sun.mail.smtp.SMTPMessage; +import com.sun.mail.smtp.SMTPSSLTransport; +import com.sun.mail.smtp.SMTPTransport; + +@Local(value={AlertManager.class}) +public class UsageAlertManagerImpl implements AlertManager { + private static final Logger s_logger = Logger.getLogger(UsageAlertManagerImpl.class.getName()); + + private String _name = null; + private EmailAlert _emailAlert; + private AlertDao _alertDao; + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + _name = name; + + ComponentLocator locator = ComponentLocator.getCurrentLocator(); + ConfigurationDao configDao = locator.getDao(ConfigurationDao.class); + if (configDao == null) { + s_logger.error("Unable to get the configuration dao."); + return false; + } + + Map configs = configDao.getConfiguration("management-server", params); + + // set up the email system for alerts + String emailAddressList = configs.get("alert.email.addresses"); + String[] emailAddresses = null; + if (emailAddressList != null) { + emailAddresses = emailAddressList.split(","); + } + + String smtpHost = configs.get("alert.smtp.host"); + int smtpPort = NumbersUtil.parseInt(configs.get("alert.smtp.port"), 25); + String useAuthStr = configs.get("alert.smtp.useAuth"); + boolean useAuth = ((useAuthStr == null) ? 
false : Boolean.parseBoolean(useAuthStr)); + String smtpUsername = configs.get("alert.smtp.username"); + String smtpPassword = configs.get("alert.smtp.password"); + String emailSender = configs.get("alert.email.sender"); + String smtpDebugStr = configs.get("alert.smtp.debug"); + boolean smtpDebug = false; + if (smtpDebugStr != null) { + smtpDebug = Boolean.parseBoolean(smtpDebugStr); + } + + _emailAlert = new EmailAlert(emailAddresses, smtpHost, smtpPort, useAuth, smtpUsername, smtpPassword, emailSender, smtpDebug); + + _alertDao = locator.getDao(AlertDao.class); + if (_alertDao == null) { + s_logger.error("Unable to get the alert dao."); + return false; + } + + return true; + } + + @Override + public String getName() { + return _name; + } + + @Override + public boolean start() { + return true; + } + + @Override + public boolean stop() { + return true; + } + + @Override + public void clearAlert(short alertType, long dataCenterId, long podId) { + try { + if (_emailAlert != null) { + _emailAlert.clearAlert(alertType, dataCenterId, podId); + } + } catch (Exception ex) { + s_logger.error("Problem clearing email alert", ex); + } + } + + @Override + public void sendAlert(short alertType, long dataCenterId, Long podId, String subject, String body) { + // TODO: queue up these messages and send them as one set of issues once a certain number of issues is reached? If that's the case, + // shouldn't we have a type/severity as part of the API so that severe errors get sent right away? + try { + if (_emailAlert != null) { + _emailAlert.sendAlert(alertType, dataCenterId, podId, subject, body); + } + } catch (Exception ex) { + s_logger.error("Problem sending email alert", ex); + } + } + + + class EmailAlert { + private Session _smtpSession; + private InternetAddress[] _recipientList; + private final String _smtpHost; + private int _smtpPort = -1; + private boolean _smtpUseAuth = false; + private final String _smtpUsername; + private final String _smtpPassword; + private final String _emailSender; + + public EmailAlert(String[] recipientList, String smtpHost, int smtpPort, boolean smtpUseAuth, final String smtpUsername, final String smtpPassword, String emailSender, boolean smtpDebug) { + if (recipientList != null) { + _recipientList = new InternetAddress[recipientList.length]; + for (int i = 0; i < recipientList.length; i++) { + try { + _recipientList[i] = new InternetAddress(recipientList[i], recipientList[i]); + } catch (Exception ex) { + s_logger.error("Exception creating address for: " + recipientList[i], ex); + } + } + } + + _smtpHost = smtpHost; + _smtpPort = smtpPort; + _smtpUseAuth = smtpUseAuth; + _smtpUsername = smtpUsername; + _smtpPassword = smtpPassword; + _emailSender = emailSender; + + if (_smtpHost != null) { + Properties smtpProps = new Properties(); + smtpProps.put("mail.smtp.host", smtpHost); + smtpProps.put("mail.smtp.port", smtpPort); + smtpProps.put("mail.smtp.auth", ""+smtpUseAuth); + if (smtpUsername != null) { + smtpProps.put("mail.smtp.user", smtpUsername); + } + + smtpProps.put("mail.smtps.host", smtpHost); + smtpProps.put("mail.smtps.port", smtpPort); + smtpProps.put("mail.smtps.auth", ""+smtpUseAuth); + if (smtpUsername != null) { + smtpProps.put("mail.smtps.user", smtpUsername); + } + + if ((smtpUsername != null) && (smtpPassword != null)) { + _smtpSession = Session.getInstance(smtpProps, new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(smtpUsername, smtpPassword); + } + }); + } else { + 
_smtpSession = Session.getInstance(smtpProps); + } + _smtpSession.setDebug(smtpDebug); + } else { + _smtpSession = null; + } + } + + // TODO: make sure this handles SSL transport (useAuth is true) and regular + public void sendAlert(short alertType, long dataCenterId, Long podId, String subject, String content) throws MessagingException, UnsupportedEncodingException { + AlertVO alert = null; + + if ((alertType != AlertManager.ALERT_TYPE_HOST) && + (alertType != AlertManager.ALERT_TYPE_USERVM) && + (alertType != AlertManager.ALERT_TYPE_DOMAIN_ROUTER) && + (alertType != AlertManager.ALERT_TYPE_CONSOLE_PROXY) && + (alertType != AlertManager.ALERT_TYPE_STORAGE_MISC) && + (alertType != AlertManager.ALERT_TYPE_MANAGMENT_NODE)) { + alert = _alertDao.getLastAlert(alertType, dataCenterId, podId); + } + + if (alert == null) { + // set up a new alert + AlertVO newAlert = new AlertVO(); + newAlert.setType(alertType); + newAlert.setSubject(subject); + newAlert.setPodId(podId); + newAlert.setDataCenterId(dataCenterId); + newAlert.setSentCount(1); // initialize sent count to 1 since we are now sending an alert + newAlert.setLastSent(new Date()); + _alertDao.persist(newAlert); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Have already sent: " + alert.getSentCount() + " emails for alert type '" + alertType + "' -- skipping send email"); + } + return; + } + + if (_smtpSession != null) { + SMTPMessage msg = new SMTPMessage(_smtpSession); + msg.setSender(new InternetAddress(_emailSender, _emailSender)); + msg.setFrom(new InternetAddress(_emailSender, _emailSender)); + for (InternetAddress address : _recipientList) { + msg.addRecipient(RecipientType.TO, address); + } + msg.setSubject(subject); + msg.setSentDate(new Date()); + msg.setContent(content, "text/plain"); + msg.saveChanges(); + + SMTPTransport smtpTrans = null; + if (_smtpUseAuth) { + smtpTrans = new SMTPSSLTransport(_smtpSession, new URLName("smtp", _smtpHost, _smtpPort, null, _smtpUsername, _smtpPassword)); + } else { + smtpTrans = new SMTPTransport(_smtpSession, new URLName("smtp", _smtpHost, _smtpPort, null, _smtpUsername, _smtpPassword)); + } + smtpTrans.connect(); + smtpTrans.sendMessage(msg, msg.getAllRecipients()); + smtpTrans.close(); + } + } + + public void clearAlert(short alertType, long dataCenterId, Long podId) { + if (alertType != -1) { + AlertVO alert = _alertDao.getLastAlert(alertType, dataCenterId, podId); + if (alert != null) { + AlertVO updatedAlert = _alertDao.createForUpdate(); + updatedAlert.setResolved(new Date()); + _alertDao.update(alert.getId(), updatedAlert); + } + } + } + } + + @Override + public void recalculateCapacity() { + // TODO Auto-generated method stub + + } +} diff --git a/usage/src/com/cloud/usage/UsageManager.java b/usage/src/com/cloud/usage/UsageManager.java new file mode 100644 index 00000000000..47a46459ecf --- /dev/null +++ b/usage/src/com/cloud/usage/UsageManager.java @@ -0,0 +1,30 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + +* + * + */ + +package com.cloud.usage; + +import com.cloud.usage.UsageJobVO; +import com.cloud.utils.component.Manager; + +public interface UsageManager extends Manager { + public void scheduleParse(); + public void parse(UsageJobVO job, long startDateMillis, long endDateMillis); +} diff --git a/usage/src/com/cloud/usage/UsageManagerImpl.java b/usage/src/com/cloud/usage/UsageManagerImpl.java new file mode 100644 index 00000000000..70d190d754a --- /dev/null +++ b/usage/src/com/cloud/usage/UsageManagerImpl.java @@ -0,0 +1,1425 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + + +* + * + */ + +package com.cloud.usage; + +import java.net.InetAddress; +import java.sql.SQLException; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import javax.ejb.Local; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; + +import com.cloud.alert.AlertManager; +import com.cloud.configuration.dao.ConfigurationDao; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventVO; +import com.cloud.event.dao.UsageEventDao; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsageIPAddressDao; +import com.cloud.usage.dao.UsageJobDao; +import com.cloud.usage.dao.UsageLoadBalancerPolicyDao; +import com.cloud.usage.dao.UsageNetworkDao; +import com.cloud.usage.dao.UsageNetworkOfferingDao; +import com.cloud.usage.dao.UsagePortForwardingRuleDao; +import com.cloud.usage.dao.UsageStorageDao; +import com.cloud.usage.dao.UsageVMInstanceDao; +import com.cloud.usage.dao.UsageVolumeDao; +import com.cloud.usage.parser.IPAddressUsageParser; +import com.cloud.usage.parser.LoadBalancerUsageParser; +import com.cloud.usage.parser.NetworkOfferingUsageParser; +import com.cloud.usage.parser.NetworkUsageParser; +import com.cloud.usage.parser.PortForwardingUsageParser; +import com.cloud.usage.parser.StorageUsageParser; +import com.cloud.usage.parser.VMInstanceUsageParser; +import com.cloud.usage.parser.VolumeUsageParser; +import com.cloud.user.Account; +import com.cloud.user.AccountVO; +import com.cloud.user.UserStatisticsVO; +import com.cloud.user.dao.AccountDao; +import com.cloud.user.dao.UserStatisticsDao; +import com.cloud.utils.component.ComponentLocator; +import com.cloud.utils.component.Inject; +import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Filter; +import 
com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.exception.CloudRuntimeException; + +@Local(value={UsageManager.class}) +public class UsageManagerImpl implements UsageManager, Runnable { + public static final Logger s_logger = Logger.getLogger(UsageManagerImpl.class.getName()); + + protected static final String DAILY = "DAILY"; + protected static final String WEEKLY = "WEEKLY"; + protected static final String MONTHLY = "MONTHLY"; + + private static final int HOURLY_TIME = 60; + private static final int DAILY_TIME = 60 * 24; + private static final int THREE_DAYS_IN_MINUTES = 60 * 24 * 3; + private static final int USAGE_AGGREGATION_RANGE_MIN = 10; + + private final ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private final AccountDao m_accountDao = _locator.getDao(AccountDao.class); + private final UserStatisticsDao m_userStatsDao = _locator.getDao(UserStatisticsDao.class); + private final UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private final UsageVMInstanceDao m_usageInstanceDao = _locator.getDao(UsageVMInstanceDao.class); + private final UsageIPAddressDao m_usageIPAddressDao = _locator.getDao(UsageIPAddressDao.class); + private final UsageNetworkDao m_usageNetworkDao = _locator.getDao(UsageNetworkDao.class); + private final UsageVolumeDao m_usageVolumeDao = _locator.getDao(UsageVolumeDao.class); + private final UsageStorageDao m_usageStorageDao = _locator.getDao(UsageStorageDao.class); + private final UsageLoadBalancerPolicyDao m_usageLoadBalancerPolicyDao = _locator.getDao(UsageLoadBalancerPolicyDao.class); + private final UsagePortForwardingRuleDao m_usagePortForwardingRuleDao = _locator.getDao(UsagePortForwardingRuleDao.class); + private final UsageNetworkOfferingDao m_usageNetworkOfferingDao = _locator.getDao(UsageNetworkOfferingDao.class); + private final UsageJobDao m_usageJobDao = _locator.getDao(UsageJobDao.class); + @Inject protected AlertManager _alertMgr; + @Inject protected UsageEventDao _usageEventDao; + + private String m_version = null; + private String m_name = null; + private final Calendar m_jobExecTime = Calendar.getInstance(); + private int m_aggregationDuration = 0; + private int m_sanityCheckInterval = 0; + String m_hostname = null; + int m_pid = 0; + TimeZone m_usageTimezone = null; + private final GlobalLock m_heartbeatLock = GlobalLock.getInternLock("usage.job.heartbeat.check"); + + private final ScheduledExecutorService m_executor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("Usage-Job")); + private final ScheduledExecutorService m_heartbeatExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("Usage-HB")); + private final ScheduledExecutorService m_sanityExecutor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("Usage-Sanity")); + private Future m_scheduledFuture = null; + private Future m_heartbeat = null; + private Future m_sanity = null; + + protected UsageManagerImpl() { + } + + private void mergeConfigs(Map dbParams, Map xmlParams) { + for (Map.Entry param : xmlParams.entrySet()) { + dbParams.put(param.getKey(), (String)param.getValue()); + } + } + + public boolean configure(String name, Map params) throws ConfigurationException { + final String run = "usage.vmops.pid"; + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Checking to see if " + run + " exists."); + } + + final Class c = UsageServer.class; + 
m_version = c.getPackage().getImplementationVersion(); + if (m_version == null) { + throw new CloudRuntimeException("Unable to find the implementation version of this usage server"); + } + + if (s_logger.isInfoEnabled()) { + s_logger.info("Implementation Version is " + m_version); + } + + m_name = name; + + ComponentLocator locator = ComponentLocator.getCurrentLocator(); + ConfigurationDao configDao = locator.getDao(ConfigurationDao.class); + if (configDao == null) { + s_logger.error("Unable to get the configuration dao."); + return false; + } + + Map configs = configDao.getConfiguration(params); + + if (params != null) { + mergeConfigs(configs, params); + } + + String execTime = configs.get("usage.stats.job.exec.time"); + String aggregationRange = configs.get("usage.stats.job.aggregation.range"); + String execTimeZone = configs.get("usage.execution.timezone"); + String sanityCheckInterval = configs.get("usage.sanity.check.interval"); + if(sanityCheckInterval != null){ + m_sanityCheckInterval = Integer.parseInt(sanityCheckInterval); + } + + m_usageTimezone = TimeZone.getTimeZone("GMT"); + + try { + if ((execTime == null) || (aggregationRange == null)) { + s_logger.error("missing configuration values for usage job, usage.stats.job.exec.time = " + execTime + ", usage.stats.job.aggregation.range = " + aggregationRange); + throw new ConfigurationException("Missing configuration values for usage job, usage.stats.job.exec.time = " + execTime + ", usage.stats.job.aggregation.range = " + aggregationRange); + } + String[] execTimeSegments = execTime.split(":"); + if (execTimeSegments.length != 2) { + s_logger.error("Unable to parse usage.stats.job.exec.time"); + throw new ConfigurationException("Unable to parse usage.stats.job.exec.time '" + execTime + "'"); + } + int hourOfDay = Integer.parseInt(execTimeSegments[0]); + int minutes = Integer.parseInt(execTimeSegments[1]); + m_jobExecTime.setTime(new Date()); + + m_jobExecTime.set(Calendar.HOUR_OF_DAY, hourOfDay); + m_jobExecTime.set(Calendar.MINUTE, minutes); + m_jobExecTime.set(Calendar.SECOND, 0); + m_jobExecTime.set(Calendar.MILLISECOND, 0); + if(execTimeZone != null){ + m_jobExecTime.setTimeZone(TimeZone.getTimeZone(execTimeZone)); + } + + // if the hour to execute the job has already passed, roll the day forward to the next day + Date execDate = m_jobExecTime.getTime(); + if (execDate.before(new Date())) { + m_jobExecTime.roll(Calendar.DAY_OF_YEAR, true); + } + + s_logger.debug("Execution Time: "+execDate.toString()); + Date currentDate = new Date(System.currentTimeMillis()); + s_logger.debug("Current Time: "+currentDate.toString()); + + m_aggregationDuration = Integer.parseInt(aggregationRange); + if (m_aggregationDuration < USAGE_AGGREGATION_RANGE_MIN) { + s_logger.warn("Usage stats job aggregation range is too small, using the minimum value of " + USAGE_AGGREGATION_RANGE_MIN); + m_aggregationDuration = USAGE_AGGREGATION_RANGE_MIN; + } + m_hostname = InetAddress.getLocalHost().getHostName() + "/" + InetAddress.getLocalHost().getHostAddress(); + } catch (NumberFormatException ex) { + throw new ConfigurationException("Unable to parse usage.stats.job.exec.time '" + execTime + "' or usage.stats.job.aggregation.range '" + aggregationRange + "', please check configuration values"); + } catch (Exception e) { + s_logger.error("Unhandled exception configuring UsageManager", e); + throw new ConfigurationException("Unhandled exception configuring UsageManager " + e.toString()); + } + m_pid = Integer.parseInt(System.getProperty("pid")); + return true; + }
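// [Editor's illustration, not part of the original patch] configure() above stores the two global settings
// that drive the usage job schedule; a minimal sketch of how they are interpreted, assuming the example
// values shown here:
//
//   usage.stats.job.exec.time         = "00:15"  (HH:mm, applied in usage.execution.timezone; rolled to the
//                                                 next day if that time of day has already passed)
//   usage.stats.job.aggregation.range = "1440"   (minutes between runs; values below
//                                                 USAGE_AGGREGATION_RANGE_MIN are clamped up to 10)
//
// start() below then schedules the recurring job from exactly these two values, roughly:
//   m_executor.scheduleAtFixedRate(this, m_jobExecTime.getTimeInMillis() - System.currentTimeMillis(),
//                                  m_aggregationDuration * 60 * 1000, TimeUnit.MILLISECONDS);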
+ + public String getName() { + return m_name; + } + + public boolean start() { + if (s_logger.isInfoEnabled()) { + s_logger.info("Starting Usage Manager"); + } + + // use the configured exec time and aggregation duration for scheduling the job + m_scheduledFuture = m_executor.scheduleAtFixedRate(this, m_jobExecTime.getTimeInMillis() - System.currentTimeMillis(), m_aggregationDuration * 60 * 1000, TimeUnit.MILLISECONDS); + + m_heartbeat = m_heartbeatExecutor.scheduleAtFixedRate(new Heartbeat(), /* start in 15 seconds...*/15*1000, /* check database every minute*/60*1000, TimeUnit.MILLISECONDS); + + if(m_sanityCheckInterval > 0){ + m_sanity = m_sanityExecutor.scheduleAtFixedRate(new SanityCheck(), 1, m_sanityCheckInterval, TimeUnit.DAYS); + } + + Transaction usageTxn = Transaction.open(Transaction.USAGE_DB); + try { + if(m_heartbeatLock.lock(3)) { // 3 second timeout + try { + UsageJobVO job = m_usageJobDao.getLastJob(); + if (job == null) { + m_usageJobDao.createNewJob(m_hostname, m_pid, UsageJobVO.JOB_TYPE_RECURRING); + } + } finally { + m_heartbeatLock.unlock(); + } + } else { + if(s_logger.isTraceEnabled()) + s_logger.trace("Heartbeat lock is in use by others, returning true as someone else will take over the job if required"); + } + } finally { + usageTxn.close(); + } + + return true; + } + + public boolean stop() { + m_heartbeat.cancel(true); + m_scheduledFuture.cancel(true); + m_sanity.cancel(true); + return true; + } + + public void run() { + if (s_logger.isInfoEnabled()) { + s_logger.info("starting usage job..."); + } + + // how about we update the job exec time when the job starts??? + long execTime = m_jobExecTime.getTimeInMillis(); + long now = System.currentTimeMillis() + 2000; // 2 second buffer since jobs can run a little early (though usually just by milliseconds) + + if (execTime < now) { + // if exec time is in the past, calculate the next time the job will execute...if this is a one-off job that is a result + // of scheduleParse() then don't update the next exec time... + m_jobExecTime.add(Calendar.MINUTE, m_aggregationDuration); + } + + UsageJobVO job = m_usageJobDao.isOwner(m_hostname, m_pid); + if (job != null) { + // FIXME: we really need to do a better job of not missing any events...so we should some how + // keep track of the last time usage was run, then go from there... + // For executing the job, we treat hourly and daily as special time ranges, using the previous full hour or the previous + // full day. Otherwise we just subtract off the aggregation range from the current time and use that as start date with + // current time as end date. 
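// [Editor's illustration, not part of the original patch] A worked example of the three window choices
// described in the comment above, assuming the job fires at 2011-07-01 04:05 in m_usageTimezone:
//   m_aggregationDuration == DAILY_TIME  (1440) -> 2011-06-30 00:00:00.000 to 2011-06-30 23:59:59.999 (previous full day)
//   m_aggregationDuration == HOURLY_TIME (60)   -> 2011-07-01 03:00:00.000 to 2011-07-01 03:59:59.999 (previous full hour)
//   any other value, e.g. 10                    -> 2011-07-01 03:55 to 2011-07-01 04:05 (now minus the range, up to now)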
+ Calendar cal = Calendar.getInstance(m_usageTimezone); + cal.setTime(new Date()); + long startDate = 0; + long endDate = 0; + if (m_aggregationDuration == DAILY_TIME) { + cal.roll(Calendar.DAY_OF_YEAR, false); + cal.set(Calendar.HOUR_OF_DAY, 0); + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); + startDate = cal.getTime().getTime(); + + cal.roll(Calendar.DAY_OF_YEAR, true); + cal.add(Calendar.MILLISECOND, -1); + endDate = cal.getTime().getTime(); + } else if (m_aggregationDuration == HOURLY_TIME) { + cal.roll(Calendar.HOUR_OF_DAY, false); + cal.set(Calendar.MINUTE, 0); + cal.set(Calendar.SECOND, 0); + cal.set(Calendar.MILLISECOND, 0); + startDate = cal.getTime().getTime(); + + cal.roll(Calendar.HOUR_OF_DAY, true); + cal.add(Calendar.MILLISECOND, -1); + endDate = cal.getTime().getTime(); + } else { + endDate = cal.getTime().getTime(); // current time + cal.add(Calendar.MINUTE, -1*m_aggregationDuration); + startDate = cal.getTime().getTime(); + } + + parse(job, startDate, endDate); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Not owner of usage job, skipping..."); + } + } + if (s_logger.isInfoEnabled()) { + s_logger.info("usage job complete"); + } + } + + public void scheduleParse() { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Scheduling Usage job..."); + } + m_executor.schedule(this, 0, TimeUnit.MILLISECONDS); + } + + public void parse(UsageJobVO job, long startDateMillis, long endDateMillis) { + // TODO: Shouldn't we also allow parsing by the type of usage? + + boolean success = false; + long timeStart = System.currentTimeMillis(); + long deleteOldStatsTimeMillis = 0L; + try { + if ((endDateMillis == 0) || (endDateMillis > timeStart)) { + endDateMillis = timeStart; + } + + long lastSuccess = m_usageJobDao.getLastJobSuccessDateMillis(); + if (lastSuccess != 0) { + startDateMillis = lastSuccess+1; // 1 millisecond after + } + + if (startDateMillis >= endDateMillis) { + if (s_logger.isInfoEnabled()) { + s_logger.info("not parsing usage records since start time mills (" + startDateMillis + ") is on or after end time millis (" + endDateMillis + ")"); + } + + Transaction jobUpdateTxn = Transaction.open(Transaction.USAGE_DB); + try { + jobUpdateTxn.start(); + // everything seemed to work...set endDate as the last success date + m_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success); + + // create a new job if this is a recurring job + if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) { + m_usageJobDao.createNewJob(m_hostname, m_pid, UsageJobVO.JOB_TYPE_RECURRING); + } + jobUpdateTxn.commit(); + } finally { + jobUpdateTxn.close(); + } + + return; + } + deleteOldStatsTimeMillis = startDateMillis; + + Date startDate = new Date(startDateMillis); + Date endDate = new Date(endDateMillis); + if (s_logger.isInfoEnabled()) { + s_logger.info("Parsing usage records between " + startDate + " and " + endDate); + } + + List accounts = null; + List userStats = null; + Map networkStats = null; + Transaction userTxn = Transaction.open(Transaction.CLOUD_DB); + try { + Long limit = Long.valueOf(500); + Long offset = Long.valueOf(0); + Long lastAccountId = m_usageDao.getLastAccountId(); + if (lastAccountId == null) { + lastAccountId = Long.valueOf(0); + } + + do { + Filter filter = new Filter(AccountVO.class, "id", true, offset, limit); + + accounts = m_accountDao.findActiveAccounts(lastAccountId, filter); + + if ((accounts != null) && !accounts.isEmpty()) { + // 
now update the accounts in the cloud_usage db + m_usageDao.updateAccounts(accounts); + } + offset = new Long(offset.longValue() + limit.longValue()); + } while ((accounts != null) && !accounts.isEmpty()); + + // reset offset + offset = Long.valueOf(0); + + do { + Filter filter = new Filter(AccountVO.class, "id", true, offset, limit); + + accounts = m_accountDao.findRecentlyDeletedAccounts(lastAccountId, startDate, filter); + + if ((accounts != null) && !accounts.isEmpty()) { + // now update the accounts in the cloud_usage db + m_usageDao.updateAccounts(accounts); + } + offset = new Long(offset.longValue() + limit.longValue()); + } while ((accounts != null) && !accounts.isEmpty()); + + // reset offset + offset = Long.valueOf(0); + + do { + Filter filter = new Filter(AccountVO.class, "id", true, offset, limit); + + accounts = m_accountDao.findNewAccounts(lastAccountId, filter); + + if ((accounts != null) && !accounts.isEmpty()) { + // now copy the accounts to cloud_usage db + m_usageDao.saveAccounts(accounts); + } + offset = new Long(offset.longValue() + limit.longValue()); + } while ((accounts != null) && !accounts.isEmpty()); + + // reset offset + offset = Long.valueOf(0); + + // get all the user stats to create usage records for the network usage + Long lastUserStatsId = m_usageDao.getLastUserStatsId(); + if (lastUserStatsId == null) { + lastUserStatsId = Long.valueOf(0); + } + + SearchCriteria sc2 = m_userStatsDao.createSearchCriteria(); + sc2.addAnd("id", SearchCriteria.Op.LTEQ, lastUserStatsId); + do { + Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit); + + userStats = m_userStatsDao.search(sc2, filter); + + if ((userStats != null) && !userStats.isEmpty()) { + // now copy the accounts to cloud_usage db + m_usageDao.updateUserStats(userStats); + } + offset = new Long(offset.longValue() + limit.longValue()); + } while ((userStats != null) && !userStats.isEmpty()); + + // reset offset + offset = Long.valueOf(0); + + sc2 = m_userStatsDao.createSearchCriteria(); + sc2.addAnd("id", SearchCriteria.Op.GT, lastUserStatsId); + do { + Filter filter = new Filter(UserStatisticsVO.class, "id", true, offset, limit); + + userStats = m_userStatsDao.search(sc2, filter); + + if ((userStats != null) && !userStats.isEmpty()) { + // now copy the accounts to cloud_usage db + m_usageDao.saveUserStats(userStats); + } + offset = new Long(offset.longValue() + limit.longValue()); + } while ((userStats != null) && !userStats.isEmpty()); + } finally { + userTxn.close(); + } + + // TODO: Fetch a maximum number of events and process them before moving on to the next range of events + + // - get a list of the latest events + // - insert the latest events into the usage.events table + List events = _usageEventDao.getRecentEvents(new Date(endDateMillis)); + + + Transaction usageTxn = Transaction.open(Transaction.USAGE_DB); + try { + usageTxn.start(); + + // make sure start date is before all of our un-processed events (the events are ordered oldest + // to newest, so just test against the first event) + if ((events != null) && (events.size() > 0)) { + Date oldestEventDate = events.get(0).getCreateDate(); + if (oldestEventDate.getTime() < startDateMillis) { + startDateMillis = oldestEventDate.getTime(); + startDate = new Date(startDateMillis); + } + + // - loop over the list of events and create entries in the helper tables + // - create the usage records using the parse methods below + for (UsageEventVO event : events) { + event.setProcessed(true); + _usageEventDao.update(event.getId(), 
event); + createHelperRecord(event); + } + } + + // TODO: Fetch a maximum number of user stats and process them before moving on to the next range of user stats + + // get user stats in order to compute network usage + networkStats = m_usageNetworkDao.getRecentNetworkStats(); + + Calendar recentlyDeletedCal = Calendar.getInstance(m_usageTimezone); + recentlyDeletedCal.setTimeInMillis(startDateMillis); + recentlyDeletedCal.add(Calendar.MINUTE, -1*THREE_DAYS_IN_MINUTES); + Date recentlyDeletedDate = recentlyDeletedCal.getTime(); + + // Keep track of user stats for an account, across all of its public IPs + Map aggregatedStats = new HashMap(); + int startIndex = 0; + do { + userStats = m_userStatsDao.listActiveAndRecentlyDeleted(recentlyDeletedDate, startIndex, 500); + + if (userStats != null) { + for (UserStatisticsVO userStat : userStats) { + if(userStat.getDeviceId() != null){ + String hostKey = userStat.getDataCenterId() + "-" + userStat.getAccountId()+"-Host-" + userStat.getDeviceId(); + UserStatisticsVO hostAggregatedStat = aggregatedStats.get(hostKey); + if (hostAggregatedStat == null) { + hostAggregatedStat = new UserStatisticsVO(userStat.getAccountId(), userStat.getDataCenterId(), userStat.getPublicIpAddress(), + userStat.getDeviceId(), userStat.getDeviceType(), userStat.getNetworkId()); + } + + hostAggregatedStat.setNetBytesSent(hostAggregatedStat.getNetBytesSent() + userStat.getNetBytesSent()); + hostAggregatedStat.setNetBytesReceived(hostAggregatedStat.getNetBytesReceived() + userStat.getNetBytesReceived()); + hostAggregatedStat.setCurrentBytesSent(hostAggregatedStat.getCurrentBytesSent() + userStat.getCurrentBytesSent()); + hostAggregatedStat.setCurrentBytesReceived(hostAggregatedStat.getCurrentBytesReceived() + userStat.getCurrentBytesReceived()); + aggregatedStats.put(hostKey, hostAggregatedStat); + } + } + } + startIndex += 500; + } while ((userStats != null) && !userStats.isEmpty()); + + // loop over the user stats, create delta entries in the usage_network helper table + int numAcctsProcessed = 0; + for (String key : aggregatedStats.keySet()) { + UsageNetworkVO currentNetworkStats = null; + if (networkStats != null) { + currentNetworkStats = networkStats.get(key); + } + + createNetworkHelperEntry(aggregatedStats.get(key), currentNetworkStats, endDateMillis); + numAcctsProcessed++; + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("created network stats helper entries for " + numAcctsProcessed + " accts"); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting old network stats helper entries older than " + deleteOldStatsTimeMillis); + } + m_usageNetworkDao.deleteOldStats(deleteOldStatsTimeMillis); + + // commit the helper records, then start a new transaction + usageTxn.commit(); + usageTxn.start(); + + boolean parsed = false; + numAcctsProcessed = 0; + + Date currentStartDate = startDate; + Date currentEndDate = endDate; + Date tempDate = endDate; + + Calendar aggregateCal = Calendar.getInstance(m_usageTimezone); + + while ((tempDate.after(startDate)) && ((tempDate.getTime() - startDate.getTime()) > 60000)){ + currentEndDate = tempDate; + aggregateCal.setTime(tempDate); + aggregateCal.add(Calendar.MINUTE, -m_aggregationDuration); + tempDate = aggregateCal.getTime(); + } + + while (!currentEndDate.after(endDate) || (currentEndDate.getTime() -endDate.getTime() < 60000)){ + Long offset = Long.valueOf(0); + Long limit = Long.valueOf(500); + + do { + Filter filter = new Filter(AccountVO.class, "id", true, offset, limit); + accounts = 
m_accountDao.listAll(filter); + if ((accounts != null) && !accounts.isEmpty()) { + for (AccountVO account : accounts) { + parsed = parseHelperTables(account, currentStartDate, currentEndDate); + numAcctsProcessed++; + } + } + offset = new Long(offset.longValue() + limit.longValue()); + } while ((accounts != null) && !accounts.isEmpty()); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("processed VM/Network Usage for " + numAcctsProcessed + " ACTIVE accts"); + } + numAcctsProcessed = 0; + + // reset offset + offset = Long.valueOf(0); + + do { + Filter filter = new Filter(AccountVO.class, "id", true, offset, limit); + + accounts = m_accountDao.findRecentlyDeletedAccounts(null, recentlyDeletedDate, filter); + + if ((accounts != null) && !accounts.isEmpty()) { + for (AccountVO account : accounts) { + parsed = parseHelperTables(account, currentStartDate, currentEndDate); + List publicTemplates = m_usageDao.listPublicTemplatesByAccount(account.getId()); + for(Long templateId : publicTemplates){ + //mark public templates owned by deleted accounts as deleted + List storageVOs = m_usageStorageDao.listById(account.getId(), templateId, StorageTypes.TEMPLATE); + if (storageVOs.size() > 1) { + s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + account.getId() + "; marking them all as deleted..."); + } + for (UsageStorageVO storageVO : storageVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting template: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); + } + storageVO.setDeleted(account.getRemoved()); + m_usageStorageDao.update(storageVO); + } + } + numAcctsProcessed++; + } + } + offset = new Long(offset.longValue() + limit.longValue()); + } while ((accounts != null) && !accounts.isEmpty()); + + currentStartDate = new Date(currentEndDate.getTime() + 1); + aggregateCal.setTime(currentEndDate); + aggregateCal.add(Calendar.MINUTE, m_aggregationDuration); + currentEndDate = aggregateCal.getTime(); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("processed Usage for " + numAcctsProcessed + " RECENTLY DELETED accts"); + } + + // FIXME: we don't break the above loop if something fails to parse, so it gets reset every account, + // do we want to break out of processing accounts and rollback if there are errors? + if (!parsed) { + usageTxn.rollback(); + } else { + success = true; + } + } catch (Exception ex) { + s_logger.error("Exception in usage manager", ex); + usageTxn.rollback(); + } finally { + // everything seemed to work...set endDate as the last success date + m_usageJobDao.updateJobSuccess(job.getId(), startDateMillis, endDateMillis, System.currentTimeMillis() - timeStart, success); + + // create a new job if this is a recurring job + if (job.getJobType() == UsageJobVO.JOB_TYPE_RECURRING) { + m_usageJobDao.createNewJob(m_hostname, m_pid, UsageJobVO.JOB_TYPE_RECURRING); + } + usageTxn.commit(); + usageTxn.close(); + + // switch back to CLOUD_DB + Transaction swap = Transaction.open(Transaction.CLOUD_DB); + if(!success){ + _alertMgr.sendAlert(AlertManager.ALERT_TYPE_USAGE_SERVER_RESULT, 0, new Long(0), "Usage job failed. Job id: "+job.getId(), "Usage job failed. 
Job id: "+job.getId()); + } else { + _alertMgr.clearAlert(AlertManager.ALERT_TYPE_USAGE_SERVER_RESULT, 0, 0); + } + swap.close(); + + } + } catch (Exception e) { + s_logger.error("Usage Manager error", e); + } + } + + private boolean parseHelperTables(AccountVO account, Date currentStartDate, Date currentEndDate){ + boolean parsed = false; + + parsed = VMInstanceUsageParser.parse(account, currentStartDate, currentEndDate); + if (s_logger.isDebugEnabled()) { + if (!parsed) { + s_logger.debug("vm usage instances successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + } + } + + parsed = NetworkUsageParser.parse(account, currentStartDate, currentEndDate); + if (s_logger.isDebugEnabled()) { + if (!parsed) { + s_logger.debug("network usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + } + } + + parsed = VolumeUsageParser.parse(account, currentStartDate, currentEndDate); + if (s_logger.isDebugEnabled()) { + if (!parsed) { + s_logger.debug("volume usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + } + } + + parsed = StorageUsageParser.parse(account, currentStartDate, currentEndDate); + if (s_logger.isDebugEnabled()) { + if (!parsed) { + s_logger.debug("storage usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + } + } + + parsed = LoadBalancerUsageParser.parse(account, currentStartDate, currentEndDate); + if (s_logger.isDebugEnabled()) { + if (!parsed) { + s_logger.debug("load balancer usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + } + } + + parsed = PortForwardingUsageParser.parse(account, currentStartDate, currentEndDate); + if (s_logger.isDebugEnabled()) { + if (!parsed) { + s_logger.debug("port forwarding usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + } + } + + parsed = NetworkOfferingUsageParser.parse(account, currentStartDate, currentEndDate); + if (s_logger.isDebugEnabled()) { + if (!parsed) { + s_logger.debug("network offering usage successfully parsed? " + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + } + } + + parsed = IPAddressUsageParser.parse(account, currentStartDate, currentEndDate); + if (s_logger.isDebugEnabled()) { + if (!parsed) { + s_logger.debug("IPAddress usage successfully parsed? 
" + parsed + " (for account: " + account.getAccountName() + ", id: " + account.getId() + ")"); + } + } + + return parsed; + } + + private void createHelperRecord(UsageEventVO event) { + String eventType = event.getType(); + if (isVMEvent(eventType)) { + createVMHelperEvent(event); + } else if (isIPEvent(eventType)) { + createIPHelperEvent(event); + } else if (isVolumeEvent(eventType)) { + createVolumeHelperEvent(event); + } else if (isTemplateEvent(eventType)) { + createTemplateHelperEvent(event); + } else if (isISOEvent(eventType)) { + createISOHelperEvent(event); + } else if (isSnapshotEvent(eventType)) { + createSnapshotHelperEvent(event); + } else if (isLoadBalancerEvent(eventType)) { + createLoadBalancerHelperEvent(event); + } else if (isPortForwardingEvent(eventType)) { + createPortForwardingHelperEvent(event); + } else if (isNetworkOfferingEvent(eventType)) { + createNetworkOfferingEvent(event); + } + } + + private boolean isVMEvent(String eventType) { + if (eventType == null) return false; + return eventType.startsWith("VM."); + } + + private boolean isIPEvent(String eventType) { + if (eventType == null) return false; + return eventType.startsWith("NET.IP"); + } + + private boolean isVolumeEvent(String eventType) { + if (eventType == null) return false; + return (eventType.equals(EventTypes.EVENT_VOLUME_CREATE) || + eventType.equals(EventTypes.EVENT_VOLUME_DELETE)); + } + + private boolean isTemplateEvent(String eventType) { + if (eventType == null) return false; + return (eventType.equals(EventTypes.EVENT_TEMPLATE_CREATE) || + eventType.equals(EventTypes.EVENT_TEMPLATE_COPY) || + eventType.equals(EventTypes.EVENT_TEMPLATE_DELETE)); + } + + private boolean isISOEvent(String eventType) { + if (eventType == null) return false; + return (eventType.equals(EventTypes.EVENT_ISO_CREATE) || + eventType.equals(EventTypes.EVENT_ISO_COPY) || + eventType.equals(EventTypes.EVENT_ISO_DELETE)); + } + + private boolean isSnapshotEvent(String eventType) { + if (eventType == null) return false; + return (eventType.equals(EventTypes.EVENT_SNAPSHOT_CREATE) || + eventType.equals(EventTypes.EVENT_SNAPSHOT_DELETE)); + } + + private boolean isLoadBalancerEvent(String eventType) { + if (eventType == null) return false; + return eventType.startsWith("LB."); + } + + private boolean isPortForwardingEvent(String eventType) { + if (eventType == null) return false; + return eventType.startsWith("NET.RULE"); + } + + private boolean isNetworkOfferingEvent(String eventType) { + if (eventType == null) return false; + return (eventType.equals(EventTypes.EVENT_NETWORK_OFFERING_CREATE) || + eventType.equals(EventTypes.EVENT_NETWORK_OFFERING_DELETE) || + eventType.equals(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN) || + eventType.equals(EventTypes.EVENT_NETWORK_OFFERING_REMOVE)); + } + + private void createVMHelperEvent(UsageEventVO event) { + + // One record for handling VM.START and VM.STOP + // One record for handling VM.CREATE and VM.DESTROY + // VM events have the parameter "id=" + long vmId = event.getResourceId(); + Long soId = event.getOfferingId();; // service offering id + long zoneId = event.getZoneId(); + String vmName = event.getResourceName(); + + if (EventTypes.EVENT_VM_START.equals(event.getType())) { + // create a new usage_vm_instance row for this VM + try { + + SearchCriteria sc = m_usageInstanceDao.createSearchCriteria(); + sc.addAnd("vmInstanceId", SearchCriteria.Op.EQ, Long.valueOf(vmId)); + sc.addAnd("endDate", SearchCriteria.Op.NULL); + sc.addAnd("usageType", SearchCriteria.Op.EQ, 
UsageTypes.RUNNING_VM); + List usageInstances = m_usageInstanceDao.search(sc, null); + if (usageInstances != null) { + if (usageInstances.size() > 0) { + s_logger.error("found entries for a vm running with id: " + vmId + ", which are not stopped. Ending them all..."); + for (UsageVMInstanceVO usageInstance : usageInstances) { + usageInstance.setEndDate(event.getCreateDate()); + m_usageInstanceDao.update(usageInstance); + } + } + } + + sc = m_usageInstanceDao.createSearchCriteria(); + sc.addAnd("vmInstanceId", SearchCriteria.Op.EQ, Long.valueOf(vmId)); + sc.addAnd("endDate", SearchCriteria.Op.NULL); + sc.addAnd("usageType", SearchCriteria.Op.EQ, UsageTypes.ALLOCATED_VM); + usageInstances = m_usageInstanceDao.search(sc, null); + if (usageInstances == null || (usageInstances.size() == 0)) { + s_logger.error("Cannot find allocated vm entry for a vm running with id: " + vmId); + } + + Long templateId = event.getTemplateId(); + String hypervisorType = event.getResourceType(); + + // add this VM to the usage helper table + UsageVMInstanceVO usageInstanceNew = new UsageVMInstanceVO(UsageTypes.RUNNING_VM, zoneId, event.getAccountId(), vmId, vmName, + soId, templateId, hypervisorType, event.getCreateDate(), null); + m_usageInstanceDao.persist(usageInstanceNew); + } catch (Exception ex) { + s_logger.error("Error saving usage instance for vm: " + vmId, ex); + } + } else if (EventTypes.EVENT_VM_STOP.equals(event.getType())) { + // find the latest usage_vm_instance row, update the stop date (should be null) to the event date + // FIXME: search criteria needs to have some kind of type information so we distinguish between START/STOP and CREATE/DESTROY + SearchCriteria sc = m_usageInstanceDao.createSearchCriteria(); + sc.addAnd("vmInstanceId", SearchCriteria.Op.EQ, Long.valueOf(vmId)); + sc.addAnd("endDate", SearchCriteria.Op.NULL); + sc.addAnd("usageType", SearchCriteria.Op.EQ, UsageTypes.RUNNING_VM); + List usageInstances = m_usageInstanceDao.search(sc, null); + if (usageInstances != null) { + if (usageInstances.size() > 1) { + s_logger.warn("found multiple entries for a vm running with id: " + vmId + ", ending them all..."); + } + for (UsageVMInstanceVO usageInstance : usageInstances) { + usageInstance.setEndDate(event.getCreateDate()); + // TODO: UsageVMInstanceVO should have an ID field and we should do updates through that field since we are really + // updating one row at a time here + m_usageInstanceDao.update(usageInstance); + } + } + } else if (EventTypes.EVENT_VM_CREATE.equals(event.getType())) { + try { + Long templateId = event.getTemplateId(); + String hypervisorType = event.getResourceType(); + // add this VM to the usage helper table + UsageVMInstanceVO usageInstanceNew = new UsageVMInstanceVO(UsageTypes.ALLOCATED_VM, zoneId, event.getAccountId(), vmId, vmName, + soId, templateId, hypervisorType, event.getCreateDate(), null); + m_usageInstanceDao.persist(usageInstanceNew); + } catch (Exception ex) { + s_logger.error("Error saving usage instance for vm: " + vmId, ex); + } + } else if (EventTypes.EVENT_VM_DESTROY.equals(event.getType())) { + SearchCriteria sc = m_usageInstanceDao.createSearchCriteria(); + sc.addAnd("vmInstanceId", SearchCriteria.Op.EQ, Long.valueOf(vmId)); + sc.addAnd("endDate", SearchCriteria.Op.NULL); + sc.addAnd("usageType", SearchCriteria.Op.EQ, UsageTypes.ALLOCATED_VM); + List usageInstances = m_usageInstanceDao.search(sc, null); + if (usageInstances != null) { + if (usageInstances.size() > 1) { + s_logger.warn("found multiple entries for a vm allocated with id: " + 
vmId + ", detroying them all..."); + } + for (UsageVMInstanceVO usageInstance : usageInstances) { + usageInstance.setEndDate(event.getCreateDate()); + m_usageInstanceDao.update(usageInstance); + } + } + } else if (EventTypes.EVENT_VM_UPGRADE.equals(event.getType())) { + SearchCriteria sc = m_usageInstanceDao.createSearchCriteria(); + sc.addAnd("vmInstanceId", SearchCriteria.Op.EQ, Long.valueOf(vmId)); + sc.addAnd("endDate", SearchCriteria.Op.NULL); + sc.addAnd("usageType", SearchCriteria.Op.EQ, UsageTypes.ALLOCATED_VM); + List usageInstances = m_usageInstanceDao.search(sc, null); + if (usageInstances != null) { + if (usageInstances.size() > 1) { + s_logger.warn("found multiple entries for a vm allocated with id: " + vmId + ", updating end_date for all of them..."); + } + for (UsageVMInstanceVO usageInstance : usageInstances) { + usageInstance.setEndDate(event.getCreateDate()); + m_usageInstanceDao.update(usageInstance); + } + } + + Long templateId = event.getTemplateId(); + String hypervisorType = event.getResourceType(); + // add this VM to the usage helper table + UsageVMInstanceVO usageInstanceNew = new UsageVMInstanceVO(UsageTypes.ALLOCATED_VM, zoneId, event.getAccountId(), vmId, vmName, + soId, templateId, hypervisorType, event.getCreateDate(), null); + m_usageInstanceDao.persist(usageInstanceNew); + } + } + + private void createNetworkHelperEntry(UserStatisticsVO userStat, UsageNetworkVO usageNetworkStats, long timestamp) { + long currentAccountedBytesSent = 0L; + long currentAccountedBytesReceived = 0L; + if (usageNetworkStats != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("getting current accounted bytes for... accountId: " + usageNetworkStats.getAccountId() + " in zone: " + userStat.getDataCenterId() + "; cbr: " + usageNetworkStats.getCurrentBytesReceived() + + "; cbs: " + usageNetworkStats.getCurrentBytesSent() + "; nbr: " + usageNetworkStats.getNetBytesReceived() + "; nbs: " + usageNetworkStats.getNetBytesSent()); + } + currentAccountedBytesSent = (usageNetworkStats.getCurrentBytesSent() + usageNetworkStats.getNetBytesSent()); + currentAccountedBytesReceived = (usageNetworkStats.getCurrentBytesReceived() + usageNetworkStats.getNetBytesReceived()); + } + long bytesSent = (userStat.getCurrentBytesSent() + userStat.getNetBytesSent()) - currentAccountedBytesSent; + long bytesReceived = (userStat.getCurrentBytesReceived() + userStat.getNetBytesReceived()) - currentAccountedBytesReceived; + + if (bytesSent < 0) { + s_logger.warn("Calculated negative value for bytes sent: " + bytesSent + ", user stats say: " + (userStat.getCurrentBytesSent() + userStat.getNetBytesSent()) + ", previous network usage was: " + currentAccountedBytesSent); + bytesSent = 0; + } + if (bytesReceived < 0) { + s_logger.warn("Calculated negative value for bytes received: " + bytesReceived + ", user stats say: " + (userStat.getCurrentBytesReceived() + userStat.getNetBytesReceived()) + ", previous network usage was: " + currentAccountedBytesReceived); + bytesReceived = 0; + } + + long hostId = 0; + + if(userStat.getDeviceId() != null){ + hostId = userStat.getDeviceId(); + } + + UsageNetworkVO usageNetworkVO = new UsageNetworkVO(userStat.getAccountId(), userStat.getDataCenterId(), hostId, userStat.getDeviceType(), userStat.getNetworkId(), bytesSent, bytesReceived, + userStat.getNetBytesReceived(), userStat.getNetBytesSent(), + userStat.getCurrentBytesReceived(), userStat.getCurrentBytesSent(), timestamp); + if (s_logger.isDebugEnabled()) { + s_logger.debug("creating networkHelperEntry... 
accountId: " + userStat.getAccountId() + " in zone: " + userStat.getDataCenterId() + "; cbr: " + userStat.getCurrentBytesReceived() + "; cbs: " + userStat.getCurrentBytesSent() + + "; nbr: " + userStat.getNetBytesReceived() + "; nbs: " + userStat.getNetBytesSent() + "; curABS: " + currentAccountedBytesSent + "; curABR: " + currentAccountedBytesReceived + "; ubs: " + bytesSent + "; ubr: " + bytesReceived); + } + m_usageNetworkDao.persist(usageNetworkVO); + } + + private void createIPHelperEvent(UsageEventVO event) { + + String ipAddress = event.getResourceName(); + + if (EventTypes.EVENT_NET_IP_ASSIGN.equals(event.getType())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("assigning ip address: " + ipAddress + " to account: " + event.getAccountId()); + } + Account acct = m_accountDao.findByIdIncludingRemoved(event.getAccountId()); + long zoneId = event.getZoneId(); + long id = event.getResourceId(); + long sourceNat = event.getSize(); + boolean isSourceNat = (sourceNat == 1) ? true : false ; + UsageIPAddressVO ipAddressVO = new UsageIPAddressVO(id, event.getAccountId(), acct.getDomainId(), zoneId, ipAddress, isSourceNat, event.getCreateDate(), null); + m_usageIPAddressDao.persist(ipAddressVO); + } else if (EventTypes.EVENT_NET_IP_RELEASE.equals(event.getType())) { + SearchCriteria sc = m_usageIPAddressDao.createSearchCriteria(); + sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); + sc.addAnd("address", SearchCriteria.Op.EQ, ipAddress); + sc.addAnd("released", SearchCriteria.Op.NULL); + List ipAddressVOs = m_usageIPAddressDao.search(sc, null); + if (ipAddressVOs.size() > 1) { + s_logger.warn("More that one usage entry for ip address: " + ipAddress + " assigned to account: " + event.getAccountId() + "; marking them all as released..."); + } + for (UsageIPAddressVO ipAddressVO : ipAddressVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("releasing ip address: " + ipAddressVO.getAddress() + " from account: " + ipAddressVO.getAccountId()); + } + ipAddressVO.setReleased(event.getCreateDate()); // there really shouldn't be more than one + m_usageIPAddressDao.update(ipAddressVO); + } + } + } + + private void createVolumeHelperEvent(UsageEventVO event) { + + Long doId = -1L; + long zoneId = -1L; + Long templateId = -1L; + long size = -1L; + + long volId = event.getResourceId(); + if (EventTypes.EVENT_VOLUME_CREATE.equals(event.getType())) { + doId = event.getOfferingId(); + zoneId = event.getZoneId(); + templateId = event.getTemplateId(); + size = event.getSize(); + } + + if (EventTypes.EVENT_VOLUME_CREATE.equals(event.getType())) { + SearchCriteria sc = m_usageVolumeDao.createSearchCriteria(); + sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); + sc.addAnd("id", SearchCriteria.Op.EQ, volId); + sc.addAnd("deleted", SearchCriteria.Op.NULL); + List volumesVOs = m_usageVolumeDao.search(sc, null); + if (volumesVOs.size() > 0) { + //This is a safeguard to avoid double counting of volumes. 
+ s_logger.error("Found duplicate usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking as deleted..."); + } + for (UsageVolumeVO volumesVO : volumesVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); + } + volumesVO.setDeleted(event.getCreateDate()); + m_usageVolumeDao.update(volumesVO); + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("create volume with id : " + volId + " for account: " + event.getAccountId()); + } + Account acct = m_accountDao.findByIdIncludingRemoved(event.getAccountId()); + UsageVolumeVO volumeVO = new UsageVolumeVO(volId, zoneId, event.getAccountId(), acct.getDomainId(), doId, templateId, size, event.getCreateDate(), null); + m_usageVolumeDao.persist(volumeVO); + } else if (EventTypes.EVENT_VOLUME_DELETE.equals(event.getType())) { + SearchCriteria sc = m_usageVolumeDao.createSearchCriteria(); + sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); + sc.addAnd("id", SearchCriteria.Op.EQ, volId); + sc.addAnd("deleted", SearchCriteria.Op.NULL); + List volumesVOs = m_usageVolumeDao.search(sc, null); + if (volumesVOs.size() > 1) { + s_logger.warn("More that one usage entry for volume: " + volId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + } + for (UsageVolumeVO volumesVO : volumesVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting volume: " + volumesVO.getId() + " from account: " + volumesVO.getAccountId()); + } + volumesVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one + m_usageVolumeDao.update(volumesVO); + } + } + } + + private void createTemplateHelperEvent(UsageEventVO event) { + + long templateId = -1L; + long zoneId = -1L; + long templateSize = -1L; + + templateId = event.getResourceId(); + zoneId = event.getZoneId(); + if (EventTypes.EVENT_TEMPLATE_CREATE.equals(event.getType()) || EventTypes.EVENT_TEMPLATE_COPY.equals(event.getType())) { + templateSize = event.getSize(); + if(templateSize < 1){ + s_logger.error("Incorrect size for template with Id "+templateId); + return; + } + if(zoneId == -1L){ + s_logger.error("Incorrect zoneId for template with Id "+templateId); + return; + } + } + + if (EventTypes.EVENT_TEMPLATE_CREATE.equals(event.getType()) || EventTypes.EVENT_TEMPLATE_COPY.equals(event.getType())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("create template with id : " + templateId + " for account: " + event.getAccountId()); + } + Account acct = m_accountDao.findByIdIncludingRemoved(event.getAccountId()); + UsageStorageVO storageVO = new UsageStorageVO(templateId, zoneId, event.getAccountId(), acct.getDomainId(), StorageTypes.TEMPLATE, event.getTemplateId(), + templateSize, event.getCreateDate(), null); + m_usageStorageDao.persist(storageVO); + } else if (EventTypes.EVENT_TEMPLATE_DELETE.equals(event.getType())) { + List storageVOs; + if(zoneId != -1L){ + storageVOs = m_usageStorageDao.listByIdAndZone(event.getAccountId(), templateId, StorageTypes.TEMPLATE, zoneId); + } else { + storageVOs = m_usageStorageDao.listById(event.getAccountId(), templateId, StorageTypes.TEMPLATE); + } + if (storageVOs.size() > 1) { + s_logger.warn("More that one usage entry for storage: " + templateId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + } + for (UsageStorageVO storageVO : storageVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting template: " + 
storageVO.getId() + " from account: " + storageVO.getAccountId()); + } + storageVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one + m_usageStorageDao.update(storageVO); + } + } + } + + private void createISOHelperEvent(UsageEventVO event) { + long isoSize = -1L; + + long isoId = event.getResourceId(); + long zoneId = event.getZoneId(); + if (EventTypes.EVENT_ISO_CREATE.equals(event.getType()) || EventTypes.EVENT_ISO_COPY.equals(event.getType())) { + isoSize = event.getSize(); + } + + if (EventTypes.EVENT_ISO_CREATE.equals(event.getType()) || EventTypes.EVENT_ISO_COPY.equals(event.getType())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("create iso with id : " + isoId + " for account: " + event.getAccountId()); + } + Account acct = m_accountDao.findByIdIncludingRemoved(event.getAccountId()); + UsageStorageVO storageVO = new UsageStorageVO( isoId, zoneId, event.getAccountId(), acct.getDomainId(), StorageTypes.ISO, null, + isoSize, event.getCreateDate(), null); + m_usageStorageDao.persist(storageVO); + } else if (EventTypes.EVENT_ISO_DELETE.equals(event.getType())) { + List storageVOs; + if(zoneId != -1L){ + storageVOs = m_usageStorageDao.listByIdAndZone(event.getAccountId(), isoId, StorageTypes.ISO, zoneId); + } else { + storageVOs = m_usageStorageDao.listById(event.getAccountId(), isoId, StorageTypes.ISO); + } + + if (storageVOs.size() > 1) { + s_logger.warn("More that one usage entry for storage: " + isoId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + } + for (UsageStorageVO storageVO : storageVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting iso: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); + } + storageVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one + m_usageStorageDao.update(storageVO); + } + } + } + + private void createSnapshotHelperEvent(UsageEventVO event) { + long snapSize = -1L; + long zoneId = -1L; + + long snapId = event.getResourceId(); + if (EventTypes.EVENT_SNAPSHOT_CREATE.equals(event.getType())) { + snapSize = event.getSize(); + zoneId = event.getZoneId(); + } + + if (EventTypes.EVENT_SNAPSHOT_CREATE.equals(event.getType())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("create snapshot with id : " + snapId + " for account: " + event.getAccountId()); + } + Account acct = m_accountDao.findByIdIncludingRemoved(event.getAccountId()); + UsageStorageVO storageVO = new UsageStorageVO( snapId, zoneId, event.getAccountId(), acct.getDomainId(), StorageTypes.SNAPSHOT, null, + snapSize, event.getCreateDate(), null); + m_usageStorageDao.persist(storageVO); + } else if (EventTypes.EVENT_SNAPSHOT_DELETE.equals(event.getType())) { + List storageVOs = m_usageStorageDao.listById(event.getAccountId(), snapId, StorageTypes.SNAPSHOT); + if (storageVOs.size() > 1) { + s_logger.warn("More that one usage entry for storage: " + snapId + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + } + for (UsageStorageVO storageVO : storageVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting snapshot: " + storageVO.getId() + " from account: " + storageVO.getAccountId()); + } + storageVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one + m_usageStorageDao.update(storageVO); + } + } + } + + private void createLoadBalancerHelperEvent(UsageEventVO event) { + + long zoneId = -1L; + + long id = event.getResourceId(); + + if 
(EventTypes.EVENT_LOAD_BALANCER_CREATE.equals(event.getType())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating load balancer : " + id + " for account: " + event.getAccountId()); + } + zoneId = event.getZoneId(); + Account acct = m_accountDao.findByIdIncludingRemoved(event.getAccountId()); + UsageLoadBalancerPolicyVO lbVO = new UsageLoadBalancerPolicyVO(id, zoneId, event.getAccountId(), acct.getDomainId(), + event.getCreateDate(), null); + m_usageLoadBalancerPolicyDao.persist(lbVO); + } else if (EventTypes.EVENT_LOAD_BALANCER_DELETE.equals(event.getType())) { + SearchCriteria sc = m_usageLoadBalancerPolicyDao.createSearchCriteria(); + sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); + sc.addAnd("id", SearchCriteria.Op.EQ, id); + sc.addAnd("deleted", SearchCriteria.Op.NULL); + List lbVOs = m_usageLoadBalancerPolicyDao.search(sc, null); + if (lbVOs.size() > 1) { + s_logger.warn("More that one usage entry for load balancer policy: " + id + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + } + for (UsageLoadBalancerPolicyVO lbVO : lbVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting load balancer policy: " + lbVO.getId() + " from account: " + lbVO.getAccountId()); + } + lbVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one + m_usageLoadBalancerPolicyDao.update(lbVO); + } + } + } + + private void createPortForwardingHelperEvent(UsageEventVO event) { + + long zoneId = -1L; + + long id = event.getResourceId(); + + if (EventTypes.EVENT_NET_RULE_ADD.equals(event.getType())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating port forwarding rule : " + id + " for account: " + event.getAccountId()); + } + zoneId = event.getZoneId(); + Account acct = m_accountDao.findByIdIncludingRemoved(event.getAccountId()); + UsagePortForwardingRuleVO pfVO = new UsagePortForwardingRuleVO(id, zoneId, event.getAccountId(), acct.getDomainId(), + event.getCreateDate(), null); + m_usagePortForwardingRuleDao.persist(pfVO); + } else if (EventTypes.EVENT_NET_RULE_DELETE.equals(event.getType())) { + SearchCriteria sc = m_usagePortForwardingRuleDao.createSearchCriteria(); + sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); + sc.addAnd("id", SearchCriteria.Op.EQ, id); + sc.addAnd("deleted", SearchCriteria.Op.NULL); + List pfVOs = m_usagePortForwardingRuleDao.search(sc, null); + if (pfVOs.size() > 1) { + s_logger.warn("More that one usage entry for port forwarding rule: " + id + " assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + } + for (UsagePortForwardingRuleVO pfVO : pfVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting port forwarding rule: " + pfVO.getId() + " from account: " + pfVO.getAccountId()); + } + pfVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one + m_usagePortForwardingRuleDao.update(pfVO); + } + } + } + + private void createNetworkOfferingEvent(UsageEventVO event) { + + long zoneId = -1L; + + long vmId = event.getResourceId(); + long networkOfferingId = event.getOfferingId(); + + if (EventTypes.EVENT_NETWORK_OFFERING_CREATE.equals(event.getType()) || EventTypes.EVENT_NETWORK_OFFERING_ASSIGN.equals(event.getType())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating networking offering: "+ networkOfferingId +" for Vm: " + vmId + " for account: " + event.getAccountId()); + } + zoneId = event.getZoneId(); + Account acct = 
m_accountDao.findByIdIncludingRemoved(event.getAccountId()); + boolean isDefault = (event.getSize() == 1) ? true : false ; + UsageNetworkOfferingVO networkOffering = new UsageNetworkOfferingVO(zoneId, event.getAccountId(), acct.getDomainId(), vmId, networkOfferingId, isDefault, event.getCreateDate(), null); + m_usageNetworkOfferingDao.persist(networkOffering); + } else if (EventTypes.EVENT_NETWORK_OFFERING_DELETE.equals(event.getType()) || EventTypes.EVENT_NETWORK_OFFERING_REMOVE.equals(event.getType())) { + SearchCriteria sc = m_usageNetworkOfferingDao.createSearchCriteria(); + sc.addAnd("accountId", SearchCriteria.Op.EQ, event.getAccountId()); + sc.addAnd("vmInstanceId", SearchCriteria.Op.EQ, vmId); + sc.addAnd("networkOfferingId", SearchCriteria.Op.EQ, networkOfferingId); + sc.addAnd("deleted", SearchCriteria.Op.NULL); + List noVOs = m_usageNetworkOfferingDao.search(sc, null); + if (noVOs.size() > 1) { + s_logger.warn("More that one usage entry for networking offering: "+ networkOfferingId +" for Vm: " + vmId+" assigned to account: " + event.getAccountId() + "; marking them all as deleted..."); + } + for (UsageNetworkOfferingVO noVO : noVOs) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("deleting network offering: " + noVO.getNetworkOfferingId() + " from Vm: " + noVO.getVmInstanceId()); + } + noVO.setDeleted(event.getCreateDate()); // there really shouldn't be more than one + m_usageNetworkOfferingDao.update(noVO); + } + } + } + + + private class Heartbeat implements Runnable { + public void run() { + Transaction usageTxn = Transaction.open(Transaction.USAGE_DB); + try { + if(!m_heartbeatLock.lock(3)) { // 3 second timeout + if(s_logger.isTraceEnabled()) + s_logger.trace("Heartbeat lock is in use by others, returning true as someone else will take over the job if required"); + return; + } + + try { + // check for one-off jobs + UsageJobVO nextJob = m_usageJobDao.getNextImmediateJob(); + if (nextJob != null) { + if (m_hostname.equals(nextJob.getHost()) && (m_pid == nextJob.getPid().intValue())) { + updateJob(nextJob.getId(), null, null, null, UsageJobVO.JOB_SCHEDULED); + scheduleParse(); + } + } + + Long jobId = m_usageJobDao.checkHeartbeat(m_hostname, m_pid, m_aggregationDuration); + if (jobId != null) { + // if I'm taking over the job...see how long it's been since the last job, and if it's more than the + // aggregation range...do a one off job to catch up. 
However, only do this if we are more than half + // the aggregation range away from executing the next job + long now = System.currentTimeMillis(); + long timeToJob = m_jobExecTime.getTimeInMillis() - now; + long timeSinceJob = 0; + long aggregationDurationMillis = m_aggregationDuration * 60 * 1000; + long lastSuccess = m_usageJobDao.getLastJobSuccessDateMillis(); + if (lastSuccess > 0) { + timeSinceJob = now - lastSuccess; + } + + if ((timeSinceJob > 0) && (timeSinceJob > aggregationDurationMillis)) { + if (timeToJob > (aggregationDurationMillis/2)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("it's been " + timeSinceJob + " ms since last usage job and " + timeToJob + " ms until next job, scheduling an immediate job to catch up (aggregation duration is " + m_aggregationDuration + " minutes)"); + } + scheduleParse(); + } + } + + boolean changeOwner = updateJob(jobId, m_hostname, Integer.valueOf(m_pid), new Date(), UsageJobVO.JOB_NOT_SCHEDULED); + if (changeOwner) { + deleteOneOffJobs(m_hostname, m_pid); + } + } + } finally { + m_heartbeatLock.unlock(); + } + } catch (Exception ex) { + s_logger.error("error in heartbeat", ex); + } finally { + usageTxn.close(); + } + } + + @DB + protected boolean updateJob(Long jobId, String hostname, Integer pid, Date heartbeat, int scheduled) { + boolean changeOwner = false; + Transaction txn = Transaction.currentTxn(); + try { + txn.start(); + + // take over the job, setting our hostname/pid/heartbeat time + UsageJobVO job = m_usageJobDao.lockRow(jobId, Boolean.TRUE); + if (!job.getHost().equals(hostname) || !job.getPid().equals(pid)) { + changeOwner = true; + } + + UsageJobVO jobForUpdate = m_usageJobDao.createForUpdate(); + if (hostname != null) { + jobForUpdate.setHost(hostname); + } + if (pid != null) { + jobForUpdate.setPid(pid); + } + if (heartbeat != null) { + jobForUpdate.setHeartbeat(heartbeat); + } + jobForUpdate.setScheduled(scheduled); + m_usageJobDao.update(job.getId(), jobForUpdate); + + txn.commit(); + } catch (Exception dbEx) { + txn.rollback(); + s_logger.error("error updating usage job", dbEx); + } + return changeOwner; + } + + @DB + protected void deleteOneOffJobs(String hostname, int pid) { + SearchCriteria sc = m_usageJobDao.createSearchCriteria(); + SearchCriteria ssc = m_usageJobDao.createSearchCriteria(); + ssc.addOr("host", SearchCriteria.Op.NEQ, hostname); + ssc.addOr("pid", SearchCriteria.Op.NEQ, pid); + sc.addAnd("host", SearchCriteria.Op.SC, ssc); + sc.addAnd("endMillis", SearchCriteria.Op.EQ, Long.valueOf(0)); + sc.addAnd("jobType", SearchCriteria.Op.EQ, Integer.valueOf(UsageJobVO.JOB_TYPE_SINGLE)); + sc.addAnd("scheduled", SearchCriteria.Op.EQ, Integer.valueOf(0)); + m_usageJobDao.expunge(sc); + } + } + + private class SanityCheck implements Runnable { + public void run() { + UsageSanityChecker usc = new UsageSanityChecker(); + try { + String errors = usc.runSanityCheck(); + if(errors.length() > 0){ + _alertMgr.sendAlert(AlertManager.ALERT_TYPE_USAGE_SANITY_RESULT, 0, new Long(0), "Usage Sanity Check failed", errors); + } else { + _alertMgr.clearAlert(AlertManager.ALERT_TYPE_USAGE_SANITY_RESULT, 0, 0); + } + } catch (SQLException e) { + s_logger.error("Error in sanity check", e); + } + } + } +} diff --git a/usage/src/com/cloud/usage/UsageSanityChecker.java b/usage/src/com/cloud/usage/UsageSanityChecker.java new file mode 100644 index 00000000000..e3fc88d822e --- /dev/null +++ b/usage/src/com/cloud/usage/UsageSanityChecker.java @@ -0,0 +1,232 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. 
All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + +* + * + */ + +package com.cloud.usage; + +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import com.cloud.utils.db.Transaction; + +public class UsageSanityChecker { + + private StringBuffer errors; + private String lastCheckId = ""; + private final String lastCheckFile = "/usr/local/libexec/sanity-check-last-id"; + + private boolean checkMaxUsage(Connection conn) throws SQLException{ + + PreparedStatement pstmt = conn.prepareStatement("SELECT value FROM `cloud`.`configuration` where name = 'usage.stats.job.aggregation.range'"); + ResultSet rs = pstmt.executeQuery(); + + int aggregationRange = 1440; + if(rs.next()){ + aggregationRange = rs.getInt(1); + } else { + System.out.println("Failed to retrieve aggregation range. Using default : "+aggregationRange); + } + + int aggregationHours = aggregationRange / 60; + + /* + * Check for usage records with raw_usage > aggregationHours + */ + pstmt = conn.prepareStatement("SELECT count(*) FROM `cloud_usage`.`cloud_usage` cu where usage_type not in (4,5) and raw_usage > "+aggregationHours+lastCheckId); + rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" usage records with raw_usage > "+aggregationHours); + errors.append("\n"); + return false; + } + return true; + } + + private boolean checkVmUsage(Connection conn) throws SQLException{ + boolean success = true; + /* + * Check for Vm usage records which are created after the vm is destroyed + */ + PreparedStatement pstmt = conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.vm_instance vm where vm.type = 'User' " + + "and cu.usage_type in (1 , 2) and cu.usage_id = vm.id and cu.start_date > vm.removed"+lastCheckId); + ResultSet rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" Vm usage records which are created after Vm is destroyed"); + errors.append("\n"); + success = false; + } + + /* + * Check for Vms which have multiple running vm records in helper table + */ + pstmt = conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance where usage_type =1 " + + "and end_date is null group by vm_instance_id having count(vm_instance_id) > 1) c ;"); + rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" duplicate running Vm entries in vm usage helper table"); + errors.append("\n"); + success = false; + } + + /* + * Check for Vms which have multiple allocated vm records in helper table + */ + pstmt = conn.prepareStatement("select 
sum(cnt) from (select count(*) as cnt from cloud_usage.usage_vm_instance where usage_type =2 " + + "and end_date is null group by vm_instance_id having count(vm_instance_id) > 1) c ;"); + rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" duplicate allocated Vm entries in vm usage helper table"); + errors.append("\n"); + success = false; + } + + /* + * Check for Vms which have running vm entry without allocated vm entry in helper table + */ + pstmt = conn.prepareStatement("select count(vm_instance_id) from cloud_usage.usage_vm_instance o where o.end_date is null and o.usage_type=1 and not exists " + + "(select 1 from cloud_usage.usage_vm_instance i where i.vm_instance_id=o.vm_instance_id and usage_type=2 and i.end_date is null)"); + rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" running Vm entries without corresponding allocated entries in vm usage helper table"); + errors.append("\n"); + success = false; + } + return success; + } + + private boolean checkVolumeUsage(Connection conn) throws SQLException{ + boolean success = true; + /* + * Check for Volume usage records which are created after the volume is removed + */ + PreparedStatement pstmt = conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.volumes v " + + "where cu.usage_type = 6 and cu.usage_id = v.id and cu.start_date > v.removed"+lastCheckId); + ResultSet rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" volume usage records which are created after volume is removed"); + errors.append("\n"); + success = false; + } + + /* + * Check for duplicate records in volume usage helper table + */ + pstmt = conn.prepareStatement("select sum(cnt) from (select count(*) as cnt from cloud_usage.usage_volume " + + "where deleted is null group by id having count(id) > 1) c;"); + rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" duplicate records is volume usage helper table"); + errors.append("\n"); + success = false; + } + return success; + } + + private boolean checkTemplateISOUsage(Connection conn) throws SQLException{ + /* + * Check for Template/ISO usage records which are created after it is removed + */ + PreparedStatement pstmt = conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.template_zone_ref tzr " + + "where cu.usage_id = tzr.template_id and cu.zone_id = tzr.zone_id and cu.usage_type in (7,8) and cu.start_date > tzr.removed"+lastCheckId); + ResultSet rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" template/ISO usage records which are created after it is removed"); + errors.append("\n"); + return false; + } + return true; + } + + private boolean checkSnapshotUsage(Connection conn) throws SQLException{ + /* + * Check for snapshot usage records which are created after snapshot is removed + */ + PreparedStatement pstmt = conn.prepareStatement("select count(*) from cloud_usage.cloud_usage cu inner join cloud.snapshots s " + + "where cu.usage_id = s.id and cu.usage_type = 9 and cu.start_date > s.removed"+lastCheckId); + ResultSet rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + errors.append("Error: Found "+rs.getInt(1)+" snapshot usage records which are created after snapshot is removed"); + errors.append("\n"); + return false; + } + 
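+        // No snapshot usage records dated after the snapshot's removal were found, so this check passes.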
return true; + } + + public String runSanityCheck() throws SQLException{ + try { + BufferedReader reader = new BufferedReader( new FileReader (lastCheckFile)); + String last_id = null; + if( (reader != null) && ( last_id = reader.readLine() ) != null ) { + int lastId = Integer.parseInt(last_id); + if(lastId > 0){ + lastCheckId = " and cu.id > "+last_id; + } + } + reader.close(); + } catch (Exception e) { + // Error while reading last check id + } + + Connection conn = Transaction.getStandaloneConnection(); + int maxId = 0; + PreparedStatement pstmt = conn.prepareStatement("select max(id) from cloud_usage.cloud_usage"); + ResultSet rs = pstmt.executeQuery(); + if(rs.next() && (rs.getInt(1) > 0)){ + maxId = rs.getInt(1); + lastCheckId += " and cu.id <= "+maxId; + } + errors = new StringBuffer(); + checkMaxUsage(conn); + checkVmUsage(conn); + checkVolumeUsage(conn); + checkTemplateISOUsage(conn); + checkSnapshotUsage(conn); + FileWriter fstream; + try { + fstream = new FileWriter(lastCheckFile); + BufferedWriter out = new BufferedWriter(fstream); + out.write(""+maxId); + out.close(); + } catch (IOException e) { + // Error while writing last check id + } + return errors.toString(); + } + + public static void main(String args[]){ + UsageSanityChecker usc = new UsageSanityChecker(); + String sanityErrors; + try { + sanityErrors = usc.runSanityCheck(); + if(sanityErrors.length() > 0){ + System.out.println(sanityErrors.toString()); + } + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/usage/src/com/cloud/usage/UsageServer.java b/usage/src/com/cloud/usage/UsageServer.java new file mode 100644 index 00000000000..f2f6c121571 --- /dev/null +++ b/usage/src/com/cloud/usage/UsageServer.java @@ -0,0 +1,43 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see .* + * + */ + +package com.cloud.usage; + +import org.apache.log4j.Logger; + +import com.cloud.utils.component.ComponentLocator; + +public class UsageServer { + private static final Logger s_logger = Logger.getLogger(UsageServer.class.getName()); + public static final String Name = "usage-server"; + + /** + * @param args + */ + public static void main(String[] args) { + // TODO: do we need to communicate with mgmt server? 
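+        // The ComponentLocator loads the components declared in usage-components.xml; main() only checks that
+        // UsageManager resolved (the locator is expected to configure and start it) and logs readiness.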
+ final ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + UsageManager mgr = _locator.getManager(UsageManager.class); + if (mgr != null) { + if (s_logger.isInfoEnabled()) { + s_logger.info("UsageServer ready..."); + } + } + } +} diff --git a/usage/src/com/cloud/usage/parser/IPAddressUsageParser.java b/usage/src/com/cloud/usage/parser/IPAddressUsageParser.java new file mode 100644 index 00000000000..ad86346733b --- /dev/null +++ b/usage/src/com/cloud/usage/parser/IPAddressUsageParser.java @@ -0,0 +1,178 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +package com.cloud.usage.parser; + +import java.text.DecimalFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.usage.UsageIPAddressVO; +import com.cloud.usage.UsageServer; +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVO; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsageIPAddressDao; +import com.cloud.user.AccountVO; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLocator; + +public class IPAddressUsageParser { + public static final Logger s_logger = Logger.getLogger(IPAddressUsageParser.class.getName()); + + private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private static UsageIPAddressDao m_usageIPAddressDao = _locator.getDao(UsageIPAddressDao.class); + + + public static boolean parse(AccountVO account, Date startDate, Date endDate) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Parsing IP Address usage for account: " + account.getId()); + } + if ((endDate == null) || endDate.after(new Date())) { + endDate = new Date(); + } + + // - query usage_ip_address table with the following criteria: + // - look for an entry for accountId with start date in the given range + // - look for an entry for accountId with end date in the given range + // - look for an entry for accountId with end date null (currently running vm or owned IP) + // - look for an entry for accountId with start date before given range *and* end date after given range + List usageIPAddress = m_usageIPAddressDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate); + + if(usageIPAddress.isEmpty()){ + s_logger.debug("No IP Address usage for this period"); + return true; + } + + // This map has both the running time *and* the usage amount. 
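+        // usageMap is keyed by the stringified IP id and accumulates a Pair of (IP id, total assigned time in ms)
+        // across all usage_ip_address rows for that IP that overlap this aggregation period.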
+ Map> usageMap = new HashMap>(); + + Map IPMap = new HashMap(); + + // loop through all the usage IPs, create a usage record for each + for (UsageIPAddressVO usageIp : usageIPAddress) { + long IpId = usageIp.getId(); + + String key = ""+IpId; + + // store the info in the IP map + IPMap.put(key, new IpInfo(usageIp.getZoneId(), IpId, usageIp.getAddress(), usageIp.isSourceNat())); + + Date IpAssignDate = usageIp.getAssigned(); + Date IpReleaseDeleteDate = usageIp.getReleased(); + + if ((IpReleaseDeleteDate == null) || IpReleaseDeleteDate.after(endDate)) { + IpReleaseDeleteDate = endDate; + } + + // clip the start date to the beginning of our aggregation range if the vm has been running for a while + if (IpAssignDate.before(startDate)) { + IpAssignDate = startDate; + } + + long currentDuration = (IpReleaseDeleteDate.getTime() - IpAssignDate.getTime()) + 1; // make sure this is an inclusive check for milliseconds (i.e. use n - m + 1 to find total number of millis to charge) + + updateIpUsageData(usageMap, key, usageIp.getId(), currentDuration); + } + + for (String ipIdKey : usageMap.keySet()) { + Pair ipTimeInfo = usageMap.get(ipIdKey); + long useTime = ipTimeInfo.second().longValue(); + + // Only create a usage record if we have a runningTime of bigger than zero. + if (useTime > 0L) { + IpInfo info = IPMap.get(ipIdKey); + createUsageRecord(info.getZoneId(), useTime, startDate, endDate, account, info.getIpId(), info.getIPAddress(), info.isSourceNat()); + } + } + + return true; + } + + private static void updateIpUsageData(Map> usageDataMap, String key, long ipId, long duration) { + Pair ipUsageInfo = usageDataMap.get(key); + if (ipUsageInfo == null) { + ipUsageInfo = new Pair(new Long(ipId), new Long(duration)); + } else { + Long runningTime = ipUsageInfo.second(); + runningTime = new Long(runningTime.longValue() + duration); + ipUsageInfo = new Pair(ipUsageInfo.first(), runningTime); + } + usageDataMap.put(key, ipUsageInfo); + } + + private static void createUsageRecord(long zoneId, long runningTime, Date startDate, Date endDate, AccountVO account, long IpId, String IPAddress, boolean isSourceNat) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Total usage time " + runningTime + "ms"); + } + + float usage = runningTime / 1000f / 60f / 60f; + + DecimalFormat dFormat = new DecimalFormat("#.######"); + String usageDisplay = dFormat.format(usage); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating IP usage record with id: " + IpId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId()); + } + + String usageDesc = "IPAddress: "+IPAddress; + + // Create the usage record + + UsageVO usageRecord = new UsageVO(zoneId, account.getAccountId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", + UsageTypes.IP_ADDRESS, new Double(usage), null, null, null, null, IpId, startDate, endDate, (isSourceNat?"SourceNat":"")); + m_usageDao.persist(usageRecord); + } + + private static class IpInfo { + private long zoneId; + private long IpId; + private String IPAddress; + private boolean isSourceNat; + + public IpInfo(long zoneId,long IpId, String IPAddress, boolean isSourceNat) { + this.zoneId = zoneId; + this.IpId = IpId; + this.IPAddress = IPAddress; + this.isSourceNat = isSourceNat; + } + + public long getZoneId() { + return zoneId; + } + + public long getIpId() { + return IpId; + } + + public String getIPAddress() { + return IPAddress; + } + + public boolean isSourceNat() { + return isSourceNat; + } + } + +} diff 
--git a/usage/src/com/cloud/usage/parser/LoadBalancerUsageParser.java b/usage/src/com/cloud/usage/parser/LoadBalancerUsageParser.java new file mode 100644 index 00000000000..e0d0b1b01be --- /dev/null +++ b/usage/src/com/cloud/usage/parser/LoadBalancerUsageParser.java @@ -0,0 +1,161 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +package com.cloud.usage.parser; + +import java.text.DecimalFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.usage.UsageLoadBalancerPolicyVO; +import com.cloud.usage.UsageServer; +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVO; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsageLoadBalancerPolicyDao; +import com.cloud.user.AccountVO; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLocator; + +public class LoadBalancerUsageParser { + public static final Logger s_logger = Logger.getLogger(LoadBalancerUsageParser.class.getName()); + + private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private static UsageLoadBalancerPolicyDao m_usageLoadBalancerPolicyDao = _locator.getDao(UsageLoadBalancerPolicyDao.class); + + public static boolean parse(AccountVO account, Date startDate, Date endDate) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Parsing all LoadBalancerPolicy usage events for account: " + account.getId()); + } + if ((endDate == null) || endDate.after(new Date())) { + endDate = new Date(); + } + + // - query usage_volume table with the following criteria: + // - look for an entry for accountId with start date in the given range + // - look for an entry for accountId with end date in the given range + // - look for an entry for accountId with end date null (currently running vm or owned IP) + // - look for an entry for accountId with start date before given range *and* end date after given range + List usageLBs = m_usageLoadBalancerPolicyDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0); + + if(usageLBs.isEmpty()){ + s_logger.debug("No load balancer usage events for this period"); + return true; + } + + // This map has both the running time *and* the usage amount. 
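+        // Each policy accrues (delete date or endDate) - (create date or startDate) + 1 ms of usage, clipped to the
+        // aggregation window; the accumulated total is converted to hours when the usage record is written.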
+ Map> usageMap = new HashMap>(); + Map lbMap = new HashMap(); + + // loop through all the load balancer policies, create a usage record for each + for (UsageLoadBalancerPolicyVO usageLB : usageLBs) { + long lbId = usageLB.getId(); + String key = ""+lbId; + + lbMap.put(key, new LBInfo(lbId, usageLB.getZoneId())); + + Date lbCreateDate = usageLB.getCreated(); + Date lbDeleteDate = usageLB.getDeleted(); + + if ((lbDeleteDate == null) || lbDeleteDate.after(endDate)) { + lbDeleteDate = endDate; + } + + // clip the start date to the beginning of our aggregation range if the vm has been running for a while + if (lbCreateDate.before(startDate)) { + lbCreateDate = startDate; + } + + long currentDuration = (lbDeleteDate.getTime() - lbCreateDate.getTime()) + 1; // make sure this is an inclusive check for milliseconds (i.e. use n - m + 1 to find total number of millis to charge) + + + updateLBUsageData(usageMap, key, usageLB.getId(), currentDuration); + } + + for (String lbIdKey : usageMap.keySet()) { + Pair sgtimeInfo = usageMap.get(lbIdKey); + long useTime = sgtimeInfo.second().longValue(); + + // Only create a usage record if we have a runningTime of bigger than zero. + if (useTime > 0L) { + LBInfo info = lbMap.get(lbIdKey); + createUsageRecord(UsageTypes.LOAD_BALANCER_POLICY, useTime, startDate, endDate, account, info.getId(), info.getZoneId() ); + } + } + + return true; + } + + private static void updateLBUsageData(Map> usageDataMap, String key, long lbId, long duration) { + Pair lbUsageInfo = usageDataMap.get(key); + if (lbUsageInfo == null) { + lbUsageInfo = new Pair(new Long(lbId), new Long(duration)); + } else { + Long runningTime = lbUsageInfo.second(); + runningTime = new Long(runningTime.longValue() + duration); + lbUsageInfo = new Pair(lbUsageInfo.first(), runningTime); + } + usageDataMap.put(key, lbUsageInfo); + } + + private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long lbId, long zoneId) { + // Our smallest increment is hourly for now + if (s_logger.isDebugEnabled()) { + s_logger.debug("Total running time " + runningTime + "ms"); + } + + float usage = runningTime / 1000f / 60f / 60f; + + DecimalFormat dFormat = new DecimalFormat("#.######"); + String usageDisplay = dFormat.format(usage); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating Volume usage record for load balancer: " + lbId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId()); + } + + // Create the usage record + String usageDesc = "Load Balancing Policy: "+lbId+" usage time"; + + //ToDo: get zone id + UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, + new Double(usage), null, null, null, null, lbId, null, startDate, endDate); + m_usageDao.persist(usageRecord); + } + + private static class LBInfo { + private long id; + private long zoneId; + + public LBInfo(long id, long zoneId) { + this.id = id; + this.zoneId = zoneId; + } + public long getZoneId() { + return zoneId; + } + public long getId() { + return id; + } + } + +} diff --git a/usage/src/com/cloud/usage/parser/NetworkOfferingUsageParser.java b/usage/src/com/cloud/usage/parser/NetworkOfferingUsageParser.java new file mode 100644 index 00000000000..3672fdc95ba --- /dev/null +++ b/usage/src/com/cloud/usage/parser/NetworkOfferingUsageParser.java @@ -0,0 +1,173 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. 
All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +package com.cloud.usage.parser; + +import java.text.DecimalFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.usage.UsageNetworkOfferingVO; +import com.cloud.usage.UsageServer; +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVO; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsageNetworkOfferingDao; +import com.cloud.user.AccountVO; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLocator; + +public class NetworkOfferingUsageParser { + public static final Logger s_logger = Logger.getLogger(NetworkOfferingUsageParser.class.getName()); + + private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private static UsageNetworkOfferingDao m_usageNetworkOfferingDao = _locator.getDao(UsageNetworkOfferingDao.class); + + public static boolean parse(AccountVO account, Date startDate, Date endDate) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Parsing all NetworkOffering usage events for account: " + account.getId()); + } + if ((endDate == null) || endDate.after(new Date())) { + endDate = new Date(); + } + + // - query usage_volume table with the following criteria: + // - look for an entry for accountId with start date in the given range + // - look for an entry for accountId with end date in the given range + // - look for an entry for accountId with end date null (currently running vm or owned IP) + // - look for an entry for accountId with start date before given range *and* end date after given range + List usageNOs = m_usageNetworkOfferingDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0); + + if(usageNOs.isEmpty()){ + s_logger.debug("No NetworkOffering usage events for this period"); + return true; + } + + // This map has both the running time *and* the usage amount. 
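+        // The map key combines the VM id and offering id ("<vmId>NO<offeringId>") so usage of the same offering
+        // on different VMs is accumulated separately.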
+ Map> usageMap = new HashMap>(); + Map noMap = new HashMap(); + + // loop through all the network offerings, create a usage record for each + for (UsageNetworkOfferingVO usageNO : usageNOs) { + long vmId = usageNO.getVmInstanceId(); + long noId = usageNO.getNetworkOfferingId(); + String key = ""+vmId+"NO"+noId; + + noMap.put(key, new NOInfo(vmId, usageNO.getZoneId(), noId, usageNO.isDefault())); + + Date noCreateDate = usageNO.getCreated(); + Date noDeleteDate = usageNO.getDeleted(); + + if ((noDeleteDate == null) || noDeleteDate.after(endDate)) { + noDeleteDate = endDate; + } + + // clip the start date to the beginning of our aggregation range if the vm has been running for a while + if (noCreateDate.before(startDate)) { + noCreateDate = startDate; + } + + long currentDuration = (noDeleteDate.getTime() - noCreateDate.getTime()) + 1; // make sure this is an inclusive check for milliseconds (i.e. use n - m + 1 to find total number of millis to charge) + + + updateNOUsageData(usageMap, key, usageNO.getVmInstanceId(), currentDuration); + } + + for (String noIdKey : usageMap.keySet()) { + Pair notimeInfo = usageMap.get(noIdKey); + long useTime = notimeInfo.second().longValue(); + + // Only create a usage record if we have a runningTime of bigger than zero. + if (useTime > 0L) { + NOInfo info = noMap.get(noIdKey); + createUsageRecord(UsageTypes.NETWORK_OFFERING, useTime, startDate, endDate, account, info.getVmId(), info.getNOId(), info.getZoneId(), info.isDefault()); + } + } + + return true; + } + + private static void updateNOUsageData(Map> usageDataMap, String key, long vmId, long duration) { + Pair noUsageInfo = usageDataMap.get(key); + if (noUsageInfo == null) { + noUsageInfo = new Pair(new Long(vmId), new Long(duration)); + } else { + Long runningTime = noUsageInfo.second(); + runningTime = new Long(runningTime.longValue() + duration); + noUsageInfo = new Pair(noUsageInfo.first(), runningTime); + } + usageDataMap.put(key, noUsageInfo); + } + + private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long vmId, long noId, long zoneId, boolean isDefault) { + // Our smallest increment is hourly for now + if (s_logger.isDebugEnabled()) { + s_logger.debug("Total running time " + runningTime + "ms"); + } + + float usage = runningTime / 1000f / 60f / 60f; + + DecimalFormat dFormat = new DecimalFormat("#.######"); + String usageDisplay = dFormat.format(usage); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating network offering:" + noId + " usage record for Vm : " + vmId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId()); + } + + // Create the usage record + String usageDesc = "Network offering:" + noId + " for Vm : " + vmId + " usage time"; + + long defaultNic = (isDefault) ? 
1 : 0; + UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, + new Double(usage), vmId, null, noId, null, defaultNic, null, startDate, endDate); + m_usageDao.persist(usageRecord); + } + + private static class NOInfo { + private long vmId; + private long zoneId; + private long noId; + private boolean isDefault; + + public NOInfo(long vmId, long zoneId, long noId, boolean isDefault) { + this.vmId = vmId; + this.zoneId = zoneId; + this.noId = noId; + this.isDefault = isDefault; + } + public long getZoneId() { + return zoneId; + } + public long getVmId() { + return vmId; + } + public long getNOId() { + return noId; + } + + public boolean isDefault(){ + return isDefault; + } + } + +} diff --git a/usage/src/com/cloud/usage/parser/NetworkUsageParser.java b/usage/src/com/cloud/usage/parser/NetworkUsageParser.java new file mode 100644 index 00000000000..fa9c0375e3d --- /dev/null +++ b/usage/src/com/cloud/usage/parser/NetworkUsageParser.java @@ -0,0 +1,167 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +package com.cloud.usage.parser; + +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.usage.UsageNetworkVO; +import com.cloud.usage.UsageServer; +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVO; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsageNetworkDao; +import com.cloud.user.AccountVO; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLocator; +import com.cloud.utils.db.SearchCriteria; + +public class NetworkUsageParser { +public static final Logger s_logger = Logger.getLogger(NetworkUsageParser.class.getName()); + + private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private static UsageNetworkDao m_usageNetworkDao = _locator.getDao(UsageNetworkDao.class); + + public static boolean parse(AccountVO account, Date startDate, Date endDate) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Parsing all Network usage events for account: " + account.getId()); + } + + if ((endDate == null) || endDate.after(new Date())) { + endDate = new Date(); + } + + // - query usage_network table for all entries for userId with + // event_date in the given range + SearchCriteria sc = m_usageNetworkDao.createSearchCriteria(); + sc.addAnd("accountId", SearchCriteria.Op.EQ, account.getId()); + sc.addAnd("eventTimeMillis", SearchCriteria.Op.BETWEEN, startDate.getTime(), endDate.getTime()); + List usageNetworkVOs = m_usageNetworkDao.search(sc, null); + + Map networkUsageByZone = new HashMap(); + + // Calculate the total bytes since last parsing + for (UsageNetworkVO usageNetwork : usageNetworkVOs) { + long zoneId = usageNetwork.getZoneId(); + String key = ""+zoneId; + if(usageNetwork.getHostId() != 0){ + key += "-Host"+usageNetwork.getHostId(); + } + NetworkInfo networkInfo = networkUsageByZone.get(key); + + long bytesSent = usageNetwork.getBytesSent(); + long bytesReceived = usageNetwork.getBytesReceived(); + if (networkInfo != null) { + bytesSent += networkInfo.getBytesSent(); + bytesReceived += networkInfo.getBytesRcvd(); + } + + networkUsageByZone.put(key, new NetworkInfo(zoneId, usageNetwork.getHostId(), usageNetwork.getHostType(), usageNetwork.getNetworkId(), bytesSent, bytesReceived)); + } + + for (String key : networkUsageByZone.keySet()) { + NetworkInfo networkInfo = networkUsageByZone.get(key); + long totalBytesSent = networkInfo.getBytesSent(); + long totalBytesReceived = networkInfo.getBytesRcvd(); + + if ((totalBytesSent > 0L) || (totalBytesReceived > 0L)) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating usage record, total bytes sent:" + totalBytesSent + ", total bytes received: " + totalBytesReceived + " for account: " + + account.getId() + " in availability zone " + networkInfo.getZoneId() + ", start: " + startDate + ", end: " + endDate); + } + + Long hostId = null; + + // Create the usage record for bytes sent + String usageDesc = "network bytes sent"; + if(networkInfo.getHostId() != 0){ + hostId = networkInfo.getHostId(); + usageDesc += " for Host: "+networkInfo.getHostId(); + } + UsageVO usageRecord = new UsageVO(networkInfo.getZoneId(), account.getId(), account.getDomainId(), usageDesc, totalBytesSent + " bytes sent", + UsageTypes.NETWORK_BYTES_SENT, new Double(totalBytesSent), hostId, networkInfo.getHostType(), networkInfo.getNetworkId(), startDate, 
endDate); + m_usageDao.persist(usageRecord); + + // Create the usage record for bytes received + usageDesc = "network bytes received"; + if(networkInfo.getHostId() != 0){ + usageDesc += " for Host: "+networkInfo.getHostId(); + } + usageRecord = new UsageVO(networkInfo.getZoneId(), account.getId(), account.getDomainId(), usageDesc, totalBytesReceived + " bytes received", + UsageTypes.NETWORK_BYTES_RECEIVED, new Double(totalBytesReceived), hostId, networkInfo.getHostType(), networkInfo.getNetworkId(), startDate, endDate); + m_usageDao.persist(usageRecord); + } else { + // Don't charge anything if there were zero bytes processed + if (s_logger.isDebugEnabled()) { + s_logger.debug("No usage record (0 bytes used) generated for account: " + account.getId()); + } + } + } + + return true; + } + + private static class NetworkInfo { + private long zoneId; + private long hostId; + private String hostType; + private Long networkId; + private long bytesSent; + private long bytesRcvd; + + public NetworkInfo(long zoneId, long hostId, String hostType, Long networkId, long bytesSent, long bytesRcvd) { + this.zoneId = zoneId; + this.hostId = hostId; + this.hostType = hostType; + this.networkId = networkId; + this.bytesSent = bytesSent; + this.bytesRcvd = bytesRcvd; + } + + public long getZoneId() { + return zoneId; + } + + public long getHostId() { + return hostId; + } + + public Long getNetworkId() { + return networkId; + } + + public long getBytesSent() { + return bytesSent; + } + + public long getBytesRcvd() { + return bytesRcvd; + } + + public String getHostType(){ + return hostType; + } + + } +} diff --git a/usage/src/com/cloud/usage/parser/PortForwardingUsageParser.java b/usage/src/com/cloud/usage/parser/PortForwardingUsageParser.java new file mode 100644 index 00000000000..575b39ce675 --- /dev/null +++ b/usage/src/com/cloud/usage/parser/PortForwardingUsageParser.java @@ -0,0 +1,161 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +package com.cloud.usage.parser; + +import java.text.DecimalFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.usage.UsagePortForwardingRuleVO; +import com.cloud.usage.UsageServer; +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVO; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsagePortForwardingRuleDao; +import com.cloud.user.AccountVO; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLocator; + +public class PortForwardingUsageParser { + public static final Logger s_logger = Logger.getLogger(PortForwardingUsageParser.class.getName()); + + private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private static UsagePortForwardingRuleDao m_usagePFRuleDao = _locator.getDao(UsagePortForwardingRuleDao.class); + + public static boolean parse(AccountVO account, Date startDate, Date endDate) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Parsing all PortForwardingRule usage events for account: " + account.getId()); + } + if ((endDate == null) || endDate.after(new Date())) { + endDate = new Date(); + } + + // - query usage_volume table with the following criteria: + // - look for an entry for accountId with start date in the given range + // - look for an entry for accountId with end date in the given range + // - look for an entry for accountId with end date null (currently running vm or owned IP) + // - look for an entry for accountId with start date before given range *and* end date after given range + List usagePFs = m_usagePFRuleDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0); + + if(usagePFs.isEmpty()){ + s_logger.debug("No port forwarding usage events for this period"); + return true; + } + + // This map has both the running time *and* the usage amount. + Map> usageMap = new HashMap>(); + Map pfMap = new HashMap(); + + // loop through all the port forwarding rule, create a usage record for each + for (UsagePortForwardingRuleVO usagePF : usagePFs) { + long pfId = usagePF.getId(); + String key = ""+pfId; + + pfMap.put(key, new PFInfo(pfId, usagePF.getZoneId())); + + Date pfCreateDate = usagePF.getCreated(); + Date pfDeleteDate = usagePF.getDeleted(); + + if ((pfDeleteDate == null) || pfDeleteDate.after(endDate)) { + pfDeleteDate = endDate; + } + + // clip the start date to the beginning of our aggregation range if the vm has been running for a while + if (pfCreateDate.before(startDate)) { + pfCreateDate = startDate; + } + + long currentDuration = (pfDeleteDate.getTime() - pfCreateDate.getTime()) + 1; // make sure this is an inclusive check for milliseconds (i.e. use n - m + 1 to find total number of millis to charge) + + + updatePFUsageData(usageMap, key, usagePF.getId(), currentDuration); + } + + for (String pfIdKey : usageMap.keySet()) { + Pair sgtimeInfo = usageMap.get(pfIdKey); + long useTime = sgtimeInfo.second().longValue(); + + // Only create a usage record if we have a runningTime of bigger than zero. 
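+            // Illustrative example (not from the original patch): a rule present for the whole aggregation hour accumulates roughly 3,600,000 ms here, which createUsageRecord() below converts to hours and records as "1 Hrs".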
+ if (useTime > 0L) { + PFInfo info = pfMap.get(pfIdKey); + createUsageRecord(UsageTypes.PORT_FORWARDING_RULE, useTime, startDate, endDate, account, info.getId(), info.getZoneId() ); + } + } + + return true; + } + + private static void updatePFUsageData(Map> usageDataMap, String key, long pfId, long duration) { + Pair pfUsageInfo = usageDataMap.get(key); + if (pfUsageInfo == null) { + pfUsageInfo = new Pair(new Long(pfId), new Long(duration)); + } else { + Long runningTime = pfUsageInfo.second(); + runningTime = new Long(runningTime.longValue() + duration); + pfUsageInfo = new Pair(pfUsageInfo.first(), runningTime); + } + usageDataMap.put(key, pfUsageInfo); + } + + private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long pfId, long zoneId) { + // Our smallest increment is hourly for now + if (s_logger.isDebugEnabled()) { + s_logger.debug("Total running time " + runningTime + "ms"); + } + + float usage = runningTime / 1000f / 60f / 60f; + + DecimalFormat dFormat = new DecimalFormat("#.######"); + String usageDisplay = dFormat.format(usage); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating usage record for port forwarding rule: " + pfId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId()); + } + + // Create the usage record + String usageDesc = "Port Forwarding Rule: "+pfId+" usage time"; + + //ToDo: get zone id + UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, + new Double(usage), null, null, null, null, pfId, null, startDate, endDate); + m_usageDao.persist(usageRecord); + } + + private static class PFInfo { + private long id; + private long zoneId; + + public PFInfo(long id, long zoneId) { + this.id = id; + this.zoneId = zoneId; + } + public long getZoneId() { + return zoneId; + } + public long getId() { + return id; + } + } + +} diff --git a/usage/src/com/cloud/usage/parser/StorageUsageParser.java b/usage/src/com/cloud/usage/parser/StorageUsageParser.java new file mode 100644 index 00000000000..6faf56fb869 --- /dev/null +++ b/usage/src/com/cloud/usage/parser/StorageUsageParser.java @@ -0,0 +1,207 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +package com.cloud.usage.parser; + +import java.text.DecimalFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.usage.StorageTypes; +import com.cloud.usage.UsageServer; +import com.cloud.usage.UsageStorageVO; +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVO; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsageStorageDao; +import com.cloud.user.AccountVO; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLocator; + +public class StorageUsageParser { + public static final Logger s_logger = Logger.getLogger(StorageUsageParser.class.getName()); + + private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private static UsageStorageDao m_usageStorageDao = _locator.getDao(UsageStorageDao.class); + + public static boolean parse(AccountVO account, Date startDate, Date endDate) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Parsing all Storage usage events for account: " + account.getId()); + } + if ((endDate == null) || endDate.after(new Date())) { + endDate = new Date(); + } + + // - query usage_volume table with the following criteria: + // - look for an entry for accountId with start date in the given range + // - look for an entry for accountId with end date in the given range + // - look for an entry for accountId with end date null (currently running vm or owned IP) + // - look for an entry for accountId with start date before given range *and* end date after given range + List usageUsageStorages = m_usageStorageDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0); + + if(usageUsageStorages.isEmpty()){ + s_logger.debug("No Storage usage events for this period"); + return true; + } + + // This map has both the running time *and* the usage amount. + Map> usageMap = new HashMap>(); + + Map storageMap = new HashMap(); + + // loop through all the usage volumes, create a usage record for each + for (UsageStorageVO usageStorage : usageUsageStorages) { + long storageId = usageStorage.getId(); + int storage_type = usageStorage.getStorageType(); + long size = usageStorage.getSize(); + long zoneId = usageStorage.getZoneId(); + Long sourceId = usageStorage.getSourceId(); + + String key = ""+storageId+"Z"+zoneId+"T"+storage_type; + + // store the info in the storage map + storageMap.put(key, new StorageInfo(zoneId, storageId, storage_type, sourceId, size)); + + Date storageCreateDate = usageStorage.getCreated(); + Date storageDeleteDate = usageStorage.getDeleted(); + + if ((storageDeleteDate == null) || storageDeleteDate.after(endDate)) { + storageDeleteDate = endDate; + } + + // clip the start date to the beginning of our aggregation range if the vm has been running for a while + if (storageCreateDate.before(startDate)) { + storageCreateDate = startDate; + } + + long currentDuration = (storageDeleteDate.getTime() - storageCreateDate.getTime()) + 1; // make sure this is an inclusive check for milliseconds (i.e. 
use n - m + 1 to find total number of millis to charge) + + updateStorageUsageData(usageMap, key, usageStorage.getId(), currentDuration); + } + + for (String storageIdKey : usageMap.keySet()) { + Pair storagetimeInfo = usageMap.get(storageIdKey); + long useTime = storagetimeInfo.second().longValue(); + + // Only create a usage record if we have a runningTime of bigger than zero. + if (useTime > 0L) { + StorageInfo info = storageMap.get(storageIdKey); + createUsageRecord(info.getZoneId(), info.getStorageType(), useTime, startDate, endDate, account, info.getStorageId(), info.getSourceId(), info.getSize()); + } + } + + return true; + } + + private static void updateStorageUsageData(Map> usageDataMap, String key, long storageId, long duration) { + Pair volUsageInfo = usageDataMap.get(key); + if (volUsageInfo == null) { + volUsageInfo = new Pair(new Long(storageId), new Long(duration)); + } else { + Long runningTime = volUsageInfo.second(); + runningTime = new Long(runningTime.longValue() + duration); + volUsageInfo = new Pair(volUsageInfo.first(), runningTime); + } + usageDataMap.put(key, volUsageInfo); + } + + private static void createUsageRecord(long zoneId, int type, long runningTime, Date startDate, Date endDate, AccountVO account, long storageId, Long sourceId, long size) { + // Our smallest increment is hourly for now + if (s_logger.isDebugEnabled()) { + s_logger.debug("Total running time " + runningTime + "ms"); + } + + float usage = runningTime / 1000f / 60f / 60f; + + DecimalFormat dFormat = new DecimalFormat("#.######"); + String usageDisplay = dFormat.format(usage); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating Storage usage record for type: "+ type + " with id: " + storageId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId()); + } + + String usageDesc = ""; + Long tmplSourceId = null; + + int usage_type = 0; + switch(type){ + case StorageTypes.TEMPLATE: + usage_type = UsageTypes.TEMPLATE; + usageDesc += "Template "; + tmplSourceId = sourceId; + break; + case StorageTypes.ISO: + usage_type = UsageTypes.ISO; + usageDesc += "ISO "; + break; + case StorageTypes.SNAPSHOT: + usage_type = UsageTypes.SNAPSHOT; + usageDesc += "Snapshot "; + break; + } + // Create the usage record + usageDesc += "Id:"+storageId+" Size:"+size; + + //ToDo: get zone id + UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", usage_type, + new Double(usage), null, null, null, tmplSourceId, storageId, size, startDate, endDate); + m_usageDao.persist(usageRecord); + } + + private static class StorageInfo { + private long zoneId; + private long storageId; + private int storageType; + private Long sourceId; + private long size; + + public StorageInfo(long zoneId, long storageId, int storageType, Long sourceId, long size) { + this.zoneId = zoneId; + this.storageId = storageId; + this.storageType = storageType; + this.sourceId = sourceId; + this.size = size; + } + + public long getZoneId() { + return zoneId; + } + + public long getStorageId() { + return storageId; + } + + public int getStorageType() { + return storageType; + } + + public long getSourceId() { + return sourceId; + } + + + public long getSize() { + return size; + } + } +} diff --git a/usage/src/com/cloud/usage/parser/UsageParser.java b/usage/src/com/cloud/usage/parser/UsageParser.java new file mode 100644 index 00000000000..b628a4e39c6 --- /dev/null +++ b/usage/src/com/cloud/usage/parser/UsageParser.java 
@@ -0,0 +1,37 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +package com.cloud.usage.parser; + +import java.util.Date; + +import org.apache.log4j.Logger; + +public abstract class UsageParser implements Runnable { + public static final Logger s_logger = Logger.getLogger(UsageParser.class.getName()); + + public void run() { + try { + parse(null); + } catch (Exception e) { + s_logger.warn("Error while parsing usage events", e); + } + } + + public abstract void parse(Date endDate); +} diff --git a/usage/src/com/cloud/usage/parser/VMInstanceUsageParser.java b/usage/src/com/cloud/usage/parser/VMInstanceUsageParser.java new file mode 100644 index 00000000000..93bb324d3be --- /dev/null +++ b/usage/src/com/cloud/usage/parser/VMInstanceUsageParser.java @@ -0,0 +1,202 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +package com.cloud.usage.parser; + +import java.text.DecimalFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.usage.UsageServer; +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVMInstanceVO; +import com.cloud.usage.UsageVO; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsageVMInstanceDao; +import com.cloud.user.AccountVO; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLocator; + +public class VMInstanceUsageParser { + public static final Logger s_logger = Logger.getLogger(VMInstanceUsageParser.class.getName()); + + private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private static UsageVMInstanceDao m_usageInstanceDao = _locator.getDao(UsageVMInstanceDao.class); + + public static boolean parse(AccountVO account, Date startDate, Date endDate) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Parsing all VMInstance usage events for account: " + account.getId()); + } + if ((endDate == null) || endDate.after(new Date())) { + endDate = new Date(); + } + + // - query usage_vm_instance table with the following criteria: + // - look for an entry for accountId with start date in the given range + // - look for an entry for accountId with end date in the given range + // - look for an entry for accountId with end date null (currently running vm or owned IP) + // - look for an entry for accountId with start date before given range *and* end date after given range + List usageInstances = m_usageInstanceDao.getUsageRecords(account.getId(), startDate, endDate); +//ToDo: Add domainID for getting usage records + + // This map has both the running time *and* the usage amount. + Map> usageVMUptimeMap = new HashMap>(); + Map> allocatedVMMap = new HashMap>(); + + Map vmServiceOfferingMap = new HashMap(); + + // loop through all the usage instances, create a usage record for each + for (UsageVMInstanceVO usageInstance : usageInstances) { + long vmId = usageInstance.getVmInstanceId(); + long soId = usageInstance.getSerivceOfferingId(); + long zoneId = usageInstance.getZoneId(); + long tId = usageInstance.getTemplateId(); + int usageType = usageInstance.getUsageType(); + String key = vmId + "-" + soId + "-" + usageType; + + // store the info in the service offering map + vmServiceOfferingMap.put(key, new VMInfo(vmId, zoneId, soId, tId, usageInstance.getHypervisorType())); + + Date vmStartDate = usageInstance.getStartDate(); + Date vmEndDate = usageInstance.getEndDate(); + + if ((vmEndDate == null) || vmEndDate.after(endDate)) { + vmEndDate = endDate; + } + + // clip the start date to the beginning of our aggregation range if the vm has been running for a while + if (vmStartDate.before(startDate)) { + vmStartDate = startDate; + } + + long currentDuration = (vmEndDate.getTime() - vmStartDate.getTime()) + 1; // make sure this is an inclusive check for milliseconds (i.e. 
use n - m + 1 to find total number of millis to charge) + + switch (usageType) { + case UsageTypes.ALLOCATED_VM: + updateVmUsageData(allocatedVMMap, key, usageInstance.getVmName(), currentDuration); + break; + case UsageTypes.RUNNING_VM: + updateVmUsageData(usageVMUptimeMap, key, usageInstance.getVmName(), currentDuration); + break; + } + } + + for (String vmIdKey : usageVMUptimeMap.keySet()) { + Pair vmUptimeInfo = usageVMUptimeMap.get(vmIdKey); + long runningTime = vmUptimeInfo.second().longValue(); + + // Only create a usage record if we have a runningTime of bigger than zero. + if (runningTime > 0L) { + VMInfo info = vmServiceOfferingMap.get(vmIdKey); + createUsageRecord(UsageTypes.RUNNING_VM, runningTime, startDate, endDate, account, info.getVirtualMachineId(), vmUptimeInfo.first(), info.getZoneId(), + info.getServiceOfferingId(), info.getTemplateId(), info.getHypervisorType()); + } + } + + for (String vmIdKey : allocatedVMMap.keySet()) { + Pair vmAllocInfo = allocatedVMMap.get(vmIdKey); + long allocatedTime = vmAllocInfo.second().longValue(); + + // Only create a usage record if we have a runningTime of bigger than zero. + if (allocatedTime > 0L) { + VMInfo info = vmServiceOfferingMap.get(vmIdKey); + createUsageRecord(UsageTypes.ALLOCATED_VM, allocatedTime, startDate, endDate, account, info.getVirtualMachineId(), vmAllocInfo.first(), info.getZoneId(), + info.getServiceOfferingId(), info.getTemplateId(), info.getHypervisorType()); + } + } + + return true; + } + + private static void updateVmUsageData(Map> usageDataMap, String key, String vmName, long duration) { + Pair vmUsageInfo = usageDataMap.get(key); + if (vmUsageInfo == null) { + vmUsageInfo = new Pair(vmName, new Long(duration)); + } else { + Long runningTime = vmUsageInfo.second(); + runningTime = new Long(runningTime.longValue() + duration); + vmUsageInfo = new Pair(vmUsageInfo.first(), runningTime); + } + usageDataMap.put(key, vmUsageInfo); + } + + private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long vmId, String vmName, long zoneId, long serviceOfferingId, long templateId, String hypervisorType) { + // Our smallest increment is hourly for now + if (s_logger.isDebugEnabled()) { + s_logger.debug("Total running time " + runningTime + "ms"); + } + + float usage = runningTime / 1000f / 60f / 60f; + + DecimalFormat dFormat = new DecimalFormat("#.######"); + String usageDisplay = dFormat.format(usage); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating VM usage record for vm: " + vmName + ", type: " + type + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId()); + } + + // Create the usage record + String usageDesc = vmName; + if (type == UsageTypes.ALLOCATED_VM) { + usageDesc += " allocated"; + } else { + usageDesc += " running time"; + } + usageDesc += " (ServiceOffering: " + serviceOfferingId + ") (Template: " + templateId + ")"; + UsageVO usageRecord = new UsageVO(Long.valueOf(zoneId), account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, + new Double(usage), Long.valueOf(vmId), vmName, Long.valueOf(serviceOfferingId), Long.valueOf(templateId), Long.valueOf(vmId), startDate, endDate, hypervisorType); + m_usageDao.persist(usageRecord); + } + + private static class VMInfo { + private long virtualMachineId; + private long zoneId; + private long serviceOfferingId; + private long templateId; + private String hypervisorType; + + public VMInfo(long vmId, long 
zId, long soId, long tId, String hypervisorType) { + virtualMachineId = vmId; + zoneId = zId; + serviceOfferingId = soId; + templateId = tId; + this.hypervisorType = hypervisorType; + } + + public long getZoneId() { + return zoneId; + } + public long getVirtualMachineId() { + return virtualMachineId; + } + public long getServiceOfferingId() { + return serviceOfferingId; + } + public long getTemplateId() { + return templateId; + } + private String getHypervisorType(){ + return hypervisorType; + } + } +} diff --git a/usage/src/com/cloud/usage/parser/VolumeUsageParser.java b/usage/src/com/cloud/usage/parser/VolumeUsageParser.java new file mode 100644 index 00000000000..85f3005a266 --- /dev/null +++ b/usage/src/com/cloud/usage/parser/VolumeUsageParser.java @@ -0,0 +1,185 @@ +/** + * Copyright (C) 2011 Citrix Systems, Inc. All rights reserved + * + * + * This software is licensed under the GNU General Public License v3 or later. + * + * It is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +package com.cloud.usage.parser; + +import java.text.DecimalFormat; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.Logger; + +import com.cloud.usage.UsageServer; +import com.cloud.usage.UsageTypes; +import com.cloud.usage.UsageVO; +import com.cloud.usage.UsageVolumeVO; +import com.cloud.usage.dao.UsageDao; +import com.cloud.usage.dao.UsageVolumeDao; +import com.cloud.user.AccountVO; +import com.cloud.utils.Pair; +import com.cloud.utils.component.ComponentLocator; + +public class VolumeUsageParser { + public static final Logger s_logger = Logger.getLogger(VolumeUsageParser.class.getName()); + + private static ComponentLocator _locator = ComponentLocator.getLocator(UsageServer.Name, "usage-components.xml", "log4j-cloud_usage"); + private static UsageDao m_usageDao = _locator.getDao(UsageDao.class); + private static UsageVolumeDao m_usageVolumeDao = _locator.getDao(UsageVolumeDao.class); + + public static boolean parse(AccountVO account, Date startDate, Date endDate) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Parsing all Volume usage events for account: " + account.getId()); + } + if ((endDate == null) || endDate.after(new Date())) { + endDate = new Date(); + } + + // - query usage_volume table with the following criteria: + // - look for an entry for accountId with start date in the given range + // - look for an entry for accountId with end date in the given range + // - look for an entry for accountId with end date null (currently running vm or owned IP) + // - look for an entry for accountId with start date before given range *and* end date after given range + List usageUsageVols = m_usageVolumeDao.getUsageRecords(account.getId(), account.getDomainId(), startDate, endDate, false, 0); + + if(usageUsageVols.isEmpty()){ + s_logger.debug("No volume usage events for this period"); + return true; + } + + // This map has both the running time *and* the usage amount. 
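+        // Note: keys are the volume id as a string; each Pair carries the volume id together with the accumulated time, in milliseconds, that the volume existed within the aggregation range.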
+ Map> usageMap = new HashMap>(); + + Map diskOfferingMap = new HashMap(); + + // loop through all the usage volumes, create a usage record for each + for (UsageVolumeVO usageVol : usageUsageVols) { + long volId = usageVol.getId(); + Long doId = usageVol.getDiskOfferingId(); + long zoneId = usageVol.getZoneId(); + Long templateId = usageVol.getTemplateId(); + long size = usageVol.getSize(); + String key = ""+volId; + + diskOfferingMap.put(key, new VolInfo(volId, zoneId, doId, templateId, size)); + + Date volCreateDate = usageVol.getCreated(); + Date volDeleteDate = usageVol.getDeleted(); + + if ((volDeleteDate == null) || volDeleteDate.after(endDate)) { + volDeleteDate = endDate; + } + + // clip the start date to the beginning of our aggregation range if the vm has been running for a while + if (volCreateDate.before(startDate)) { + volCreateDate = startDate; + } + + long currentDuration = (volDeleteDate.getTime() - volCreateDate.getTime()) + 1; // make sure this is an inclusive check for milliseconds (i.e. use n - m + 1 to find total number of millis to charge) + + + updateVolUsageData(usageMap, key, usageVol.getId(), currentDuration); + } + + for (String volIdKey : usageMap.keySet()) { + Pair voltimeInfo = usageMap.get(volIdKey); + long useTime = voltimeInfo.second().longValue(); + + // Only create a usage record if we have a runningTime of bigger than zero. + if (useTime > 0L) { + VolInfo info = diskOfferingMap.get(volIdKey); + createUsageRecord(UsageTypes.VOLUME, useTime, startDate, endDate, account, info.getVolumeId(), info.getZoneId(), info.getDiskOfferingId(), info.getTemplateId(), info.getSize()); + } + } + + return true; + } + + private static void updateVolUsageData(Map> usageDataMap, String key, long volId, long duration) { + Pair volUsageInfo = usageDataMap.get(key); + if (volUsageInfo == null) { + volUsageInfo = new Pair(new Long(volId), new Long(duration)); + } else { + Long runningTime = volUsageInfo.second(); + runningTime = new Long(runningTime.longValue() + duration); + volUsageInfo = new Pair(volUsageInfo.first(), runningTime); + } + usageDataMap.put(key, volUsageInfo); + } + + private static void createUsageRecord(int type, long runningTime, Date startDate, Date endDate, AccountVO account, long volId, long zoneId, Long doId, Long templateId, long size) { + // Our smallest increment is hourly for now + if (s_logger.isDebugEnabled()) { + s_logger.debug("Total running time " + runningTime + "ms"); + } + + float usage = runningTime / 1000f / 60f / 60f; + + DecimalFormat dFormat = new DecimalFormat("#.######"); + String usageDisplay = dFormat.format(usage); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating Volume usage record for vol: " + volId + ", usage: " + usageDisplay + ", startDate: " + startDate + ", endDate: " + endDate + ", for account: " + account.getId()); + } + + // Create the usage record + String usageDesc = "Volume Id: "+volId+" usage time"; + + if(templateId != null){ + usageDesc += " (Template: " +templateId+ ")"; + } else if(doId != null){ + usageDesc += " (DiskOffering: " +doId+ ")"; + } + + UsageVO usageRecord = new UsageVO(zoneId, account.getId(), account.getDomainId(), usageDesc, usageDisplay + " Hrs", type, + new Double(usage), null, null, doId, templateId, volId, size, startDate, endDate); + m_usageDao.persist(usageRecord); + } + + private static class VolInfo { + private long volId; + private long zoneId; + private Long diskOfferingId; + private Long templateId; + private long size; + + public VolInfo(long volId, long zoneId, Long 
diskOfferingId, Long templateId, long size) { + this.volId = volId; + this.zoneId = zoneId; + this.diskOfferingId = diskOfferingId; + this.templateId = templateId; + this.size = size; + } + public long getZoneId() { + return zoneId; + } + public long getVolumeId() { + return volId; + } + public Long getDiskOfferingId() { + return diskOfferingId; + } + public Long getTemplateId() { + return templateId; + } + public long getSize() { + return size; + } + } +} diff --git a/utils/conf/db.properties b/utils/conf/db.properties index e90b88f702b..c5346c6ae98 100644 --- a/utils/conf/db.properties +++ b/utils/conf/db.properties @@ -24,7 +24,7 @@ db.cloud.removeAbandoned=false db.cloud.removeAbandonedTimeout=300 db.cloud.logAbandoned=true db.cloud.poolPreparedStatements=false -db.cloud.url.params=prepStmtCacheSize=517&cachePrepStmts=true +db.cloud.url.params=prepStmtCacheSize=517&cachePrepStmts=true&prepStmtCacheSqlLimit=4096 # usage database settings db.usage.username=cloud diff --git a/utils/src/com/cloud/utils/db/Transaction.java b/utils/src/com/cloud/utils/db/Transaction.java index 18ea3e97be5..a7b4aa45b24 100755 --- a/utils/src/com/cloud/utils/db/Transaction.java +++ b/utils/src/com/cloud/utils/db/Transaction.java @@ -992,11 +992,18 @@ public class Transaction { final boolean cloudLogAbandoned = Boolean.parseBoolean(dbProps.getProperty("db.cloud.logAbandoned")); final boolean cloudPoolPreparedStatements = Boolean.parseBoolean(dbProps.getProperty("db.cloud.poolPreparedStatements")); final String url = dbProps.getProperty("db.cloud.url.params"); + final boolean useSSL = Boolean.parseBoolean(dbProps.getProperty("db.cloud.useSSL")); + if(useSSL){ + System.setProperty("javax.net.ssl.keyStore", dbProps.getProperty("db.cloud.keyStore")); + System.setProperty("javax.net.ssl.keyStorePassword", dbProps.getProperty("db.cloud.keyStorePassword")); + System.setProperty("javax.net.ssl.trustStore", dbProps.getProperty("db.cloud.trustStore")); + System.setProperty("javax.net.ssl.trustStorePassword", dbProps.getProperty("db.cloud.trustStorePassword")); + } final GenericObjectPool cloudConnectionPool = new GenericObjectPool(null, cloudMaxActive, GenericObjectPool.DEFAULT_WHEN_EXHAUSTED_ACTION, cloudMaxWait, cloudMaxIdle, cloudTestOnBorrow, false, cloudTimeBtwEvictionRunsMillis, 1, cloudMinEvcitableIdleTimeMillis, cloudTestWhileIdle); final ConnectionFactory cloudConnectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://"+cloudHost + ":" + cloudPort + "/" + cloudDbName + - "?autoReconnect="+cloudAutoReconnect + (url != null ? "&" + url : ""), cloudUsername, cloudPassword); + "?autoReconnect="+cloudAutoReconnect + (url != null ? "&" + url : "")+ (useSSL ? "&useSSL=true" : ""), cloudUsername, cloudPassword); final KeyedObjectPoolFactory poolableObjFactory = (cloudPoolPreparedStatements ? 
new StackKeyedObjectPoolFactory() : null); final PoolableConnectionFactory cloudPoolableConnectionFactory = new PoolableConnectionFactory(cloudConnectionFactory, cloudConnectionPool, poolableObjFactory, cloudValidationQuery, false, false, isolationLevel); diff --git a/utils/src/com/cloud/utils/ssh/SSHKeysHelper.java b/utils/src/com/cloud/utils/ssh/SSHKeysHelper.java index 85eae351976..8f286bb9c2d 100644 --- a/utils/src/com/cloud/utils/ssh/SSHKeysHelper.java +++ b/utils/src/com/cloud/utils/ssh/SSHKeysHelper.java @@ -18,16 +18,16 @@ package com.cloud.utils.ssh; -import com.jcraft.jsch.JSchException; -import com.jcraft.jsch.KeyPair; -import com.jcraft.jsch.JSch; - import java.io.ByteArrayOutputStream; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import org.apache.commons.codec.binary.Base64; +import com.jcraft.jsch.JSch; +import com.jcraft.jsch.JSchException; +import com.jcraft.jsch.KeyPair; + public class SSHKeysHelper { private KeyPair keyPair; @@ -56,6 +56,9 @@ public class SSHKeysHelper { public static String getPublicKeyFingerprint(String publicKey) { String key[] = publicKey.split(" "); + if (key.length < 2) { + throw new RuntimeException("Incorrect public key is passed in"); + } byte[] keyBytes = Base64.decodeBase64(key[1]); MessageDigest md5 = null; diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java index e86b604aa33..95685d5bbf1 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/mo/VirtualMachineMO.java @@ -221,8 +221,25 @@ public class VirtualMachineMO extends BaseMO { String result = _context.getServiceUtil().waitForTask(morTask); if(result.equals("sucess")) { _context.waitForTaskProgressDone(morTask); + + // It seems that even when a power-off task is reported done, the VM power state may still not be updated; + // wait up to 5 seconds to avoid a race condition with immediately following operations + // that rely on a powered-off VM + long startTick = System.currentTimeMillis(); + while(getPowerState() != VirtualMachinePowerState.poweredOff && System.currentTimeMillis() - startTick < 5000) { + try { + Thread.sleep(1000); + } catch(InterruptedException e) { + } + } return true; } else { + if(getPowerState() == VirtualMachinePowerState.poweredOff) { + // to help deal with a possible race condition + s_logger.info("Current power-off task failed.
However, VM has been switched to the state we are expecting for"); + return true; + } + s_logger.error("VMware powerOffVM_Task failed due to " + TaskMO.getTaskFailureInfo(_context, morTask)); } diff --git a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java index 4e458b4e4f7..da78ef5ef4c 100755 --- a/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java +++ b/vmware-base/src/com/cloud/hypervisor/vmware/util/VmwareHelper.java @@ -92,11 +92,13 @@ public class VmwareHelper { connectInfo.setStartConnected(connectOnStart); nic.setAddressType("Manual"); nic.setConnectable(connectInfo); - nic.setMacAddress(macAddress); + nic.setMacAddress(macAddress); + +/* nic.setControllerKey(vmMo.getPCIDeviceControllerKey()); - if(deviceNumber < 0) - deviceNumber = vmMo.getNextPCIDeviceNumber(); + deviceNumber = vmMo.getNextPCIDeviceNumber(); +*/ nic.setUnitNumber(deviceNumber); nic.setKey(-contextNumber); return nic; @@ -106,9 +108,6 @@ public class VmwareHelper { public static VirtualDevice prepareDiskDevice(VirtualMachineMO vmMo, int controllerKey, String vmdkDatastorePath, int sizeInMb, ManagedObjectReference morDs, int deviceNumber, int contextNumber) throws Exception { - if(controllerKey < 0) - controllerKey = vmMo.getIDEDeviceControllerKey(); - VirtualDisk disk = new VirtualDisk(); VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); @@ -119,9 +118,12 @@ public class VmwareHelper { backingInfo.setFileName(vmdkDatastorePath); disk.setBacking(backingInfo); + if(controllerKey < 0) + controllerKey = vmMo.getIDEDeviceControllerKey(); if(deviceNumber < 0) deviceNumber = vmMo.getNextDeviceNumber(controllerKey); - disk.setControllerKey(controllerKey); + disk.setControllerKey(controllerKey); + disk.setKey(-contextNumber); disk.setUnitNumber(deviceNumber); disk.setCapacityInKB(sizeInMb*1024); @@ -187,9 +189,9 @@ public class VmwareHelper { if(controllerKey < 0) controllerKey = vmMo.getIDEDeviceControllerKey(); disk.setControllerKey(controllerKey); - if(deviceNumber < 0) - deviceNumber = vmMo.getNextDeviceNumber(controllerKey); + deviceNumber = vmMo.getNextDeviceNumber(controllerKey); + disk.setKey(-contextNumber); disk.setUnitNumber(deviceNumber); disk.setCapacityInKB(sizeInMb*1024); @@ -208,9 +210,6 @@ public class VmwareHelper { assert(vmdkDatastorePathChain != null); assert(vmdkDatastorePathChain.length >= 1); - if(controllerKey < 0) - controllerKey = vmMo.getIDEDeviceControllerKey(); - VirtualDisk disk = new VirtualDisk(); VirtualDiskFlatVer2BackingInfo backingInfo = new VirtualDiskFlatVer2BackingInfo(); @@ -227,6 +226,8 @@ public class VmwareHelper { disk.setBacking(backingInfo); + if(controllerKey < 0) + controllerKey = vmMo.getIDEDeviceControllerKey(); if(deviceNumber < 0) deviceNumber = vmMo.getNextDeviceNumber(controllerKey); @@ -267,12 +268,13 @@ public class VmwareHelper { VirtualCdrom cdRom = (VirtualCdrom )vmMo.getIsoDevice(); if(cdRom == null) { newCdRom = true; - cdRom = new VirtualCdrom(); + cdRom = new VirtualCdrom(); + assert(vmMo.getIDEDeviceControllerKey() >= 0); cdRom.setControllerKey(vmMo.getIDEDeviceControllerKey()); - if(deviceNumber < 0) deviceNumber = vmMo.getNextIDEDeviceNumber(); + cdRom.setUnitNumber(deviceNumber); cdRom.setKey(-contextNumber); } diff --git a/wscript_build b/wscript_build index a08d574cb31..844d25a645c 100644 --- a/wscript_build +++ b/wscript_build @@ -211,6 +211,8 @@ def build_dirs_symlinks (): ("${AGENTLIBDIR}/images", 
'${CPLIBDIR}/images'), ("${AGENTLIBDIR}/js", '${CPLIBDIR}/js'), ("${AGENTLIBDIR}/ui", '${CPLIBDIR}/ui'), + ("${MSCONF}/server.xml", '${MSCONF}/server-nonssl.xml'), + ("${MSCONF}/tomcat6.conf", '${MSCONF}/tomcat6-nonssl.conf'), ] for lnk,dst in symlinks: bld.symlink_as(lnk,Utils.subst_vars(dst,bld.env))