From 14ae76781ea5fc7edb770854ad9c040cf2ab812a Mon Sep 17 00:00:00 2001 From: "Manuel Amador (Rudd-O)" Date: Wed, 11 Aug 2010 16:02:57 -0700 Subject: [PATCH] Committing Kris' big merge --- .gitignore | 12 + HACKING | 383 +- INSTALL | 155 + README | 191 +- README.html | 10555 ++++++++++++++++ agent/libexec/agent-runner.in | 4 +- .../computing/LibvirtComputingResource.java | 100 +- .../src/com/cloud/hypervisor/Hypervisor.java | 0 .../src/com/cloud/vm/VmCharacteristics.java | 47 +- build.xml | 42 +- build/build-cloud.xml | 9 +- build/package.xml | 7 +- cloud.spec | 41 +- console-proxy/libexec/console-proxy-runner.in | 4 +- core/.classpath | 21 + .../agent/api/BackupSnapshotCommand.java | 8 +- .../agent/api/storage/CreateCommand.java | 10 +- .../vmware/resource/VmwareResource.java | 122 + .../xen/resource/CitrixResourceBase.java | 84 +- .../network/ExteralIpAddressAllocator.java | 10 +- .../com/cloud/server/ManagementServer.java | 21 +- .../src/com/cloud/storage/DiskOfferingVO.java | 6 +- .../src/com/cloud/storage/StorageManager.java | 13 +- .../cloud/storage/dao/DiskOfferingDao.java | 4 +- .../storage/dao/DiskOfferingDaoImpl.java | 12 + .../cloud/storage/dao/VMTemplateDaoImpl.java | 1 - .../cloud/storage/dao/VMTemplateHostDao.java | 2 + .../storage/dao/VMTemplateHostDaoImpl.java | 7 + core/src/com/cloud/vm/UserVmVO.java | 18 + core/src/com/cloud/vm/VMInstanceVO.java | 21 +- core/test/com/cloud/vmware/TestVMWare.java | 651 + debian/rules | 2 +- python/lib/cloud_utils.py | 6 +- scripts/.project | 4 +- scripts/.pydevproject | 7 + scripts/storage/qcow2/createtmplt.sh | 7 +- scripts/storage/qcow2/managesnapshot.sh | 17 +- scripts/vm/hypervisor/xenserver/launch_hb.sh | 2 +- .../vm/hypervisor/xenserver/setupxenserver.sh | 5 + scripts/vm/network/vnet/modifyvlan.sh | 9 +- .../cloud/api/commands/CreateTemplateCmd.java | 8 - .../cloud/api/commands/CreateVolumeCmd.java | 47 +- .../com/cloud/api/commands/DeployVMCmd.java | 18 +- .../cloud/api/commands/ListTemplatesCmd.java | 4 + .../com/cloud/api/commands/RecoverVMCmd.java | 5 +- .../api/commands/RegisterTemplateCmd.java | 3 +- .../cloud/api/commands/UpdateTemplateCmd.java | 1 + .../UpdateTemplateOrIsoPermissionsCmd.java | 6 + .../CreatePrivateTemplateExecutor.java | 1 + .../CreatePrivateTemplateResultObject.java | 11 + .../async/executor/DeployVMExecutor.java | 2 +- .../cloud/async/executor/DeployVMParam.java | 10 +- .../executor/VolumeOperationExecutor.java | 2 +- .../async/executor/VolumeOperationParam.java | 10 +- .../configuration/ConfigurationManager.java | 1 + .../ConfigurationManagerImpl.java | 19 +- .../consoleproxy/ConsoleProxyManagerImpl.java | 7 +- .../discoverer/VmwareServerDiscoverer.java | 40 + .../xen/discoverer/XcpServerDiscoverer.java | 6 +- .../com/cloud/network/NetworkManagerImpl.java | 7 +- .../cloud/server/ConfigurationServerImpl.java | 3 +- .../cloud/server/ManagementServerImpl.java | 82 +- .../com/cloud/storage/StorageManagerImpl.java | 96 +- .../SecondaryStorageManagerImpl.java | 2 +- .../storage/snapshot/SnapshotManagerImpl.java | 29 +- server/src/com/cloud/vm/UserVmManager.java | 13 +- .../src/com/cloud/vm/UserVmManagerImpl.java | 204 +- ui/content/tab_accounts.html | 2 + ui/content/tab_configuration.html | 6 +- ui/content/tab_events.html | 2 + ui/content/tab_hosts.html | 2 + ui/content/tab_instances.html | 16 +- ui/content/tab_networking.html | 2 +- ui/content/tab_storage.html | 2 + ui/content/tab_templates.html | 6 +- ui/index.html | 8 - .../tab_domains.html => jsp/tab_domains.jsp} | 296 +- ui/jsp/test.jsp | 8 - 
ui/resources/resource.properties | 5 + ui/resources/resource_zh.properties | 5 + ui/scripts/cloud.core.accounts.js | 598 +- ui/scripts/cloud.core.configuration.js | 3396 +++-- ui/scripts/cloud.core.domains.js | 744 +- ui/scripts/cloud.core.events.js | 212 +- ui/scripts/cloud.core.hosts.js | 1354 +- ui/scripts/cloud.core.init.js | 32 +- ui/scripts/cloud.core.instances.js | 5136 ++++---- ui/scripts/cloud.core.storage.js | 1047 +- ui/scripts/cloud.core.templates.js | 2109 ++- utils/src/com/cloud/utils/db/Merovingian.java | 30 +- wscript | 14 +- wscript_build | 2 +- 92 files changed, 20362 insertions(+), 7929 deletions(-) create mode 100644 INSTALL create mode 100644 README.html rename {core => api}/src/com/cloud/hypervisor/Hypervisor.java (100%) rename {core => api}/src/com/cloud/vm/VmCharacteristics.java (56%) create mode 100644 core/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java create mode 100644 core/test/com/cloud/vmware/TestVMWare.java create mode 100644 scripts/.pydevproject create mode 100644 server/src/com/cloud/hypervisor/vmware/discoverer/VmwareServerDiscoverer.java rename ui/{content/tab_domains.html => jsp/tab_domains.jsp} (87%) delete mode 100644 ui/jsp/test.jsp create mode 100644 ui/resources/resource.properties create mode 100644 ui/resources/resource_zh.properties diff --git a/.gitignore b/.gitignore index e69de29bb2d..18b6d4a9629 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1,12 @@ +build/build.number +bin +cloudstack-proprietary +.lock-wscript +artifacts +.waf-* +waf-* +target +override +premium +.metadata +dist diff --git a/HACKING b/HACKING index 140ae446134..b6a16c3ef5e 100644 --- a/HACKING +++ b/HACKING @@ -165,11 +165,14 @@ independently from the install directory: from happening. Or resort to the override method discussed above (search for "override" in this document). - 2) If you haven't done so yet, set the Console Proxy up: + 2) If you haven't done so yet, set the Agent up: - run $BINDIR/cloud-setup-agent - 3) Execute $LIBEXECDIR/agent-runner as root + 3) Execute ./waf run_agent as root + + this will launch sudo and require your root password unless you have + set sudo up not to ask for it - Console Proxy (Linux-only): @@ -187,7 +190,183 @@ independently from the install directory: - run $BINDIR/cloud-setup-console-proxy - 3) Execute $LIBEXECDIR/console-proxy-runner as root + 3) Execute ./waf run_console_proxy + + this will launch sudo and require your root password unless you have + set sudo up not to ask for it + + +--------------------------------------------------------------------- +BUILD SYSTEM TIPS +--------------------------------------------------------------------- + + +=== Integrating compilation and execution of each component into Eclipse === + +To run the Management Server from Eclipse, set up an External Tool of the +Program variety. Put the path to the waf binary in the Location of the +window, and the source directory as Working Directory. Then specify +"install --preserve-config run" as arguments (without the quotes). You can +now use the Run button in Eclipse to execute the Management Server directly +from Eclipse. You can replace run with debug if you want to run the +Management Server with the Debugging Proxy turned on. + +To run the Agent or Console Proxy from Eclipse, set up an External Tool of +the Program variety just like in the Management Server case. In there, +however, specify "install --preserve-config run_agent" or +"install --preserve-config run_console_proxy" as arguments instead. 
+Remember that you need to set sudo up to not ask you for a password and not +require a TTY, otherwise sudo -- implicitly called by waf run_agent or +waf run_console_proxy -- will refuse to work. + + +=== Building targets selectively === + +You can find out the targets of the build system: + +./waf list_targets + +If you want to run a specific task generator, + +./waf build --targets=patchsubst + +should run just that one (and whatever targets are required to build that +one, of course). + + +=== Common targets === + +* ./waf configure: you must always run configure once, and provide it with + the target installation paths for when you run install later + o --help: will show you all the configure options + o --no-dep-check: will skip dependency checks for java packages + needed to compile (saves 20 seconds when redoing the configure) + o --with-db-user, --with-db-pw, --with-db-host: informs the build + system of the MySQL configuration needed to set up the management + server upon install, and to do deploydb + +* ./waf build: will compile any source files (and, on some projects, will + also perform any variable substitutions on any .in files such as the + MANIFEST files). Build outputs will be in /artifacts/default. + +* ./waf install: will compile if not compiled yet, then execute an install + of the built targets. I had to write a significantly large amount of code + (that is, couple tens of lines of code) to make install work. + +* ./waf run: will run the management server in the foreground + +* ./waf debug: will run the management server in the foreground, and open + port 8787 to connect with the debugger (see the Run / debug options of + waf --help to change that port) + +* ./waf deploydb: deploys the database using the MySQL configuration supplied + with the configuration options when you did ./waf configure. RUN WAF BUILD + FIRST AT LEAST ONCE. + +* ./waf dist: create a source tarball. These tarballs will be distributed + independently on our Web site, and will form the source release of the + Cloud Stack. It is a self-contained release that can be ./waf built and + ./waf installed everywhere. + +* ./waf clean: remove known build products + +* ./waf distclean: remove the artifacts/ directory altogether + +* ./waf uninstall: uninstall all installed files + +* ./waf rpm: build RPM packages + o if the build fails because the system lacks dependencies from our + other modules, waf will attempt to install RPMs from the repos, + then try the build + o it will place the built packages in artifacts/rpmbuild/ + +* ./waf deb: build Debian packages + o if the build fails because the system lacks dependencies from our + other modules, waf will attempt to install DEBs from the repos, + then try the build + o it will place the built packages in artifacts/debbuild/ + +* ./waf uninstallrpms: removes all Cloud.com RPMs from a system (but not + logfiles or modified config files) + +* ./waf viewrpmdeps: displays RPM dependencies declared in the RPM specfile + +* ./waf installrpmdeps: runs Yum to install the packages required to build + the CloudStack + +* ./waf uninstalldebs: removes all Cloud.com DEBs from a system (AND logfiles + AND modified config files) +* ./waf viewdebdeps: displays DEB dependencies declared in the project + debian/control file + +* ./waf installdebdeps: runs aptitude to install the packages required to + build our software + + +=== Overriding certain source files === + +Earlier in this document we explored overriding configuration files. 
+Overrides are not limited to configuration files. + +If you want to provide your own server-setup.xml or SQL files in client/setup: + + * create a directory override inside the client/setup folder + * place your file that should override a file in client/setup there + +There's also override support in client/tomcatconf and agent/conf. + + +=== Environment substitutions === + +Any file named "something.in" has its tokens (@SOMETOKEN@) automatically +substituted for the corresponding build environment variable. The build +environment variables are generally constructed at configure time and +controllable by the --command-line-parameters to waf configure, and should +be available as a list of variables inside the file +artifacts/c4che/build.default.py. + + +=== The prerelease mechanism === + +The prerelease mechanism (--prerelease=BRANCHNAME) allows developers and +builders to build packages with pre-release Release tags. The Release tags +are constructed in such a way that both the build number and the branch name +is included, so developers can push these packages to repositories and upgrade +them using yum or aptitude without having to delete packages manually and +install packages manually every time a new build is done. Any package built +with the prerelease mechanism gets a standard X.Y.Z version number -- and, +due to the way that the prerelease Release tags are concocted, always upgrades +any older prerelease package already present on any system. The prerelease +mechanism must never be used to create packages that are intended to be +released as stable software to the general public. + +Relevant documentation: + + http://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version + http://fedoraproject.org/wiki/PackageNamingGuidelines#Pre-Release_packages + +Everything comes together on the build server in the following way: + + +=== SCCS info === + +When building a source distribution (waf dist), or RPM/DEB distributions +(waf deb / waf rpm), waf will automatically detect the relevant source code +control information if the git command is present on the machine where waf +is run, and it will write the information to a file called sccs-info inside +the source tarball / install it into /usr/share/doc/cloud*/sccs-info when +installing the packages. + +If this source code conrol information cannot be calculated, then the old +sccs-info file is preserved across dist runs if it exists, and if it did +not exist before, the fact that the source could not be properly tracked +down to a repository is noted in the file. + + +=== Debugging the build system === + +Almost all targets have names. waf build -vvvvv --zones=task will give you +the task names that you can use in --targets. --------------------------------------------------------------------- @@ -195,6 +374,22 @@ UNDERSTANDING THE BUILD SYSTEM --------------------------------------------------------------------- +=== Documentation for the build system === + +The first and foremost reference material: + +- http://freehackers.org/~tnagy/wafbook/index.html + +Examples + +- http://code.google.com/p/waf/wiki/CodeSnippets +- http://code.google.com/p/waf/w/list + +FAQ + +- http://code.google.com/p/waf/wiki/FAQ + + === Why waf === The CloudStack uses waf to build itself. waf is a relative newcomer @@ -274,4 +469,184 @@ If you add to the ant build files a new ant target that uses the compile-java macro, waf will automatically pick it up, along with its depends= and JAR name attributes. 
In general, all you need to do is add the produced JAR name to the packaging manifests (cloud.spec and -debian/{name-of-package}.install). \ No newline at end of file +debian/{name-of-package}.install). + + +--------------------------------------------------------------------- +FOR ANT USERS +--------------------------------------------------------------------- + + +If you are using Ant directly instead of using waf, these instructions apply to you: + +in this document, the example instructions are based on local source repository rooted at c:\root. You are free to locate it to anywhere you'd like to. +3.1 Setup developer build type + + 1) Go to c:\cloud\java\build directory + + 2) Copy file build-cloud.properties.template to file build-cloud.properties, then modify some of the parameters to match your local setup. The template properties file should have content as + + debug=true + debuglevel=lines,vars,source + tomcat.home=$TOMCAT_HOME --> change to your local Tomcat root directory such as c:/apache-tomcat-6.0.18 + debug.jvmarg=-Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n + deprecation=off + build.type=developer + target.compat.version=1.5 + source.compat.version=1.5 + branding.name=default + + 3) Make sure the following Environment variables and Path are set: + +set enviroment variables: +CATALINA_HOME: +JAVA_HOME: +CLOUD_HOME: +MYSQL_HOME: + +update the path to include + +MYSQL_HOME\bin + + 4) Clone a full directory tree of C:\cloud\java\build\deploy\production to C:\cloud\java\build\deploy\developer + + You can use Windows Explorer to copy the directory tree over. Please note, during your daily development process, whenever you see updates in C:\cloud\java\build\deploy\production, be sure to sync it into C:\cloud\java\build\deploy\developer. +3.2 Common build instructions + +After you have setup the build type, you are ready to perform build and run Management Server alone locally. + +cd java +python waf configure build install + +More at Build system. + +Will install the management server and its requisites to the appropriate place (your Tomcat instance on Windows, /usr/local on Linux). It will also install the agent to /usr/local/cloud/agent (this will change in the future). +4. Database and Server deployment + +After a successful management server build (database deployment scripts use some of the artifacts from build process), you can use database deployment script to deploy and initialize the database. You can find the deployment scripts in C:/cloud/java/build/deploy/db. deploy-db.sh is used to create, populate your DB instance. Please take a look at content of deploy-db.sh for more details + +Before you run the scripts, you should edit C:/cloud/java/build/deploy/developer/db/server-setup-dev.xml to allocate Public and Private IP ranges for your development setup. Ensure that the ranges you pick are unallocated to others. + +Customized VM templates to be populated are in C:/cloud/java/build/deploy/developer/db/templates-dev.sql Edit this file to customize the templates to your needs. + +Deploy the DB by running + +./deploy-db.sh ../developer/db/server-setup-dev.xml ../developer/db/templates-dev.xml +4.1. Management Server Deployment + +ant build-server + +Build Management Server + +ant deploy-server + +Deploy Management Server software to Tomcat environment + +ant debug + +Start Management Server in debug mode. The JVM debug options can be found in cloud-build.properties + +ant run + +Start Management Server in normal mode. + +5. 
Agent deployment + +After a successful build process, you should be able to find build artifacts at distribution directory, in this example case, for developer build type, the artifacts locate at c:\cloud\java\dist\developer, particularly, if you have run + +ant package-agent build command, you should see the agent software be packaged in a single file named agent.zip under c:\cloud\java\dist\developer, together with the agent deployment script deploy-agent.sh. +5.1 Agent Type + +Agent software can be deployed and configured to serve with different roles at run time. In current implementation, there are 3 types of agent configuration, respectively called as Computing Server, Routing Server and Storage Server. + + * When agent software is configured to run as Computing server, it is responsible to host user VMs. Agent software should be running in Xen Dom0 system on computer server machine. + + * When agent software is configured to run as Routing Server, it is responsible to host routing VMs for user virtual network and console proxy system VMs. Routing server serves as the bridge to outside network, the machine that agent software is running should have at least two network interfaces, one towards outside network, one participates the internal VMOps management network. Like computer server, agent software on routing server should also be running in Xen Dom0 system. + + * When agent software is configured to run as Storage server, it is responsible to provide storage service for all VMs. The storage service is based on ZFS running on a Solaris system, agent software on storage server is therefore running under Solaris (actually a Solaris VM), Dom0 systems on computing server and routing server can access the storage service through iScsi initiator. The storage volume will be eventually mounted on Dom0 system and make available to DomU VMs through our agent software. + +5.2 Resource sharing + +All developers can share the same set of agent server machines for development, to make this possible, the concept of instance appears in various places + + * VM names. VM names are structual names, it contains a instance section that can identify VMs from different VMOps cloud instances. VMOps cloud instance name is configured in server configuration parameter AgentManager/instance.name + * iScsi initiator mount point. For Computing servers and Routing servers, the mount point can distinguish the mounted DomU VM images from different agent deployments. The mount location can be specified in agent.properties file with a name-value pair named mount.parent + * iScsi target allocation point. For storage servers, this allocation point can distinguish the storage allocation from different storage agent deployments. The allocation point can be specified in agent.properties file with a name-value pair named parent + +5.4 Deploy agent software + +Before running the deployment scripts, first copy the build artifacts agent.zip and deploy-agent.sh to your personal development directory on agent server machines. By our current convention, you can create your personal development directory that usually locates at /root/your name. In following example, the agent package and deployment scripts are copied to test0.lab.vmops.com and the deployment script file has been marked as executible. 
+ + On build machine, + + scp agent.zip root@test0:/root/your name + + scp deploy-agent.sh root@test0:/root/your name + + On agent server machine + +chmod +x deploy-agent.sh +5.4.1 Deploy agent on computing server + +deploy-agent.sh -d /root//agent -h -t computing -m expert +5.4.2 Deploy agent on routing server + +deploy-agent.sh -d /root//agent -h -t routing -m expert +5.4.3 Deploy agent on storage server + +deploy-agent.sh -d /root//agent -h -t storage -m expert +5.5 Configure agent + +After you have deployed the agent software, you should configure the agent by editing the agent.properties file under /root//agent/conf directory on each of the Routing, Computing and Storage servers. Add/Edit following properties. The rest are defaults that get populated by the agent at runtime. + workers=3 + host= + port=8250 + pod= + zone= + instance= + developer=true + +Following is a sample agent.properties file for Routing server + + workers=3 + id=1 + port=8250 + pod=RC + storage=comstar + zone=RC + type=routing + private.network.nic=xenbr0 + instance=RC + public.network.nic=xenbr1 + developer=true + host=192.168.1.138 +5.5 Running agent + +Edit /root//agent/conf/log4j-cloud.xml to update the location of logs to somewhere under /root/ + +Once you have deployed and configured the agent software, you are ready to launch it. Under the agent root directory (in our example, /root//agent. there is a scrip file named run.sh, you can use it to launch the agent. + +Launch agent in detached background process + +nohup ./run.sh & + +Launch agent in interactive mode + +./run.sh + +Launch agent in debug mode, for example, following command makes JVM listen at TCP port 8787 + +./run.sh -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n + +If agent is launched in debug mode, you may use Eclipse IDE to remotely debug it, please note, when you are sharing agent server machine with others, choose a TCP port that is not in use by someone else. + +Please also note that, run.sh also searches for /etc/cloud directory for agent.properties, make sure it uses the correct agent.properties file! +5.5. Stopping the Agents + +the pid of the agent process is in /var/run/agent..pid + +To Stop the agent: + +kill + + \ No newline at end of file diff --git a/INSTALL b/INSTALL new file mode 100644 index 00000000000..bcf10e20b23 --- /dev/null +++ b/INSTALL @@ -0,0 +1,155 @@ +--------------------------------------------------------------------- +TABLE OF CONTENTS +--------------------------------------------------------------------- + + +1. Really quick start: building and installing a production stack +2. Post-install: setting the CloudStack components up +3. Installation paths: where the stack is installed on your system +4. Uninstalling the CloudStack from your system + + +--------------------------------------------------------------------- +REALLY QUICK START: BUILDING AND INSTALLING A PRODUCTION STACK +--------------------------------------------------------------------- + + +You have two options. Choose one: + +a) Building distribution packages from the source and installing them +b) Building from the source and installing directly from there + + +=== I want to build and install distribution packages === + +This is the recommended way to run your CloudStack cloud. The +advantages are that dependencies are taken care of automatically +for you, and you can verify the integrity of the installed files +using your system's package manager. + +1. As root, install the build dependencies. 
+ + a) Fedora / CentOS: ./waf installrpmdeps + + b) Ubuntu: ./waf installdebdeps + +2. As a non-root user, build the CloudStack packages. + + a) Fedora / CentOS: ./waf rpm + + b) Ubuntu: ./waf deb + +3. As root, install the CloudStack packages. + You can choose which components to install on your system. + + a) Fedora / CentOS: the installable RPMs are in artifacts/rpmbuild + install as root: rpm -ivh artifacts/rpmbuild/RPMS/{x86_64,noarch,i386}/*.rpm + + b) Ubuntu: the installable DEBs are in artifacts/debbuild + install as root: dpkg -i artifacts/debbuild/*.deb + +4. Configure and start the components you intend to run. + Consult the Installation Guide to find out how to + configure each component, and "Installation paths" for information + on where programs, initscripts and config files are installed. + + +=== I want to build and install directly from the source === + +This is the recommended way to run your CloudStack cloud if you +intend to modify the source, if you intend to port the CloudStack to +another distribution, or if you intend to run the CloudStack on a +distribution for which packages are not built. + +1. As root, install the build dependencies. + See below for a list. + +2. As non-root, configure the build. + See below to discover configuration options. + + ./waf configure + +3. As non-root, build the CloudStack. + To learn more, see "Quick guide to developing, building and + installing from source" below. + + ./waf build + +4. As root, install the runtime dependencies. + See below for a list. + +5. As root, Install the CloudStack + + ./waf install + +6. Configure and start the components you intend to run. + Consult the Installation Guide to find out how to + configure each component, and "Installation paths" for information + on where to find programs, initscripts and config files mentioned + in the Installation Guide (paths may vary). + + +=== Dependencies of the CloudStack === + +- Build dependencies: + + 1. FIXME DEPENDENCIES LIST THEM HERE + +- Runtime dependencies: + + 2. FIXME DEPENDENCIES LIST THEM HERE + + +--------------------------------------------------------------------- +INSTALLATION PATHS: WHERE THE STACK IS INSTALLED ON YOUR SYSTEM +--------------------------------------------------------------------- + + +The CloudStack build system installs files on a variety of paths, each +one of which is selectable when building from source. + +- $PREFIX: + the default prefix where the entire stack is installed + defaults to /usr/local on source builds + defaults to /usr on package builds + +- $SYSCONFDIR/cloud: + + the prefix for CloudStack configuration files + defaults to $PREFIX/etc/cloud on source builds + defaults to /etc/cloud on package builds + +- $SYSCONFDIR/init.d: + the prefix for CloudStack initscripts + defaults to $PREFIX/etc/init.d on source builds + defaults to /etc/init.d on package builds + +- $BINDIR: + the CloudStack installs programs there + defaults to $PREFIX/bin on source builds + defaults to /usr/bin on package builds + +- $LIBEXECDIR: + the CloudStack installs service runners there + defaults to $PREFIX/libexec on source builds + defaults to /usr/libexec on package builds (/usr/bin on Ubuntu) + + +--------------------------------------------------------------------- +UNINSTALLING THE CLOUDSTACK FROM YOUR SYSTEM +--------------------------------------------------------------------- + + +- If you installed the CloudStack using packages, use your operating + system package manager to remove the CloudStack packages. 
+ + a) Fedora / CentOS: the installable RPMs are in artifacts/rpmbuild + as root: rpm -qa | grep ^cloud- | xargs rpm -e + + b) Ubuntu: the installable DEBs are in artifacts/debbuild + aptitude purge '~ncloud' + +- If you installed from a source tree: + + ./waf uninstall + diff --git a/README b/README index 5b0f0768dfe..b0478ff475f 100644 --- a/README +++ b/README @@ -8,202 +8,19 @@ cloud. --------------------------------------------------------------------- -TABLE OF CONTENTS +HOW TO INSTALL THE CLOUDSTACK --------------------------------------------------------------------- -1. Really quick start: building and installing a production stack -2. Post-install: setting the CloudStack components up -3. Installation paths: where the stack is installed on your system -4. Uninstalling the CloudStack from your system -5. Be part of the Cloud.com community! +Please refer to the document INSTALL distributed with the source. --------------------------------------------------------------------- -REALLY QUICK START: BUILDING AND INSTALLING A PRODUCTION STACK +HOW TO HACK ON THE CLOUDSTACK --------------------------------------------------------------------- -You have two options. Choose one: - -a) Building distribution packages from the source and installing them -b) Building from the source and installing directly from there - - -=== I want to build and install distribution packages === - -This is the recommended way to run your CloudStack cloud. The -advantages are that dependencies are taken care of automatically -for you, and you can verify the integrity of the installed files -using your system's package manager. - -1. As root, install the build dependencies. - - a) Fedora / CentOS: ./waf installrpmdeps - - b) Ubuntu: ./waf installdebdeps - -2. As a non-root user, build the CloudStack packages. - - a) Fedora / CentOS: ./waf rpm - - b) Ubuntu: ./waf deb - -3. As root, install the CloudStack packages. - You can choose which components to install on your system. - - a) Fedora / CentOS: the installable RPMs are in artifacts/rpmbuild - - b) Ubuntu: the installable DEBs are in artifacts/debbuild - -4. Configure and start the components you intend to run. - See "Setting the CloudStack components up" to find out how to - configure each component, and "Installation paths" for information - on where programs, initscripts and config files are installed. - - -=== I want to build and install directly from the source === - -This is the recommended way to run your CloudStack cloud if you -intend to modify the source, if you intend to port the CloudStack to -another distribution, or if you intend to run the CloudStack on a -distribution for which packages are not built. - -1. As root, install the build dependencies. - See below for a list. - -2. As non-root, configure the build. - See below to discover configuration options. - - ./waf configure - -3. As non-root, build the CloudStack. - To learn more, see "Quick guide to developing, building and - installing from source" below. - - ./waf build - -4. As root, install the runtime dependencies. - See below for a list. - -5. As root, Install the CloudStack - - ./waf install - -6. Configure and start the components you intend to run. - See "Setting the CloudStack components up" to find out how to - configure each component, and "Installation paths" for information - on where programs, initscripts and config files are installed. - - -=== Dependencies of the CloudStack === - -- Build dependencies: - - 1. 
FIXME DEPENDENCIES LIST THEM HERE - -- Runtime dependencies: - - 2. FIXME DEPENDENCIES LIST THEM HERE - - ---------------------------------------------------------------------- -POST-INSTALL: SETTING THE CLOUDSTACK COMPONENTS UP ---------------------------------------------------------------------- - - -The CloudStack installs several components on your system. - -Each component usually installs an initscript on your system, along -with one configuration command that will set your system up to run -said component properly. You must set each component up before -you can run it. The Installation Manual will guide you through the -process of setting each component up, and the section "Installation -paths" will explain where to find the installed files and what each -$VARIABLE means. - - -=== cloud-management: the Management Server === - -This Tomcat-based service runs your cloud and lets you manage it. -Its initscript is called cloud-management, and its setup command is -called cloud-setup-databases. - - -=== cloud-usage: the Usage Monitor === - -This Java-based service accounts usage metrics for your cloud. -Its initscript is called cloud-usage, and it takes its configuration -from the Management Server, so to set the Usage Monitor up, set the -management server up. - - -=== cloud-agent: the Cloud Agent === - -This Java-based service runs virtual machines based on orders from -the Management Service, connecting to it at startup. Its initscript -is called cloud-agent, and its setup command is called cloud-setup-agent. - - -=== cloud-console-proxy: the Cloud Console Proxy === - -This Java-based service provides access to virtual machine consoles -based on orders from the Management Service, connecting to it at -startup. Its initscript is called cloud-console-proxy, and its setup -command is called cloud-setup-console-proxy. - - -=== cloud-vnet: the Cloud Virtual Networking Arbiter === - -This C-based service provides network virtualization and isolation for -virtual machines based on security settings established by the cloud -operator. Its initscript is called cloud-vnet; it requires no setup. - - ---------------------------------------------------------------------- -INSTALLATION PATHS: WHERE THE STACK IS INSTALLED ON YOUR SYSTEM ---------------------------------------------------------------------- - - -The CloudStack build system installs files on a variety of paths, each -one of which is selectable when building from source. 
- -- $PREFIX: - the default prefix where the entire stack is installed - defaults to /usr/local on source builds - defaults to /usr on package builds - -- $SYSCONFDIR/cloud: - - the prefix for CloudStack configuration files - defaults to $PREFIX/etc/cloud on source builds - defaults to /etc/cloud on package builds - -- $SYSCONFDIR/init.d: - the prefix for CloudStack initscripts - defaults to $PREFIX/etc/init.d on source builds - defaults to /etc/init.d on package builds - -- $BINDIR: - the CloudStack installs programs there - defaults to $PREFIX/bin on source builds - defaults to /usr/bin on package builds - -- $LIBEXECDIR: - the CloudStack installs service runners there - defaults to $PREFIX/libexec on source builds - defaults to /usr/libexec on package builds (/usr/bin on Ubuntu) - - ---------------------------------------------------------------------- -UNINSTALLING THE CLOUDSTACK FROM YOUR SYSTEM ---------------------------------------------------------------------- - - -If you installed the CloudStack using packages, use your operating -system package manager to remove the CloudStack packages. - -If you installed from the source: ./waf uninstall +Please refer to the document HACKING distributed with the source. --------------------------------------------------------------------- diff --git a/README.html b/README.html new file mode 100644 index 00000000000..2ece7a070e7 --- /dev/null +++ b/README.html @@ -0,0 +1,10555 @@ + + + + + + + + + + + + + Cloud.com CloudStack - Documentation + + + + + + + + + + + +
+
+
+
+
+
+
+
+
+
+
+
<!--{{{-->
+<link rel='alternate' type='application/rss+xml' title='RSS' href='index.xml' />
+<!--}}}-->
+
+
+
+
Background: #fff
+Foreground: #000
+PrimaryPale: #8cf
+PrimaryLight: #18f
+PrimaryMid: #04b
+PrimaryDark: #014
+SecondaryPale: #ffc
+SecondaryLight: #fe8
+SecondaryMid: #db4
+SecondaryDark: #841
+TertiaryPale: #eee
+TertiaryLight: #ccc
+TertiaryMid: #999
+TertiaryDark: #666
+Error: #f88
+
+
+
+
/*{{{*/
+body {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
+
+a {color:[[ColorPalette::PrimaryMid]];}
+a:hover {background-color:[[ColorPalette::PrimaryMid]]; color:[[ColorPalette::Background]];}
+a img {border:0;}
+
+h1,h2,h3,h4,h5,h6 {color:[[ColorPalette::SecondaryDark]]; background:transparent;}
+h1 {border-bottom:2px solid [[ColorPalette::TertiaryLight]];}
+h2,h3 {border-bottom:1px solid [[ColorPalette::TertiaryLight]];}
+
+.button {color:[[ColorPalette::PrimaryDark]]; border:1px solid [[ColorPalette::Background]];}
+.button:hover {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::SecondaryLight]]; border-color:[[ColorPalette::SecondaryMid]];}
+.button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::SecondaryDark]];}
+
+.header {background:[[ColorPalette::PrimaryMid]];}
+.headerShadow {color:[[ColorPalette::Foreground]];}
+.headerShadow a {font-weight:normal; color:[[ColorPalette::Foreground]];}
+.headerForeground {color:[[ColorPalette::Background]];}
+.headerForeground a {font-weight:normal; color:[[ColorPalette::PrimaryPale]];}
+
+.tabSelected{color:[[ColorPalette::PrimaryDark]];
+	background:[[ColorPalette::TertiaryPale]];
+	border-left:1px solid [[ColorPalette::TertiaryLight]];
+	border-top:1px solid [[ColorPalette::TertiaryLight]];
+	border-right:1px solid [[ColorPalette::TertiaryLight]];
+}
+.tabUnselected {color:[[ColorPalette::Background]]; background:[[ColorPalette::TertiaryMid]];}
+.tabContents {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::TertiaryPale]]; border:1px solid [[ColorPalette::TertiaryLight]];}
+.tabContents .button {border:0;}
+
+#sidebar {}
+#sidebarOptions input {border:1px solid [[ColorPalette::PrimaryMid]];}
+#sidebarOptions .sliderPanel {background:[[ColorPalette::PrimaryPale]];}
+#sidebarOptions .sliderPanel a {border:none;color:[[ColorPalette::PrimaryMid]];}
+#sidebarOptions .sliderPanel a:hover {color:[[ColorPalette::Background]]; background:[[ColorPalette::PrimaryMid]];}
+#sidebarOptions .sliderPanel a:active {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::Background]];}
+
+.wizard {background:[[ColorPalette::PrimaryPale]]; border:1px solid [[ColorPalette::PrimaryMid]];}
+.wizard h1 {color:[[ColorPalette::PrimaryDark]]; border:none;}
+.wizard h2 {color:[[ColorPalette::Foreground]]; border:none;}
+.wizardStep {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];
+	border:1px solid [[ColorPalette::PrimaryMid]];}
+.wizardStep.wizardStepDone {background:[[ColorPalette::TertiaryLight]];}
+.wizardFooter {background:[[ColorPalette::PrimaryPale]];}
+.wizardFooter .status {background:[[ColorPalette::PrimaryDark]]; color:[[ColorPalette::Background]];}
+.wizard .button {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryLight]]; border: 1px solid;
+	border-color:[[ColorPalette::SecondaryPale]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryDark]] [[ColorPalette::SecondaryPale]];}
+.wizard .button:hover {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Background]];}
+.wizard .button:active {color:[[ColorPalette::Background]]; background:[[ColorPalette::Foreground]]; border: 1px solid;
+	border-color:[[ColorPalette::PrimaryDark]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryPale]] [[ColorPalette::PrimaryDark]];}
+
+.wizard .notChanged {background:transparent;}
+.wizard .changedLocally {background:#80ff80;}
+.wizard .changedServer {background:#8080ff;}
+.wizard .changedBoth {background:#ff8080;}
+.wizard .notFound {background:#ffff80;}
+.wizard .putToServer {background:#ff80ff;}
+.wizard .gotFromServer {background:#80ffff;}
+
+#messageArea {border:1px solid [[ColorPalette::SecondaryMid]]; background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]];}
+#messageArea .button {color:[[ColorPalette::PrimaryMid]]; background:[[ColorPalette::SecondaryPale]]; border:none;}
+
+.popupTiddler {background:[[ColorPalette::TertiaryPale]]; border:2px solid [[ColorPalette::TertiaryMid]];}
+
+.popup {background:[[ColorPalette::TertiaryPale]]; color:[[ColorPalette::TertiaryDark]]; border-left:1px solid [[ColorPalette::TertiaryMid]]; border-top:1px solid [[ColorPalette::TertiaryMid]]; border-right:2px solid [[ColorPalette::TertiaryDark]]; border-bottom:2px solid [[ColorPalette::TertiaryDark]];}
+.popup hr {color:[[ColorPalette::PrimaryDark]]; background:[[ColorPalette::PrimaryDark]]; border-bottom:1px;}
+.popup li.disabled {color:[[ColorPalette::TertiaryMid]];}
+.popup li a, .popup li a:visited {color:[[ColorPalette::Foreground]]; border: none;}
+.popup li a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border: none;}
+.popup li a:active {background:[[ColorPalette::SecondaryPale]]; color:[[ColorPalette::Foreground]]; border: none;}
+.popupHighlight {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
+.listBreak div {border-bottom:1px solid [[ColorPalette::TertiaryDark]];}
+
+.tiddler .defaultCommand {font-weight:bold;}
+
+.shadow .title {color:[[ColorPalette::TertiaryDark]];}
+
+.title {color:[[ColorPalette::SecondaryDark]];}
+.subtitle {color:[[ColorPalette::TertiaryDark]];}
+
+.toolbar {color:[[ColorPalette::PrimaryMid]];}
+.toolbar a {color:[[ColorPalette::TertiaryLight]];}
+.selected .toolbar a {color:[[ColorPalette::TertiaryMid]];}
+.selected .toolbar a:hover {color:[[ColorPalette::Foreground]];}
+
+.tagging, .tagged {border:1px solid [[ColorPalette::TertiaryPale]]; background-color:[[ColorPalette::TertiaryPale]];}
+.selected .tagging, .selected .tagged {background-color:[[ColorPalette::TertiaryLight]]; border:1px solid [[ColorPalette::TertiaryMid]];}
+.tagging .listTitle, .tagged .listTitle {color:[[ColorPalette::PrimaryDark]];}
+.tagging .button, .tagged .button {border:none;}
+
+.footer {color:[[ColorPalette::TertiaryLight]];}
+.selected .footer {color:[[ColorPalette::TertiaryMid]];}
+
+.sparkline {background:[[ColorPalette::PrimaryPale]]; border:0;}
+.sparktick {background:[[ColorPalette::PrimaryDark]];}
+
+.error, .errorButton {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::Error]];}
+.warning {color:[[ColorPalette::Foreground]]; background:[[ColorPalette::SecondaryPale]];}
+.lowlight {background:[[ColorPalette::TertiaryLight]];}
+
+.zoomer {background:none; color:[[ColorPalette::TertiaryMid]]; border:3px solid [[ColorPalette::TertiaryMid]];}
+
+.imageLink, #displayArea .imageLink {background:transparent;}
+
+.annotation {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; border:2px solid [[ColorPalette::SecondaryMid]];}
+
+.viewer .listTitle {list-style-type:none; margin-left:-2em;}
+.viewer .button {border:1px solid [[ColorPalette::SecondaryMid]];}
+.viewer blockquote {border-left:3px solid [[ColorPalette::TertiaryDark]];}
+
+.viewer table, table.twtable {border:2px solid [[ColorPalette::TertiaryDark]];}
+.viewer th, .viewer thead td, .twtable th, .twtable thead td {background:[[ColorPalette::SecondaryMid]]; border:1px solid [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::Background]];}
+.viewer td, .viewer tr, .twtable td, .twtable tr {border:1px solid [[ColorPalette::TertiaryDark]];}
+
+.viewer pre {border:1px solid [[ColorPalette::SecondaryLight]]; background:[[ColorPalette::SecondaryPale]];}
+.viewer code {color:[[ColorPalette::SecondaryDark]];}
+.viewer hr {border:0; border-top:dashed 1px [[ColorPalette::TertiaryDark]]; color:[[ColorPalette::TertiaryDark]];}
+
+.highlight, .marked {background:[[ColorPalette::SecondaryLight]];}
+
+.editor input {border:1px solid [[ColorPalette::PrimaryMid]];}
+.editor textarea {border:1px solid [[ColorPalette::PrimaryMid]]; width:100%;}
+.editorFooter {color:[[ColorPalette::TertiaryMid]];}
+.readOnly {background:[[ColorPalette::TertiaryPale]];}
+
+#backstageArea {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::TertiaryMid]];}
+#backstageArea a {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
+#backstageArea a:hover {background:[[ColorPalette::SecondaryLight]]; color:[[ColorPalette::Foreground]]; }
+#backstageArea a.backstageSelTab {background:[[ColorPalette::Background]]; color:[[ColorPalette::Foreground]];}
+#backstageButton a {background:none; color:[[ColorPalette::Background]]; border:none;}
+#backstageButton a:hover {background:[[ColorPalette::Foreground]]; color:[[ColorPalette::Background]]; border:none;}
+#backstagePanel {background:[[ColorPalette::Background]]; border-color: [[ColorPalette::Background]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]] [[ColorPalette::TertiaryDark]];}
+.backstagePanelFooter .button {border:none; color:[[ColorPalette::Background]];}
+.backstagePanelFooter .button:hover {color:[[ColorPalette::Foreground]];}
+#backstageCloak {background:[[ColorPalette::Foreground]]; opacity:0.6; filter:'alpha(opacity=60)';}
+/*}}}*/
+
+
+
/*{{{*/
+* html .tiddler {height:1%;}
+
+body {font-size:.75em; font-family:arial,helvetica; margin:0; padding:0;}
+
+h1,h2,h3,h4,h5,h6 {font-weight:bold; text-decoration:none;}
+h1,h2,h3 {padding-bottom:1px; margin-top:1.2em;margin-bottom:0.3em;}
+h4,h5,h6 {margin-top:1em;}
+h1 {font-size:1.35em;}
+h2 {font-size:1.25em;}
+h3 {font-size:1.1em;}
+h4 {font-size:1em;}
+h5 {font-size:.9em;}
+
+hr {height:1px;}
+
+a {text-decoration:none;}
+
+dt {font-weight:bold;}
+
+ol {list-style-type:decimal;}
+ol ol {list-style-type:lower-alpha;}
+ol ol ol {list-style-type:lower-roman;}
+ol ol ol ol {list-style-type:decimal;}
+ol ol ol ol ol {list-style-type:lower-alpha;}
+ol ol ol ol ol ol {list-style-type:lower-roman;}
+ol ol ol ol ol ol ol {list-style-type:decimal;}
+
+.txtOptionInput {width:11em;}
+
+#contentWrapper .chkOptionInput {border:0;}
+
+.externalLink {text-decoration:underline;}
+
+.indent {margin-left:3em;}
+.outdent {margin-left:3em; text-indent:-3em;}
+code.escaped {white-space:nowrap;}
+
+.tiddlyLinkExisting {font-weight:bold;}
+.tiddlyLinkNonExisting {font-style:italic;}
+
+/* the 'a' is required for IE, otherwise it renders the whole tiddler in bold */
+a.tiddlyLinkNonExisting.shadow {font-weight:bold;}
+
+#mainMenu .tiddlyLinkExisting,
+	#mainMenu .tiddlyLinkNonExisting,
+	#sidebarTabs .tiddlyLinkNonExisting {font-weight:normal; font-style:normal;}
+#sidebarTabs .tiddlyLinkExisting {font-weight:bold; font-style:normal;}
+
+.header {position:relative;}
+.header a:hover {background:transparent;}
+.headerShadow {position:relative; padding:4.5em 0 1em 1em; left:-1px; top:-1px;}
+.headerForeground {position:absolute; padding:4.5em 0 1em 1em; left:0px; top:0px;}
+
+.siteTitle {font-size:3em;}
+.siteSubtitle {font-size:1.2em;}
+
+#mainMenu {position:absolute; left:0; width:10em; text-align:right; line-height:1.6em; padding:1.5em 0.5em 0.5em 0.5em; font-size:1.1em;}
+
+#sidebar {position:absolute; right:3px; width:16em; font-size:.9em;}
+#sidebarOptions {padding-top:0.3em;}
+#sidebarOptions a {margin:0 0.2em; padding:0.2em 0.3em; display:block;}
+#sidebarOptions input {margin:0.4em 0.5em;}
+#sidebarOptions .sliderPanel {margin-left:1em; padding:0.5em; font-size:.85em;}
+#sidebarOptions .sliderPanel a {font-weight:bold; display:inline; padding:0;}
+#sidebarOptions .sliderPanel input {margin:0 0 0.3em 0;}
+#sidebarTabs .tabContents {width:15em; overflow:hidden;}
+
+.wizard {padding:0.1em 1em 0 2em;}
+.wizard h1 {font-size:2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
+.wizard h2 {font-size:1.2em; font-weight:bold; background:none; padding:0; margin:0.4em 0 0.2em;}
+.wizardStep {padding:1em 1em 1em 1em;}
+.wizard .button {margin:0.5em 0 0; font-size:1.2em;}
+.wizardFooter {padding:0.8em 0.4em 0.8em 0;}
+.wizardFooter .status {padding:0 0.4em; margin-left:1em;}
+.wizard .button {padding:0.1em 0.2em;}
+
+#messageArea {position:fixed; top:2em; right:0; margin:0.5em; padding:0.5em; z-index:2000; _position:absolute;}
+.messageToolbar {display:block; text-align:right; padding:0.2em;}
+#messageArea a {text-decoration:underline;}
+
+.tiddlerPopupButton {padding:0.2em;}
+.popupTiddler {position: absolute; z-index:300; padding:1em; margin:0;}
+
+.popup {position:absolute; z-index:300; font-size:.9em; padding:0; list-style:none; margin:0;}
+.popup .popupMessage {padding:0.4em;}
+.popup hr {display:block; height:1px; width:auto; padding:0; margin:0.2em 0;}
+.popup li.disabled {padding:0.4em;}
+.popup li a {display:block; padding:0.4em; font-weight:normal; cursor:pointer;}
+.listBreak {font-size:1px; line-height:1px;}
+.listBreak div {margin:2px 0;}
+
+.tabset {padding:1em 0 0 0.5em;}
+.tab {margin:0 0 0 0.25em; padding:2px;}
+.tabContents {padding:0.5em;}
+.tabContents ul, .tabContents ol {margin:0; padding:0;}
+.txtMainTab .tabContents li {list-style:none;}
+.tabContents li.listLink { margin-left:.75em;}
+
+#contentWrapper {display:block;}
+#splashScreen {display:none;}
+
+#displayArea {margin:1em 17em 0 14em;}
+
+.toolbar {text-align:right; font-size:.9em;}
+
+.tiddler {padding:1em 1em 0;}
+
+.missing .viewer,.missing .title {font-style:italic;}
+
+.title {font-size:1.6em; font-weight:bold;}
+
+.missing .subtitle {display:none;}
+.subtitle {font-size:1.1em;}
+
+.tiddler .button {padding:0.2em 0.4em;}
+
+.tagging {margin:0.5em 0.5em 0.5em 0; float:left; display:none;}
+.isTag .tagging {display:block;}
+.tagged {margin:0.5em; float:right;}
+.tagging, .tagged {font-size:0.9em; padding:0.25em;}
+.tagging ul, .tagged ul {list-style:none; margin:0.25em; padding:0;}
+.tagClear {clear:both;}
+
+.footer {font-size:.9em;}
+.footer li {display:inline;}
+
+.annotation {padding:0.5em; margin:0.5em;}
+
+* html .viewer pre {width:99%; padding:0 0 1em 0;}
+.viewer {line-height:1.4em; padding-top:0.5em;}
+.viewer .button {margin:0 0.25em; padding:0 0.25em;}
+.viewer blockquote {line-height:1.5em; padding-left:0.8em;margin-left:2.5em;}
+.viewer ul, .viewer ol {margin-left:0.5em; padding-left:1.5em;}
+
+.viewer table, table.twtable {border-collapse:collapse; margin:0.8em 1.0em;}
+.viewer th, .viewer td, .viewer tr,.viewer caption,.twtable th, .twtable td, .twtable tr,.twtable caption {padding:3px;}
+table.listView {font-size:0.85em; margin:0.8em 1.0em;}
+table.listView th, table.listView td, table.listView tr {padding:0px 3px 0px 3px;}
+
+.viewer pre {padding:0.5em; margin-left:0.5em; font-size:1.2em; line-height:1.4em; overflow:auto;}
+.viewer code {font-size:1.2em; line-height:1.4em;}
+
+.editor {font-size:1.1em;}
+.editor input, .editor textarea {display:block; width:100%; font:inherit;}
+.editorFooter {padding:0.25em 0; font-size:.9em;}
+.editorFooter .button {padding-top:0px; padding-bottom:0px;}
+
+.fieldsetFix {border:0; padding:0; margin:1px 0px;}
+
+.sparkline {line-height:1em;}
+.sparktick {outline:0;}
+
+.zoomer {font-size:1.1em; position:absolute; overflow:hidden;}
+.zoomer div {padding:1em;}
+
+* html #backstage {width:99%;}
+* html #backstageArea {width:99%;}
+#backstageArea {display:none; position:relative; overflow: hidden; z-index:150; padding:0.3em 0.5em;}
+#backstageToolbar {position:relative;}
+#backstageArea a {font-weight:bold; margin-left:0.5em; padding:0.3em 0.5em;}
+#backstageButton {display:none; position:absolute; z-index:175; top:0; right:0;}
+#backstageButton a {padding:0.1em 0.4em; margin:0.1em;}
+#backstage {position:relative; width:100%; z-index:50;}
+#backstagePanel {display:none; z-index:100; position:absolute; width:90%; margin-left:3em; padding:1em;}
+.backstagePanelFooter {padding-top:0.2em; float:right;}
+.backstagePanelFooter a {padding:0.2em 0.4em;}
+#backstageCloak {display:none; z-index:20; position:absolute; width:100%; height:100px;}
+
+.whenBackstage {display:none;}
+.backstageVisible .whenBackstage {display:block;}
+/*}}}*/
+
+
+
+
/***
+StyleSheet for use when a translation requires any css style changes.
+This StyleSheet can be used directly by languages such as Chinese, Japanese and Korean which need larger font sizes.
+***/
+/*{{{*/
+body {font-size:0.8em;}
+#sidebarOptions {font-size:1.05em;}
+#sidebarOptions a {font-style:normal;}
+#sidebarOptions .sliderPanel {font-size:0.95em;}
+.subtitle {font-size:0.8em;}
+.viewer table.listView {font-size:0.95em;}
+/*}}}*/
+
+
+
/*{{{*/
+@media print {
+#mainMenu, #sidebar, #messageArea, .toolbar, #backstageButton, #backstageArea {display: none !important;}
+#displayArea {margin: 1em 1em 0em;}
+noscript {display:none;} /* Fixes a feature in Firefox 1.5.0.2 where print preview displays the noscript content */
+}
+/*}}}*/
+
+
+
<!--{{{-->
+<div class='header' macro='gradient vert [[ColorPalette::PrimaryLight]] [[ColorPalette::PrimaryMid]]'>
+<div class='headerShadow'>
+<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
+<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
+</div>
+<div class='headerForeground'>
+<span class='siteTitle' refresh='content' tiddler='SiteTitle'></span>&nbsp;
+<span class='siteSubtitle' refresh='content' tiddler='SiteSubtitle'></span>
+</div>
+</div>
+<div id='mainMenu' refresh='content' tiddler='MainMenu'></div>
+<div id='sidebar'>
+<div id='sidebarOptions' refresh='content' tiddler='SideBarOptions'></div>
+<div id='sidebarTabs' refresh='content' force='true' tiddler='SideBarTabs'></div>
+</div>
+<div id='displayArea'>
+<div id='messageArea'></div>
+<div id='tiddlerDisplay'></div>
+</div>
+<!--}}}-->
+
+
+
<!--{{{-->
+<div class='toolbar' macro='toolbar [[ToolbarCommands::ViewToolbar]]'></div>
+<div class='title' macro='view title'></div>
+<div class='subtitle'><span macro='view modifier link'></span>, <span macro='view modified date'></span> (<span macro='message views.wikified.createdPrompt'></span> <span macro='view created date'></span>)</div>
+<div class='tagging' macro='tagging'></div>
+<div class='tagged' macro='tags'></div>
+<div class='viewer' macro='view text wikified'></div>
+<div class='tagClear'></div>
+<!--}}}-->
+
+
+
<!--{{{-->
+<div class='toolbar' macro='toolbar [[ToolbarCommands::EditToolbar]]'></div>
+<div class='title' macro='view title'></div>
+<div class='editor' macro='edit title'></div>
+<div macro='annotations'></div>
+<div class='editor' macro='edit text'></div>
+<div class='editor' macro='edit tags'></div><div class='editorFooter'><span macro='message views.editor.tagPrompt'></span><span macro='tagChooser excludeLists'></span></div>
+<!--}}}-->
+
+
+
To get started with this blank [[TiddlyWiki]], you'll need to modify the following tiddlers:
+* [[SiteTitle]] & [[SiteSubtitle]]: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
+* [[MainMenu]]: The menu (usually on the left)
+* [[DefaultTiddlers]]: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
+You'll also need to enter your username for signing your edits: <<option txtUserName>>
+
+
+
These [[InterfaceOptions]] for customising [[TiddlyWiki]] are saved in your browser
+
+Your username for signing your edits. Write it as a [[WikiWord]] (eg [[JoeBloggs]])
+
+<<option txtUserName>>
+<<option chkSaveBackups>> [[SaveBackups]]
+<<option chkAutoSave>> [[AutoSave]]
+<<option chkRegExpSearch>> [[RegExpSearch]]
+<<option chkCaseSensitiveSearch>> [[CaseSensitiveSearch]]
+<<option chkAnimate>> [[EnableAnimations]]
+
+----
+Also see [[AdvancedOptions]]
+
+
+
<<importTiddlers>>
+
+
+ +
+
+
---------------------------------------------------------------------
+FOR ANT USERS
+---------------------------------------------------------------------
+
+
+If you are using Ant directly instead of waf, these instructions apply to you:
+
+In this document, the example instructions are based on a local source repository rooted at c:\root (the examples below use paths under c:\cloud\java); you are free to locate it anywhere you like.
+3.1 Set up the developer build type
+
+       1) Go to the c:\cloud\java\build directory
+
+        2) Copy the file build-cloud.properties.template to build-cloud.properties, then modify the parameters to match your local setup. The template properties file has the following content:
+
+            debug=true
+            debuglevel=lines,vars,source
+            tomcat.home=$TOMCAT_HOME      --> change to your local Tomcat root directory such as c:/apache-tomcat-6.0.18
+            debug.jvmarg=-Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n
+            deprecation=off
+            build.type=developer
+            target.compat.version=1.5
+            source.compat.version=1.5
+            branding.name=default
+
+        3) Make sure the following environment variables and the Path are set (a sample session follows the list):
+
+Set environment variables:
+CATALINA_HOME:
+JAVA_HOME:  
+CLOUD_HOME:  
+MYSQL_HOME:
+
+Update the Path to include:
+
+MYSQL_HOME\bin
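+
+For example, from a Windows command prompt (the values below are illustrative, not required paths; point each variable at your own installation, and note that CLOUD_HOME is assumed here to be the source root). These set commands affect only the current command prompt; use the System Properties dialog to make them permanent:
+
+    set CATALINA_HOME=c:\apache-tomcat-6.0.18
+    set JAVA_HOME=c:\jdk1.5.0
+    set CLOUD_HOME=c:\cloud
+    set MYSQL_HOME=c:\mysql
+    set PATH=%PATH%;%MYSQL_HOME%\bin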
+
+    4) Copy the full directory tree C:\cloud\java\build\deploy\production to C:\cloud\java\build\deploy\developer
+
+            You can use Windows Explorer to copy the directory tree over (a command-line alternative is shown below). Please note that during your daily development process, whenever you see updates in C:\cloud\java\build\deploy\production, you should sync them into C:\cloud\java\build\deploy\developer.
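+
+If you prefer the command line to Windows Explorer, xcopy can copy the tree in one step:
+
+    xcopy /E /I C:\cloud\java\build\deploy\production C:\cloud\java\build\deploy\developer
+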
+3.2 Common build instructions
+
+After you have set up the build type, you are ready to build and run the Management Server locally.
+
+cd java
+python waf configure build install
+
+More at Build system.
+
+This will install the management server and its prerequisites to the appropriate place (your Tomcat instance on Windows, /usr/local on Linux).  It will also install the agent to /usr/local/cloud/agent (this will change in the future).
+4. Database and Server deployment
+
+After a successful management server build (the database deployment scripts use some of the artifacts from the build process), you can use the database deployment script to deploy and initialize the database. You can find the deployment scripts in C:/cloud/java/build/deploy/db.  deploy-db.sh is used to create and populate your DB instance; please take a look at the contents of deploy-db.sh for more details.
+
+Before you run the scripts, you should edit C:/cloud/java/build/deploy/developer/db/server-setup-dev.xml to allocate Public and Private IP ranges for your development setup. Ensure that the ranges you pick are not allocated to anyone else.
+
+Customized VM templates to be populated are in C:/cloud/java/build/deploy/developer/db/templates-dev.sql.  Edit this file to customize the templates to your needs.
+
+Deploy the DB by running
+
+./deploy-db.sh ../developer/db/server-setup-dev.xml ../developer/db/templates-dev.xml
+4.1. Management Server Deployment
+
+ant build-server
+
+Build Management Server
+
+ant deploy-server
+
+Deploy Management Server software to Tomcat environment
+
+ant debug
+
+Start Management Server in debug mode. The JVM debug options can be found in build-cloud.properties.
+
+ant run
+
+Start Management Server in normal mode.
+
+5. Agent deployment
+
+After a successful build, you should be able to find the build artifacts in the distribution directory; in this example, for the developer build type, the artifacts are located at c:\cloud\java\dist\developer. In particular, if you have run the
+
+ant package-agent build command, you should see the agent software packaged in a single file named agent.zip under c:\cloud\java\dist\developer, together with the agent deployment script deploy-agent.sh.
+5.1 Agent Type
+
+Agent software can be deployed and configured to serve different roles at run time. In the current implementation, there are three types of agent configuration, called Computing Server, Routing Server and Storage Server.
+
+    * When the agent software is configured to run as a Computing Server, it is responsible for hosting user VMs. The agent software should run in the Xen Dom0 system on the computing server machine.
+
+    * When the agent software is configured to run as a Routing Server, it is responsible for hosting the routing VMs for user virtual networks and the console proxy system VMs. Because the routing server serves as the bridge to the outside network, the machine running the agent software should have at least two network interfaces: one facing the outside network and one participating in the internal VMOps management network. As with the computing server, the agent software on a routing server should run in the Xen Dom0 system.
+
+    * When the agent software is configured to run as a Storage Server, it is responsible for providing storage service for all VMs. The storage service is based on ZFS running on a Solaris system, so the agent software on a storage server runs under Solaris (actually a Solaris VM). Dom0 systems on the computing and routing servers can access the storage service through an iSCSI initiator; the storage volume is eventually mounted on the Dom0 system and made available to DomU VMs through our agent software.
+
+5.2 Resource sharing
+
+All developers can share the same set of agent server machines for development. To make this possible, the concept of an instance appears in various places:
+
+    * VM names. VM names are structured names; each contains an instance section that identifies which VMOps cloud instance a VM belongs to. The VMOps cloud instance name is configured in the server configuration parameter AgentManager/instance.name.
+    * iSCSI initiator mount point. For Computing and Routing servers, the mount point distinguishes the mounted DomU VM images of different agent deployments. The mount location can be specified in the agent.properties file with a name-value pair named mount.parent.
+    * iSCSI target allocation point. For Storage servers, this allocation point distinguishes the storage allocations of different storage agent deployments. The allocation point can be specified in the agent.properties file with a name-value pair named parent (see the example below).
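+
+For example, a developer might add per-developer values like these to agent.properties (the values below are illustrative placeholders, not defaults):
+
+    mount.parent=/mnt/alice
+    parent=tank/volumes/alice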
+
+5.4 Deploy agent software
+
+Before running the deployment scripts, first copy the build artifacts agent.zip and deploy-agent.sh to your personal development directory on the agent server machines. By our current convention, your personal development directory is usually located at /root/<your name>. In the following example, the agent package and deployment script are copied to test0.lab.vmops.com and the deployment script file is marked as executable.
+
+    On the build machine:
+
+        scp agent.zip root@test0:/root/<your name>
+
+        scp deploy-agent.sh root@test0:/root/<your name>
+
+    On the agent server machine:
+
+chmod +x deploy-agent.sh
+5.4.1 Deploy agent on computing server
+
+deploy-agent.sh -d /root/<your name>/agent -h <management server IP> -t computing -m expert   
+5.4.2 Deploy agent on routing server
+
+deploy-agent.sh -d /root/<your name>/agent -h <management server IP> -t routing -m expert   
+5.4.3 Deploy agent on storage server
+
+deploy-agent.sh -d /root/<your name>/agent -h <management server IP> -t storage -m expert   
+5.5 Configure agent
+
+After you have deployed the agent software, configure the agent by editing the agent.properties file under the /root/<your name>/agent/conf directory on each of the Routing, Computing and Storage servers. Add or edit the following properties; the rest are defaults that get populated by the agent at runtime.
+    workers=3
+    host=<replace with your management server IP>
+    port=8250
+    pod=<replace with your pod id>
+    zone=<replace with your zone id>
+    instance=<your unique instance name>
+    developer=true
+
+The following is a sample agent.properties file for a Routing server:
+
+   workers=3
+   id=1
+   port=8250
+   pod=RC
+   storage=comstar
+   zone=RC
+   type=routing
+   private.network.nic=xenbr0
+   instance=RC
+   public.network.nic=xenbr1
+   developer=true
+   host=192.168.1.138
+5.6 Running the agent
+
+Edit /root/<your name>/agent/conf/log4j-cloud.xml to update the location of the logs to somewhere under /root/<your name>.
+
+Once you have deployed and configured the agent software, you are ready to launch it. Under the agent root directory (in our example, /root/<your name>/agent) there is a script file named run.sh; you can use it to launch the agent.
+
+Launch the agent as a detached background process:
+
+nohup ./run.sh & 
+
+Launch the agent in interactive mode:
+
+./run.sh
+
+Launch the agent in debug mode; for example, the following command makes the JVM listen on TCP port 8787:
+
+./run.sh -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n
+
+If the agent is launched in debug mode, you can use the Eclipse IDE to debug it remotely. Please note that when you are sharing an agent server machine with others, you should choose a TCP port that is not already in use by someone else.
+
+Please also note that run.sh also searches the /etc/cloud directory for agent.properties; make sure it uses the correct agent.properties file!
+5.7 Stopping the agent
+
+The PID of the agent process is stored in /var/run/agent.<Instance>.pid
+
+To stop the agent:
+
+kill <pid of agent>
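+
+For example, in one step (assuming the PID file path above):
+
+kill `cat /var/run/agent.<Instance>.pid`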
+
+ 
+
+
+
!Fedora / CentOS
+# [[Install the build dependencies|waf installrpmdeps]] with {{{./waf installrpmdeps}}} in the source directory.
+# As a non-root user, run the command {{{./waf rpm}}} in the source directory.
+Once this command is done, the packages will be built in the directory {{{artifacts/rpmbuild}}}.
+!Ubuntu
+# [[Install the build dependencies|waf installdebdeps]] with {{{./waf installdebdeps}}} in the source directory.
+# As a non-root user, run the command {{{./waf deb}}} in the source directory.
+Once this command is done, the packages will be built in the directory {{{artifacts/debbuild}}}.
+
+
+
!Obtain the source for the CloudStack
+If you aren't reading this from a local copy of the source code, see [[Obtaining the source]].
+!Prepare your development environment
+See [[Preparing your development environment]].
+!Configure the build on the builder machine
+As non-root, run the command {{{./waf configure}}}.  See [[waf configure]] to discover configuration options for that command.
+!Build the CloudStack on the builder machine
+As non-root, run the command {{{./waf build}}}.  See [[waf build]] for an explanation.
+!Install the CloudStack on the target systems
+On each machine where you intend to run a CloudStack component:
+# upload the entire source code tree after compilation, //ensuring that the source ends up in the same path as on the machine on which you compiled it//,
+## {{{rsync}}} is [[usually very handy|Using rsync to quickly transport the source tree to another machine]] for this
+# in that newly uploaded directory of the target machine, run the command {{{./waf install}}} //as root//.
+Consult [[waf install]] for information on installation.
+
+
+
!Changing the [[configuration|waf configure]] process
+See file {{{wscript_configure}}}.
+!Changing the [[build|waf build]] and [[install|waf install]] processes
+!!Changing / adding / removing JAR targets
+You generally need to add a new {{{compile-xyz}}} target following the model of the existing ones, and add that target to the list of dependencies of the other pertinent targets.  See  [[How waf uses ant]] and the ant build project files within the {{{build/}}} folder.
+
+We have some old ant information that you might find useful: AntInformation.
+!!Other changes
+See file {{{wscript_build}}}.
+!Changing packaging
+!!Fedora / """CentOS""" packaging
+See {{{cloud.spec}}} in the source directory.
+!!Ubuntu packaging
+See the files in the {{{debian/}}} folder.
+
+
+
The Cloud.com CloudStack is an open source software product that enables the deployment, management, and configuration of multi-tier and multi-tenant infrastructure cloud services by enterprises and service providers.
+
+
+
Not done yet!
+
+
+
Not done yet!
+
+
+
[[Welcome]]
+
+
+
#[[Source layout guide]]
+
+
+
Not done yet!
+
+
+
Start here if you want to learn the essentials to extend, modify and enhance the CloudStack.  This assumes that you've already familiarized yourself with CloudStack concepts, installation and configuration using the [[Getting started|Welcome]] instructions.
+* [[Obtain the source|Obtaining the source]]
+* [[Prepare your environment|Preparing your development environment]]
+* [[Get acquainted with the development lifecycle|Your development lifecycle]]
+* [[Familiarize yourself with our development conventions|Development conventions]]
+Extra developer information:
+* [[What is this waf thing?|waf]]
+* [[How to change the build, install and packaging processes|Changing the build, install and packaging processes]]
+* [[How to integrate with Eclipse]]
+* [[Starting over]]
+* [[Making a source release|waf dist]]
+
+
+
+
!Importing the projects in the source
+#Open Eclipse
+#Select //Import projects//
+#Point it to the source directory and import all the projects within it
+!Management Server execution
+To run the Management Server from Eclipse, set up an External Tool of the Program variety.  Put the path to the {{{waf}}} binary in the Location of the window, and the source directory as Working Directory.  Then specify {{{install --preserve-config run}}} as arguments.  You can now use the Run button in Eclipse to execute the Management Server directly from Eclipse.  You can replace run with debug if you want to run the Management Server with the Debugging Proxy turned on.
+!Agent or Console Proxy execution
+To run the Agent or Console Proxy from Eclipse, set up an External Tool of the Program variety just like in the Management Server case.  In there, however, specify {{{install --preserve-config run_agent}}} or  {{{install --preserve-config run_console_proxy}}} as arguments instead. Remember that you need to [[set sudo up|Setting sudo up for passwordless root]] to not ask you for a password and not require a TTY, otherwise sudo -- implicitly called by [[waf run_agent]] or [[waf run_console_proxy]] -- will refuse to work.
+
+
+
+
+
By now, you have probably noticed that we do, indeed, ship ant build files in the CloudStack.  During the build process, waf calls ant directly to build the Java portions of our stack, and it uses the resulting JAR files to perform the installation.
+
+Any ant target added to the ant project files will automatically be detected -- if it is named {{{compile-xyz}}} waf will know it builds a JAR file and knows to use / install that JAR file.  In general, this means you only need to add the JAR file to the appropriate  packaging manifests ({{{cloud.spec}}} and {{{debian/{name-of-package}.install}}}).
+
+The reason we do this rather than use the native waf capabilities for building Java projects is simple: by using ant, we can leverage the built-in support for ant in [[Eclipse|How to integrate with Eclipse]] and many other """IDEs""".  Another reason is that Java developers are familiar with ant, so adding a new JAR file or modifying what gets built into the existing JAR files is straightforward for them.
+
+
+
The CloudStack build system installs files on a variety of paths, each
+one of which is selectable when building from source.
+* {{{$PREFIX}}}:
+** the default prefix where the entire stack is installed
+** defaults to /usr/local on source builds
+** defaults to /usr on package builds
+* {{{$SYSCONFDIR/cloud}}}:
+** the prefix for CloudStack configuration files
+** defaults to $PREFIX/etc/cloud on source builds
+** defaults to /etc/cloud on package builds
+* {{{$SYSCONFDIR/init.d}}}:
+** the prefix for CloudStack initscripts
+** defaults to $PREFIX/etc/init.d on source builds
+** defaults to /etc/init.d on package builds
+* {{{$BINDIR}}}:
+** the CloudStack installs programs there
+** defaults to $PREFIX/bin on source builds
+** defaults to /usr/bin on package builds
+* {{{$LIBEXECDIR}}}:
+** the CloudStack installs service runners there
+** defaults to $PREFIX/libexec on source builds
+** defaults to /usr/libexec on package builds (/usr/bin on Ubuntu)
+
+
+
+These instructions cover the installation of the entire CloudStack.  If you want to install only specific components, be sure to modify the example commands given here sensibly, so that only the desired packages get installed.
+!Fedora / CentOS
+After building packages, they will be available in the directory {{{artifacts/rpmbuild}}}.  You can install them by executing {{{rpm -ivh artifacts/rpmbuild/RPMS/*/*.rpm}}} from the source directory, as root.
+!Ubuntu
+After building packages, they will be available in {{{artifacts/debbuild}}}.  A {{{sudo dpkg -i artifacts/debbuild/*.deb}}} should suffice to install them.
+
+
+
You have three options.  Choose one:
+# [[Installing the CloudStack from the official package repositories]].<br>This is the recommended (and quickest) way to run a stable release of your CloudStack cloud.  The advantages of using this method are that these packages have been tested by (awesome) Cloud.com engineers, you can easily [[update to new CloudStack releases|Updating the CloudStack software]] later on, dependencies are taken care of automatically for you, and you can verify the integrity of the installed files using your system's package manager.
+# [[Building distribution packages]] from the source, then [[installing them|Installing distribution packages built by you]].<br>This is the recommended way to run your CloudStack cloud from unstable sources.  The advantages of using this method are that dependencies are taken care of automatically for you, and you can verify the integrity of the installed files using your system's package manager.
+# [[Building from the source and installing directly from there]].<br>This option is suitable for you if you want to develop the CloudStack or you have an unsupported distribution.  It is also the recommended way to run your CloudStack cloud if you intend to customize or reconfigure the source, if you intend to port the CloudStack to another distribution, or if you intend to run the CloudStack on a distribution for which packages are not built by us.
+
+
+
!Fedora
+!!Add the CloudStack repository to your operating system
+{{{
+cd /etc/yum.repos.d/
+wget http://download.cloud.com/foss/fedora/cloud.repo
+}}}
+!!Install the component you want
+* Management Server: {{{yum install cloud-client}}}
+* Agent: {{{yum install cloud-agent}}}
+* Console Proxy: {{{yum install cloud-console-proxy}}}
+!CentOS
+!!Add the CloudStack repository to your operating system
+{{{
+cd /etc/yum.repos.d/
+wget http://download.cloud.com/foss/centos/cloud.repo
+}}}
+!!Install the component you want
+* Management Server: {{{yum install cloud-client}}}
+* Agent: {{{yum install cloud-agent}}}
+* Console Proxy: {{{yum install cloud-console-proxy}}}
+!Ubuntu
+!!Add the CloudStack repository to your operating system
+Add the following line to {{{/etc/apt/sources.list}}}:
+{{{
+deb http://download.cloud.com/apt/ubuntu/stable/oss ./
+}}}
+!!Install the component you want
+* Management Server: {{{aptitude install cloud-client}}}
+* Agent: {{{aptitude install cloud-agent}}}
+* Console Proxy: {{{aptitude install cloud-console-proxy}}}
+
+
+
 * 
+
+
+
Copyright:
+
+    <Copyright (C) 2009-2010 Cloud.com.>
+
+License:
+
+    This program is dual-licensed.
+    
+    For the free software portions: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.  These portions -- clearly marked throughout the program sources -- are also distributed under the Cloud.com Software License 1.1.
+
+    For the proprietary portions: these portions are made available to you on the terms of the Cloud.com Software License 1.1.
+
+    This package is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+
+    You should have received a copy of the GNU General Public License along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+
[[Welcome]]
+[[Using this wiki]]
+
+[[Getting started|Welcome]]
+[[Developer info|Hacking on the CloudStack]]
+
+[[Cloud.com|http://cloud.com/]]
+[[Our community|http://cloud.com/community]]
+[[Forums|http://cloud.com/community/forum]]
+[[Report a bug|http://bugs.cloud.com/enter_bug.cgi]]
+
+
+
If you are reading this file directly from a local copy of the source code in your machine, hmmm, well, you can skip this.
+
+Otherwise, there are two options to obtain the source code for the CloudStack:
+!Getting a stable release
+Download the latest tarball from the [[Cloud.com downloads section|http://cloud.com/community/downloads]].
+!Getting the latest, bleeding-edge code
+Use [[Git]] to clone the following URL:
+{{{
+git clone http://git.cloud.com/cloudstack-oss
+}}}
+This will create a folder called {{{cloudstack-oss}}} in your current folder.
+!Browsing the source code online
+You can browse the CloudStack source code through [[our CGit Web interface|http://git.cloud.com/cloudstack-oss]].
+
+
+
!Install the build dependencies on the machine where you will compile the CloudStack
+!!Fedora / CentOS
+The command [[waf installrpmdeps]] issued from the source tree gets it done.
+!!Ubuntu
+The command [[waf installdebdeps]] issued from the source tree gets it done.
+!!Other distributions
+See [[CloudStack build dependencies]]
+!Install the run-time dependencies on the machines where you will run the CloudStack
+See [[CloudStack run-time dependencies]].
+
+
+
Every time you run {{{./waf install}}} to deploy changed code, waf will install configuration files once again.  This can be a nuisance if you are developing the stack.
+
+There are, however, two ways to get around this.
+!Append {{{--preserve-config}}} to executions of {{{./waf install}}}
+{{{./waf install}}} has an option {{{--preserve-config}}}.  If you pass this option when installing, configuration files are never overwritten.
+This option is useful when you have modified source files and you need to deploy them on a system that already has the CloudStack installed and configured, but you do //not// want to overwrite the existing configuration of the CloudStack.
+
+If, however, you have reconfigured and rebuilt the source since the last time you did {{{./waf install}}}, then you are advised to replace the configuration files and set the components up again, because some configuration files in the source use identifiers that may have changed during the last {{{./waf configure}}}.  So, if this is your case, check out the next technique.
+!Override configuration files in the source tree
+Every configuration file can be overridden in the source without touching the original.
+# Look for the specific config file {{{X}}} (or {{{X.in}}}) in the source, then
+# create an {{{override/}}} folder in the folder that contains {{{X}}}, then
+# place a file named {{{X}}} (or {{{X.in}}}) inside {{{override/}}}, then
+# put the desired contents inside {{{X}}} (or {{{X.in}}})
+Now, every time you run {{{./waf install}}}, the file that will be installed is {{{path/to/override/X.in}}}, instead of {{{path/to/X.in}}}.
+
+This option is useful if you are developing the CloudStack and constantly reinstalling it.  It guarantees that every time you install the CloudStack, the installation will have the correct configuration straight from your source tree and will be ready to run.
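+
+For example, a minimal sketch for overriding the Agent's {{{agent.properties}}} (the exact file name under {{{agent/conf}}} is an assumption; adapt the commands to the file you actually want to override):
+{{{
+cd agent/conf
+mkdir override
+cp agent.properties override/agent.properties   # start from the original, then edit the copy
+}}}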
+
+
+
It is not technically possible to run the CloudStack components directly from the source tree.  That, however, is fine -- each component can be run independently from the install directory.  Here are instructions to do that, [[after you've set the desired component up|Setting the CloudStack components up]].
+!Management Server
+# Execute {{{./waf run}}} as your current user [[(more info)|waf run]].<br>Alternatively, you can use {{{./waf debug}}} and this will [[run with debugging enabled|waf debug]].
+!Agent (Linux-only):
+# Execute {{{./waf run_agent}}} [[(more info)|waf run_agent]]<br>This will implicitly  launch {{{sudo}}} and require your root password unless you have  set {{{sudo}}} up [[not to ask for it|Setting sudo up for passwordless root]] (advisable).
+!Console Proxy (Linux-only):
+# Execute {{{./waf run_console_proxy}}} [[(more info)|waf run_console_proxy]]<br>This will implicitly  launch {{{sudo}}} and require your root password unless you have  set {{{sudo}}} up [[not to ask for it|Setting sudo up for passwordless root]] (advisable).
+
+
+
+When building a source distribution ([[waf dist]]), or distribution packages ([[waf deb]] / [[waf rpm]]), waf will automatically detect the relevant source code control information if the git command is present on the machine where waf is run, and it will write the information to a file called {{{sccs-info}}} inside the source tarball.  Packages and [[waf install]] will install this file into {{{$DOCDIR/sccs-info}}}.
+
+If this source code control information cannot be calculated, then the old {{{sccs-info}}} file -- if it existed -- is preserved across [[dist|waf dist]] runs; if it did not exist before, the fact that the source could not be properly tracked down to a repository is noted in the file.
+
+
+
+
+#Comment out the {{{Defaults    requiretty}}} option in {{{/etc/sudoers}}}
+#Add a line {{{yourusername:    ALL=(ALL)      NOPASSWD: ALL}}} with your username instead of {{{yourusername}}}
+''Warning'': this can be ''insecure'', as it leaves your computer open to root access by whoever touches your console or logs in as your user.
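+For reference, the resulting {{{/etc/sudoers}}} entries would look roughly like this ({{{jdoe}}} is a placeholder for your username; edit the file with {{{visudo}}}):
+{{{
+# Defaults    requiretty
+jdoe    ALL=(ALL)       NOPASSWD: ALL
+}}}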
+
+
+
Agent setup requires some system configuration changes, but is fully automated for your benefit.  To set up your Agent, just run {{{$BINDIR/cloud-setup-agent}}}.  It will ask you some questions about the zone and pod it will run on, and then set it up to start on boot.
+
+The configuration files for the Agent live in {{{agent/conf}}} and get deployed to {{{$SYSCONFDIR/cloud/agent}}}.
+
+
+
!Management Server
+Set up the management server database:
+# either run {{{./waf deploydb_kvm}}} for a [[quick development-type database setup|waf deploydb]], or
+# run {{{$BINDIR/cloud-setup-databases}}} to [[set up the Management Server database in production mode|Setting the Management Server up]]
+!Agent (Linux-only)
+[[Set the Agent up|Setting the Agent up]] with {{{$BINDIR/cloud-setup-agent}}}.
+!Console Proxy (Linux-only):
+[[Set the Console Proxy up|Setting the Console Proxy up]] with {{{$BINDIR/cloud-setup-console-proxy}}}.
+
+
+
Please consult the [[Installation Guide|http://cloud.com/community/cloudstack-21-community-edition-installation-guide]] to find out how to configure each component of the CloudStack.  Refer to the [[Installation paths]] document for information on where to find the programs, initscripts and configuration files mentioned in the Installation Guide (paths may vary).
+
+
+
+
Console Proxy setup is fully automated for your benefit.  To set up your Console Proxy, just run {{{$BINDIR/cloud-setup-console-proxy}}}.  It will ask you some questions about the zone and pod it will run on, and then set it up to start on boot.
+
+The configuration files for the Console Proxy live in {{{console-proxy/conf.dom0}}} and get deployed to {{{$SYSCONFDIR/cloud/console-proxy}}}.
+
+
+
Management Server setup is fully automated for your benefit.  To set up your Management Server, just run {{{$BINDIR/cloud-setup-databases}}} with the appropriate command-line options (you can invoke {{{cloud-setup-databases}}} without parameters to get documentation on its options).  This will deploy the database and adjust the Management Server configuration files to use that database.
+
+The configuration files for the Management Server live in {{{client/tomcatconf}}} and get deployed to {{{$SYSCONFDIR/cloud/client}}}.
+
+
+
Documentation
+
+
+
[[Cloud.com|http://cloud.com/]] """CloudStack"""
+
+
+
Here, you'll discover how the source is laid out, what happens to the sources, and where they get installed in target systems.  Refer to the [[Installation paths]] and [[Token substitution of configure-time variables]] documents to find out about the tokens and paths used below.
+* {{{deps/*jar}}}
+** installed in {{{@JAVADIR@/}}}
+* {{{thirdparty/*jar}}}
+** installed in {{{@PREMIUMJAVADIR@/}}}
+* {{{scripts/*}}}
+** @token identifiers are substituted
+** auto-installed in {{{@AGENTLIBDIR@/scripts}}}
+* {{{patches/<virttech>/*}}}
+** @token identifiers are substituted
+** tarred up in a file called {{{patch-<virttech>.tgz}}} (the name is only used in the artifacts directory)
+** tarball auto-installed as {{{@AGENTLIBDIR@/scripts/vm/hypervisor/<virttech>/patch.tgz}}}
+* {{{<project>/src/**/*java}}}
+** runs through [[ant|How waf uses ant]]
+** turns into {{{cloud-<project>.jar}}}
+** installed in {{{@JAVADIR@/}}}
+* {{{<project>/bindir/*}}}
+** if the file ends in .in, @token identifiers are substituted
+** auto-installed in {{{@BINDIR@/}}}
+* {{{<project>/libexec/*}}}
+** if the file ends in .in, @token identifiers are substituted
+** auto-installed in {{{@LIBEXECDIR@/}}}
+* {{{<project>/tomcatconf/*}}}
+** if the file ends in .in, @token identifiers are substituted
+** auto-installed in {{{@MSCONF@/}}}
+* {{{<project>/db/*}}}
+** auto-installed in {{{@SETUPDATADIR@/}}}
+* {{{<project>/<distro>/SOMEDIRECTORY}}}
+** if the file ends in .in, token identifiers are substituted
+** auto-installed only in specific {{{<distro>}}}, in {{{@SOMEDIRECTORY@/}}} (that is, the uppercase directory name is taken to be an identifier and substituted dynamically for the path that it represents).
+* {{{plugins/<project>/src/**/*java}}}
+** runs through ant
+** turns into {{{cloud-<project>.jar}}}
+** installed in {{{@PLUGINJAVADIR@/}}}
+** will automatically appear in the management server classpath at runtime
+* {{{vendor/<vendor>/tomcatconf/*}}}
+** if the file ends in .in, @token identifiers from ./waf showconfig are substituted
+** auto-installed in {{{@MSCONF@/vendor/<vendor>}}}
+** gets listed first in the management server classpath, so any files there will override files in {{{@MSCONF@}}}
+With your help, by being disciplined -- e.g. thinking about where to add a file, avoiding special cases -- and your suggestions (new classes of files?  we can install them!), we can make this source standards guide grow, with the ultimate goal of making it easier to just add files and have them magically be installed in the right places.
+
+
+
You shouldn't have to.  But:
+# To clean build products: [[waf clean]].
+# To clean the entire artifacts directory and configure-time variables: [[waf distclean]].
+# To uninstall: refer to the source section of [[Uninstalling the CloudStack]].
+
+
+
+The prerelease mechanism ({{{--prerelease=BRANCHNAME}}} argument to [[./waf deb|waf deb]] or [[./waf rpm|waf rpm]]) allows developers and builders to build packages with pre-release Release tags.  The Release tags are constructed in such a way that both the build number and the branch name are included, so developers can push these packages to repositories and upgrade them using yum or aptitude without having to manually delete and reinstall packages every time a new build is done.  Any package built with the prerelease mechanism gets a standard X.Y.Z version number -- and, due to the way that the prerelease Release tags are concocted, always upgrades any older prerelease package already present on any system.  The prerelease mechanism must never be used to create packages that are intended to be released as stable software to the general public.
+
+Relevant distribution documentation:
+*   http://www.debian.org/doc/debian-policy/ch-controlfields.html#s-f-Version
+*   http://fedoraproject.org/wiki/PackageNamingGuidelines#Pre-Release_packages
+!How the [[build number|waf build]] is treated when the prerelease mechanism is enabled
+The build number gets appended to the Release tag of the package.
+
+
+
In short:
+#Files with an extension of {{{.in}}} and files in the {{{conf}}}, {{{tomcatconf}}}, {{{bindir}}}, {{{libexec}}} and similar directories within the source have tokens {{{@SOMETOKEN@}}} automatically substituted for the corresponding [[configure variable|waf configure]].
+#Files in the {{{scripts/}}} folder have the token {{{@VERSION@}}} replaced by the version of the CloudStack upon installation.
+#There is a more detailed layout of how tokens are used in the document [[Source layout guide]].
+
+
+
!For installations done using distribution packages
+If you installed the CloudStack using distribution packages, use your operating system package manager to remove the CloudStack packages.  Examples follow:
+!!Fedora / CentOS
+{{{
+rpm -qa | grep ^cloud- | xargs rpm -e
+}}}
+will erase the CloudStack packages, but will not erase any modified configuration files, cache files or log files.  If you want to remove them:
+{{{
+rm -rf /var/log/cloud /etc/cloud /var/cache/cloud
+}}}
+!!Ubuntu
+{{{
+aptitude purge '~ncloud'
+}}}
+will remove all cloud packages and purge configuration files.
+{{{
+aptitude remove '~ncloud'
+}}}
+will remove packages and leave configuration files.
+!For installations done from compiled source
+{{{
+./waf uninstall
+}}}
+issued from the source directory as root, will go through every single file that the install process installed, [[and remove it|waf uninstall]].
+
+
+
!If you installed the CloudStack using [[the official package repositories|Installing the CloudStack from the official package repositories]]
+!!Fedora
+{{{yum update}}}.
+!!CentOS
+{{{yum update}}}.
+!!Ubuntu
+{{{
+aptitude update
+aptitude safe-upgrade
+}}}
+!If you installed from the source, or packages built from the source
+#Download the latest CloudStack release
+#Follow the same procedure you used to install the CloudStack the first time
+
+
+
You can use {{{rsync}}} to very quickly deploy a tree of files into another machine.
+
+Let's assume that you have your source tree in {{{/home/joe/cloud-2.3.4}}}.  To take that tree to another machine:
+{{{
+rsync -avzx --delete /home/joe/cloud-2.3.4/ root@othermachine:/home/joe/cloud-2.3.4/
+}}}
+(assuming, of course, the existence of {{{/home/joe}}} in {{{othermachine}}})
+
+
+
This wiki is organized by bite-size topics called //tiddlers//, heavily linked between each other.  Normally, you can browse through each topic -- the topic opens up in a new section of this window -- or you can use the search mechanism at the top of the right sidebar.  The right sidebar also contains a list of all the tiddlers in this document.
+!Yes, you can edit it
+If you have a local copy of this file, you can edit it.  We encourage you to [[send us your edited file through our bug tracker|http://bugs.cloud.com/enter_bug.cgi]] so we can merge your enhancements!
+
+#Enter your username for signing your edits: <<option txtUserName>>.  This will be stored in a cookie on your browser.
+#Set your options to the right of this page.  I personally prefer to keep AutoSave enabled so I don't have to think about saving the file.  If you have not saved your changes to the wiki yet, this wiki will warn you to save when you close it.
+#Be warned that your browser may ask for authorization to save this file on disk.  It's a scary dialog box, but it's perfectly safe -- go ahead.
+#Learn the wiki [[formatting style|http://tiddlywiki.org/wiki/TiddlyWiki_Markup]].
+#Double-click on the text of a tiddler to modify it.  Hit //done// at the top of the tiddler once you're done.
+#Add your tiddlers:
+##To do so, create a link {{{[[Your article name|Hyperlink title]]}}} in an existing tiddler, save it, and then click the italicized link.
+##As a principle: //keep the tiddlers bite-sized and rely heavily on hyperlinking to make them useful to the readers!//  This practice also makes merging the file into the source code that much easier and reduces the opportunity for merge conflicts.
+##Don't repeat yourself!  If you foresee that a particular chunk of text will be reused in different contexts, put it in a //separate// tiddler and link to that tiddler from the appropriate places.
+##The All and More tabs in the right sidebar let you find tiddler names to link to, and also orphan tiddlers (tiddlers that nobody links to) or missing tiddlers (nonexistent tiddlers that are linked somewhere).
+#Fundamental tiddlers in this wiki:
+##[[SiteTitle]] & [[SiteSubtitle]]: The title and subtitle of the site, as shown above (after saving, they will also appear in the browser title bar)
+##[[MainMenu]]: The menu (usually on the left)
+##[[DefaultTiddlers]]: Contains the names of the tiddlers that you want to appear when the TiddlyWiki is opened
+
+
+
+
waf supports an intelligent cache mechanism (disabled by default). waf can resurrect compiled code if the source files that produced it have not changed.  waf will use advanced checksums to prevent stale object files from being resurrected.
+
+To activate it, just set an environment variable {{{WAFCACHE}}} to point to a directory (that you must create) where waf will store the result of compiled files.
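+
+For example (the cache location shown is an arbitrary choice):
+{{{
+mkdir -p ~/.wafcache
+export WAFCACHE=~/.wafcache
+./waf configure build
+}}}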
+
+
+
+Hello, and thanks for your interest in the [[Cloud.com|http://cloud.com/]] CloudStack™!  The Cloud.com CloudStack™ is Open Source Software that allows organizations to build Infrastructure as a Service (IaaS) clouds.  Working with server, storage, and networking equipment of your choice, the CloudStack provides a turn-key software stack that dramatically simplifies the process of deploying and managing a cloud.
+!Get started with the CloudStack right now
+We've prepared comprehensive instructions to help you.
+# First, review the [[Installation Guide|http://cloud.com/community/cloudstack-21-community-edition-installation-guide]] sections titled //Overview// and //Prerequisites//.
+#Then, [[install the CloudStack components you want|Installing the CloudStack]] on your target machines.
+# After you're done installing, see [[Setting the CloudStack up]] to configure and run it.
+And, in the unlikely case you would want to uninstall the CloudStack, the document [[Uninstalling the CloudStack]] has you covered.
+!Be part of the [[Cloud.com community|http://cloud.com/community/]]!
+We are more than happy to have you ask us questions, hack our source code, and receive your contributions.
+* To get started developing with the CloudStack, please read [[Hacking on the CloudStack]] for more information.
+* If you are reading this from a copy of the source code in your machine, [[you can edit this document too|Using this wiki]], then share your changes with us.
+* Come to  [[our forums|http://cloud.com/community/forum]], [[sign up|http://cloud.com/community/user/register]] as a member, and post there.  We engineers lurk there to answer your questions.
+* If you find bugs, please [[register|http://bugs.cloud.com/createaccount.cgi]] on [[our bug tracker|http://bugs.cloud.com/]] and [[file a report|http://bugs.cloud.com/enter_bug.cgi]].
+* If you have patches or files to send us, get in touch with us at [[info@cloud.com|mailto:info@cloud.com]] or file them as attachments in our bug tracker above.
+!Contact us!
+Cloud.com's contact information is:
+>20400 Stevens Creek Blvd
+>Suite 390
+>Cupertino, CA 95014
+>Tel: +1 (888) 384-0962
+!Legal information
+//Unless otherwise specified// by Cloud.com, Inc., or in the sources themselves, [[this software is OSI certified Open Source Software distributed under the GNU General Public License, version 3|License statement]].  OSI Certified is a certification mark of the Open Source Initiative.  The software powering this documentation is """BSD-licensed""" and obtained from [[TiddlyWiki.com|http://tiddlywiki.com/]].
+
+
+
This is the typical lifecycle that you would follow when hacking on a CloudStack component, assuming that your [[development environment has been set up|Preparing your development environment]]:
+# [[Configure|waf configure]] the source code<br>{{{./waf configure --prefix=/home/youruser/cloudstack}}}
+# [[Build|waf build]] and [[install|waf install]] the CloudStack
+## {{{./waf install}}}
+## [[How to perform these tasks from Eclipse|How to integrate with Eclipse]]
+# [[Set the CloudStack component up|Setting the CloudStack components up]]
+# [[Run the CloudStack component|Running a CloudStack component from source]]
+# Hack on the code
+# Build and install the CloudStack again, [[preserving your existing configuration|Preserving the CloudStack configuration across source reinstalls]]<br>{{{./waf install --preserve-config}}}
+#{{{GOTO 4}}}
+
+
+
See [[Setting sudo up for passwordless root]].
+
+
+
[[waf|http://code.google.com/p/waf/]] is a self-contained, advanced build system written by Thomas Nagy, in the spirit of SCons or the GNU autotools suite.
+* To run waf on Linux / Mac: {{{./waf [...commands...]}}}
+* To run waf on Windows:     {{{waf.bat [...commands...]}}}
+{{{./waf --help}}} should be your first discovery point to find out both the configure-time options and the different processes that you can run using waf.
+!Where to learn more about waf
+* The first and foremost reference material: [[the waf book|http://freehackers.org/~tnagy/wafbook/index.html]].
+* http://code.google.com/p/waf/wiki/CodeSnippets
+* http://code.google.com/p/waf/w/list
+* http://code.google.com/p/waf/wiki/FAQ
+!Why does the CloudStack use waf?
+The CloudStack uses waf to build itself.  waf is a relative newcomer to the build system world; it borrows concepts from SCons and other later-generation build systems:
+# waf is very flexible and rich; unlike other build systems, it covers the entire life cycle, from compilation to installation to uninstallation.  It also supports [[dist|waf dist]] (create a source tarball), distcheck (check that the source tarball compiles and installs), autoconf-like checks for dependencies at compilation time, and more.  With waf, there is no need to maintain fragile shell scripts to do these tasks -- all of these tasks are already built in and configurable in a descriptive rather than imperative fashion.
+# waf is self-contained.  A single file, distributed with the project, enables everything to be built, with only a dependency on Python or Jython, both of which are freely available and shipped with all Linux systems.
+# waf also supports building projects written in multiple languages (in the case of the CloudStack, we build from C, Java and Python).
+# Since waf is written in Python, the entire library of the Python language is available to use in the build process.
+!What happens when waf runs?
+When you run waf, this happens behind the scenes:
+# When you run waf for the first time, it unpacks itself to a hidden  directory {{{.waf-1.X.Y.MD5SUM}}}, including the main program and all the Python libraries it provides and needs.
+# Immediately after unpacking itself, waf reads the {{{wscript}}} file at the root of the source directory.  After parsing this file and loading the functions defined there, it reads {{{wscript_build}}} and {{{wscript_configure}}} (from which it builds a {{{build()}}} and a {{{configure()}}} function dynamically).
+# After loading the build scripts as explained above, waf calls  the functions you specified in the command line as commands
+So, for example, {{{./waf configure build install}}} will:
+* call [[configure()|waf configure]] from {{{wscript_configure}}},
+* call [[build()|waf build]] loaded from the contents of {{{wscript_build}}},
+* call [[build()|waf build]] once more but with {{{Options.is_install = True}}}.
+As part of build(), waf invokes ant to build the Java portion of our stack.
+!If you have waf, why are there ant project files in my source tree?
+See [[How waf uses ant]].
+
+
+
{{{
+./waf build
+}}}
+!What does this do?
+This compiles every file that requires compilation or substitution.  The artifacts of this compilation end up, unless otherwise specified, in {{{artifacts/default}}}
+!When / why should I run this?
+You run this command once after you've [[configured the source|waf configure]], and to trigger compilation of any modifications you have made.  However, if you plan on installing your modifications, you can just run [[waf install]] directly -- install implicitly builds.
+!How does this work?
+This runs the contents of {{{wscript_build}}}, which takes care of discovering and describing what needs to be built, which build products / sources need to be installed, and where they should be installed.  In detail:
+# It compiles source code to object code, calling the C compiler, Python compiler, and [[invoking several ant targets|How waf uses ant]].
+# It [[substitutes configure-time variables|Token substitution of configure-time variables]] in the files that require token substitution.
+# It creates packages for those components that require packaging.
+# It creates the [[SCCS info]] file that you can use to track the provenance of an installation.
+!Viewing a progress bar when building
+Append the {{{--progress}}} option to {{{./waf build}}}.
+!Accelerating builds
+See [[WAFCACHE]].
+
+The commands [[waf deb]] and [[waf rpm]] take advantage of the waf cache.
+!Debugging the build process
+#Almost everything that gets built has a target name. [[waf list_targets]] will list the target names.
+#{{{./waf build -vvvvv}}} will give you //a lot of debugging information// on what waf is doing.
+!Specifying a build number
+Normally, the build number is auto-detected from the [[source code control system|SCCS info]].  You can override it by passing the parameter {{{--build-number}}} when building.  The build number is used internally in the JAR files to construct the {{{Implementation-Version}}} property of the metafile manifest included in the JAR file.
+
+The commands [[waf deb]] and [[waf rpm]] also support the {{{--build-number}}} build-time option.
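+
+For example (the build number shown is arbitrary):
+{{{
+./waf build --build-number=1234
+./waf rpm --build-number=1234
+}}}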
+
+
+
{{{
+./waf clean
+}}}
+Makes an inventory of all build products in {{{artifacts/default}}}, and removes them.
+
+Contrast to [[waf distclean]].
+
+
+
{{{
+./waf configure --prefix=/directory/that/you/have/write/permission/to
+}}}
+!What does this do?
+This runs the file {{{wscript_configure}}}, which takes care of setting the variables and options that waf will use for compilation and installation, including the [[installation directory|Installation paths]] {{{PREFIX}}} and many other installation paths.  Some of these variables refer to the [[paths where waf will install|Installation paths]] different types of files; some other variables refer to defaults or values that will get [[compiled / substituted in the object code|Token substitution of configure-time variables]].  Some of these settings are //auto-detected// based on the platform you're compiling the code on.
+!When / why should I run this?
+You run this command //once//, in preparation to building the stack, or every time you need to change a configure-time variable.  Once you find an acceptable set of configure-time variables, you should not need to run {{{configure}}} again.
+!What happens if I don't run it?
+For convenience reasons, if you forget to configure the source, waf will autoconfigure itself and select some sensible default configuration options.  By default, {{{PREFIX}}} is {{{/usr/local}}}, but you can set it e.g. to {{{/home/youruser/cloudstack}}} if you plan to do a non-root install.  Beware that you can later install the stack as a regular user, but most components need to //run// as root.
+!What variables / options exist for configure?
+In general: refer to the output of {{{./waf configure --help}}}.
+
+Specific, useful options:
+* {{{--no-dep-check}}}: will skip dependency checks for java packages needed to compile (saves 20 seconds when redoing the configure)
+* {{{--with-db-user}}}, {{{--with-db-pw}}}, {{{--with-db-host}}}: informs the build system of the """MySQL""" configuration needed to be able to run [[waf deploydb]]
+After configuration, the configure variables will be available inside the file {{{artifacts/c4che/build.default.py}}} and you will be able to list them with [[waf showconfig]].
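+
+For example, a typical developer configure line (the prefix and database values are placeholders for your own environment):
+{{{
+./waf configure --prefix=/home/youruser/cloudstack --no-dep-check --with-db-host=127.0.0.1 --with-db-user=cloud --with-db-pw=cloud
+}}}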
+
+
+
{{{
+./waf deb
+}}}
+[[builds Debian packages|Building distribution packages]] of the CloudStack.
+
+This command supports [[the prerelease mechanism|The prerelease mechanism]] and [[custom build numbers|waf build]].  It also supports [[accelerated builds|WAFCACHE]], so if you've already compiled your source tree once with the waf cache enabled, creating packages will be orders of magnitude faster.
+
+
+
See [[waf run]].
+
+
+
{{{
+./waf deploydb_kvm
+}}}
+!What does this do?
+This command runs the function {{{deploydb}}} in {{{wscript}}}, with a parameter {{{"kvm"}}}.  The function uses the MySQL client to recreate / deploy the database schema and default initialized data for the Cloud.com Management Server.  The credentials and database host this command will use are the ones specified at [[configure time|waf configure]] with the options {{{--with-db-user}}}, {{{--with-db-pw}}} and {{{--with-db-host}}}.
+
+''Warning'': This function will //destroy// any existing Cloud.com databases on the target host.
+
+
+
{{{
+./waf dist
+}}}
+Creates a source distribution (bzip2-compressed tarball) of the CloudStack in the source directory.  [[SCCS info]] is automatically determined by {{{dist}}} and included in the tarball.
+
+
+
{{{
+./waf distclean
+}}}
+Completely nukes the {{{artifacts}}} directory, thereby eliminating all build products and [[waf configuration|waf configure]].
+
+Contrast to [[waf clean]].
+
+
+
{{{
+./waf install
+}}}
+!When / why should I run this?
+You run this command when you want to install the CloudStack to the directories specified in the [[configuration|waf configure]].
+
+''Warning'': each time you do {{{./waf install}}}, the configuration files  in the installation directory are ''overwritten''.  Consult the document [[Preserving the CloudStack configuration across source reinstalls]] for techniques to prevent this.
+
+If you are going to install for production, //you should run this process as root//; that guarantees that the proper permissions and file ownerships are set on certain secure files.  If, conversely, you only want to install the stack as your own user and in a directory to which you have write permission, it's fine to run {{{./waf install}}} as your own user.
+!What does this do?
+This runs the contents of {{{wscript_build}}}, with an option variable  {{{Options.is_install = True}}}.  When this variable is set, waf will, in addition to compiling whatever needs compilation, install the files described in {{{wscript_build}}}.
+!{{{--destdir}}}
+When installing, you may specify the {{{--destdir=/example/dir}}} option.  This will cause the installation of files to be performed relative to the {{{/example/dir}}} specified as argument to the option.  By way of example, if waf would install the file {{{/usr/bin/cloud-setup-databases}}}, the file would actually be installed in {{{/example/dir/usr/bin/cloud-setup-databases}}}; and so on for all installed files.
+
+This option is implicitly used in the packaging commands [[waf deb]] and [[waf rpm]].
+
+
+
+
+
{{{
+./waf installdebdeps
+}}}
+Parses the build dependency list for DEB packaging and uses aptitude (through [[sudo]]) to install them on your system.
+{{{
+./waf viewdebdeps
+}}}
+Shows a [[list of dependencies|waf viewdebdeps]].
+
+
+
{{{
+./waf installrpmdeps
+}}}
+Parses the build dependency list for RPM packaging and uses yum (through [[sudo]]) to install them on your system.
+{{{
+./waf viewrpmdeps
+}}}
+Shows a [[list of dependencies|waf viewrpmdeps]].
+
+
+
{{{
+./waf list_targets
+}}}
+prints out a list of all known build targets.
+
+You can then run
+{{{
+./waf build --targets=targetname
+}}}
+to build only that specific {{{targetname}}}.
+
+
+
{{{
+./waf rpm
+}}}
+[[builds Fedora or CentOS packages|Building distribution packages]] of the CloudStack.
+
+This command supports [[the prerelease mechanism|The prerelease mechanism]] and [[custom build numbers|waf build]].  It also supports [[accelerated builds|WAFCACHE]], so if you've already compiled your source tree once with the waf cache enabled, creating packages will be orders of magnitude faster.
+
+
+
{{{
+./waf run
+}}}
+!What does this do?
+This command starts the Management Server.
+
+More specifically, what it does is create an environment for the Apache Tomcat server, pre-configured during {{{./waf install}}}, and then start it in the foreground, to run the Management Server servlets and components.  When the Tomcat server starts, the management server is running and you can visit it by opening http://localhost:8080/
+
+Once it's running, you can log on as administrator with the user combination {{{admin}}} / {{{password}}}
+!Run with debugging enabled
+If you are developing the CloudStack, it is useful to be able to latch onto the Management Server process with your debugger.  To do that:
+{{{
+./waf debug
+}}}
+If you prefer the process to be stopped during startup and to have it continue only when your debugger has attached itself:
+{{{
+./waf debug --debug-suspend
+}}}
+
+
+
{{{
+./waf run_agent
+}}}
+Runs the Agent as root by using {{{sudo}}} to invoke it.
+
+
+
{{{
+./waf run_console_proxy
+}}}
+Runs the Console Proxy as root by using {{{sudo}}} to invoke it.
+
+
+
{{{
+./waf showconfig
+}}}
+Displays a summary of the values for each configure-time variable.
+
+
+
{{{
+./waf uninstall
+}}}
+Makes a listing of all the files and directories that would be installed, then removes them from the installation directory.
+
+This command supports the {{{--destdir}}} option as documented in [[waf install]].
+
+
+
See [[waf installdebdeps]].
+
+
+
See [[waf installrpmdeps]].
+
+
+ + + + + + + + + + + + + diff --git a/agent/libexec/agent-runner.in b/agent/libexec/agent-runner.in index fd2819b84f9..8983aeda738 100755 --- a/agent/libexec/agent-runner.in +++ b/agent/libexec/agent-runner.in @@ -33,7 +33,7 @@ for x in private public ; do defaultroute=`ip route | grep ^default | cut -d ' ' -f 5` test -n "$defaultroute" echo "Using auto-discovered network device $defaultroute which is the default route" - SERVICEARGS="$SERVICEARGS -D$x.network.device="$defaultroute + SERVICEARGS="$SERVICEARGS $x.network.device="$defaultroute fi done @@ -52,7 +52,7 @@ function termagent() { trap termagent TERM while true ; do - java -Xms128M -Xmx384M -cp "$CLASSPATH" $SERVICEARGS "$@" com.cloud.agent.AgentShell & + java -Xms128M -Xmx384M -cp "$CLASSPATH" "$@" com.cloud.agent.AgentShell $SERVICEARGS & agentpid=$! echo "Agent started. PID: $!" >&2 wait $agentpid diff --git a/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java b/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java index 0421dcfbeb3..915b9ee2c26 100644 --- a/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java +++ b/agent/src/com/cloud/agent/resource/computing/LibvirtComputingResource.java @@ -114,6 +114,7 @@ import com.cloud.agent.api.ReadyAnswer; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.RebootAnswer; import com.cloud.agent.api.RebootCommand; +import com.cloud.agent.api.RebootRouterCommand; import com.cloud.agent.api.StartAnswer; import com.cloud.agent.api.StartCommand; import com.cloud.agent.api.StartConsoleProxyAnswer; @@ -852,8 +853,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - if (isDirectAttachedNetwork(router.getVlanId())) - default_network_rules_for_systemvm(vmName); + /*if (isDirectAttachedNetwork(router.getVlanId())) + default_network_rules_for_systemvm(vmName);*/ } catch (LibvirtException e) { if (nics != null) { cleanupVMNetworks(nics); @@ -1069,6 +1070,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv return execute((StopCommand)cmd); } else if (cmd instanceof GetVmStatsCommand) { return execute((GetVmStatsCommand)cmd); + } else if (cmd instanceof RebootRouterCommand) { + return execute((RebootRouterCommand)cmd); } else if (cmd instanceof RebootCommand) { return execute((RebootCommand)cmd); } else if (cmd instanceof GetHostStatsCommand) { @@ -1174,7 +1177,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (primaryPool == null) { String result = "Failed to get primary pool"; s_logger.debug(result); - new CreateAnswer(cmd, result); + return new CreateAnswer(cmd, result); } if (cmd.getTemplateUrl() != null) { @@ -1182,7 +1185,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (tmplVol == null) { String result = "Failed to get tmpl vol"; s_logger.debug(result); - new CreateAnswer(cmd, result); + return new CreateAnswer(cmd, result); } LibvirtStorageVolumeDef volDef = new LibvirtStorageVolumeDef(UUID.randomUUID().toString(), tmplVol.getInfo().capacity, volFormat.QCOW2, tmplVol.getPath(), volFormat.QCOW2); @@ -1269,6 +1272,14 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv s_logger.debug("Failed to backup snaptshot: " + result); return new BackupSnapshotAnswer(cmd, false, result, null); } + /*Delete the snapshot on primary*/ + Domain vm = getDomain(cmd.getVmName()); + String vmUuid = vm.getUUIDString(); + Object[] args = new Object[] 
{snapshotName, vmUuid}; + String snapshot = SnapshotXML.format(args); + s_logger.debug(snapshot); + DomainSnapshot snap = vm.snapshotLookupByName(snapshotName); + snap.delete(0); } catch (LibvirtException e) { return new BackupSnapshotAnswer(cmd, false, e.toString(), null); } catch (URISyntaxException e) { @@ -1278,10 +1289,45 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } protected DeleteSnapshotBackupAnswer execute(final DeleteSnapshotBackupCommand cmd) { + Long dcId = cmd.getDataCenterId(); + Long accountId = cmd.getAccountId(); + Long volumeId = cmd.getVolumeId(); + try { + StoragePool secondaryStoragePool = getNfsSPbyURI(_conn, new URI(cmd.getSecondaryStoragePoolURL())); + String ssPmountPath = _mountPoint + File.separator + secondaryStoragePool.getUUIDString(); + String snapshotDestPath = ssPmountPath + File.separator + dcId + File.separator + "snapshots" + File.separator + accountId + File.separator + volumeId; + + final Script command = new Script(_manageSnapshotPath, _timeout, s_logger); + command.add("-d", snapshotDestPath); + command.add("-n", cmd.getSnapshotName()); + + command.execute(); + } catch (LibvirtException e) { + return new DeleteSnapshotBackupAnswer(cmd, false, e.toString()); + } catch (URISyntaxException e) { + return new DeleteSnapshotBackupAnswer(cmd, false, e.toString()); + } return new DeleteSnapshotBackupAnswer(cmd, true, null); } protected Answer execute(DeleteSnapshotsDirCommand cmd) { + Long dcId = cmd.getDataCenterId(); + Long accountId = cmd.getAccountId(); + Long volumeId = cmd.getVolumeId(); + try { + StoragePool secondaryStoragePool = getNfsSPbyURI(_conn, new URI(cmd.getSecondaryStoragePoolURL())); + String ssPmountPath = _mountPoint + File.separator + secondaryStoragePool.getUUIDString(); + String snapshotDestPath = ssPmountPath + File.separator + dcId + File.separator + "snapshots" + File.separator + accountId + File.separator + volumeId; + + final Script command = new Script(_manageSnapshotPath, _timeout, s_logger); + command.add("-d", snapshotDestPath); + command.add("-n", cmd.getSnapshotName()); + command.execute(); + } catch (LibvirtException e) { + return new Answer(cmd, false, e.toString()); + } catch (URISyntaxException e) { + return new Answer(cmd, false, e.toString()); + } return new Answer(cmd, true, null); } @@ -1475,7 +1521,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (secondaryPool == null) { return new Answer(cmd, false, " Failed to create storage pool"); } - tmplVol = secondaryPool.storageVolLookupByName(tmpltname); + tmplVol = getVolume(secondaryPool, getPathOfStoragePool(secondaryPool) + tmpltname); if (tmplVol == null) { return new Answer(cmd, false, " Can't find volume"); } @@ -1489,6 +1535,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv if (primaryVol == null) { return new Answer(cmd, false, " Can't create storage volume on storage pool"); } + StorageVolInfo priVolInfo = primaryVol.getInfo(); DownloadAnswer answer = new DownloadAnswer(null, 100, @@ -1520,6 +1567,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } if (secondaryPool != null) { + secondaryPool.destroy(); + secondaryPool.undefine(); secondaryPool.free(); } } catch (LibvirtException l) { @@ -1808,12 +1857,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } private Answer execute(CheckVirtualMachineCommand cmd) { - if (VirtualMachineName.isValidRouterName(cmd.getVmName()) || 
VirtualMachineName.isValidConsoleProxyName(cmd.getVmName()) ) { - /*For domr, the trick is that the actual vmname is vmName-domrId. - *Here, we need to build the relationship between vmName and its actual name at first*/ - getAllVms(); - } - final State state = getVmState(cmd.getVmName()); Integer vncPort = null; if (state == State.Running) { @@ -2024,6 +2067,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } } + + protected Answer execute(RebootRouterCommand cmd) { + RebootAnswer answer = (RebootAnswer) execute((RebootCommand) cmd); + String result = _virtRouterResource.connect(cmd.getPrivateIpAddress()); + if (result == null) { + return answer; + } else { + return new Answer(cmd, false, result); + } + } protected GetVmStatsAnswer execute(GetVmStatsCommand cmd) { List vmNames = cmd.getVmNames(); @@ -2059,8 +2112,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv _vms.put(vmName, State.Stopping); } try { - if (isDirectAttachedNetwork(cmd.getVnet())) - destroy_network_rules_for_vm(vmName); + /*if (isDirectAttachedNetwork(cmd.getVnet())) + destroy_network_rules_for_vm(vmName);*/ String result = stopVM(vmName, defineOps.UNDEFINE_VM); answer = new StopAnswer(cmd, null, port, bytesSent, bytesReceived); @@ -2233,8 +2286,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } } - if (isDirectAttachedNetwork(cmd.getGuestNetworkId())) - default_network_rules(cmd.getVmName(), cmd.getGuestIpAddress()); + /*if (isDirectAttachedNetwork(cmd.getGuestNetworkId())) + default_network_rules(cmd.getVmName(), cmd.getGuestIpAddress());*/ return null; } catch(LibvirtException e) { @@ -3115,7 +3168,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv disks.add(hda); diskDef hdb = new diskDef(); - hdb.defFileBasedDisk(datadiskPath, "vdb", diskDef.diskBus.IDE, diskDef.diskFmtType.QCOW2); + hdb.defFileBasedDisk(datadiskPath, "hdb", diskDef.diskBus.IDE, diskDef.diskFmtType.RAW); disks.add(hdb); return disks; @@ -3163,12 +3216,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv disks.add(hdb); } - if (isoPath != null) { - diskDef hdc = new diskDef(); - hdc.defFileBasedDisk(isoPath, "hdc", diskDef.diskBus.IDE, diskDef.diskFmtType.RAW); - hdc.setDeviceType(diskDef.deviceType.CDROM); - disks.add(hdc); - } + /*Add a placeholder for iso, even if there is no iso attached*/ + diskDef hdc = new diskDef(); + hdc.defFileBasedDisk(isoPath, "hdc", diskDef.diskBus.IDE, diskDef.diskFmtType.RAW); + hdc.setDeviceType(diskDef.deviceType.CDROM); + disks.add(hdc); + return disks; } @@ -3492,4 +3545,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv } return vol; } + private String getPathOfStoragePool(StoragePool pool) throws LibvirtException { + return _mountPoint + File.separator + pool.getUUIDString() + File.separator; + } } diff --git a/core/src/com/cloud/hypervisor/Hypervisor.java b/api/src/com/cloud/hypervisor/Hypervisor.java similarity index 100% rename from core/src/com/cloud/hypervisor/Hypervisor.java rename to api/src/com/cloud/hypervisor/Hypervisor.java diff --git a/core/src/com/cloud/vm/VmCharacteristics.java b/api/src/com/cloud/vm/VmCharacteristics.java similarity index 56% rename from core/src/com/cloud/vm/VmCharacteristics.java rename to api/src/com/cloud/vm/VmCharacteristics.java index 07e88329e21..6df26121c30 100644 --- a/core/src/com/cloud/vm/VmCharacteristics.java +++ 
b/api/src/com/cloud/vm/VmCharacteristics.java @@ -1,7 +1,7 @@ /** * Copyright (C) 2010 Cloud.com, Inc. All rights reserved. * - * This software is licensed under the GNU General Public License v3 or later. + * This software is licensed under the GNU General Public License v3 or later. * * It is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,15 +17,18 @@ */ package com.cloud.vm; +import java.util.Map; + +import com.cloud.hypervisor.Hypervisor; + public class VmCharacteristics { + int core; + int speed; // in mhz + long ram; // in bytes + Hypervisor.Type hypervisorType; VirtualMachine.Type type; - int cpus; // -1 means, take everything. - int speed; // In megahertz - long ram; // in bytes - protected VmCharacteristics() { - - } + Map params; public VmCharacteristics(VirtualMachine.Type type) { this.type = type; @@ -34,4 +37,32 @@ public class VmCharacteristics { public VirtualMachine.Type getType() { return type; } -} + + + public VmCharacteristics() { + } + + public int getCores() { + return core; + } + + public int getSpeed() { + return speed; + } + + public long getRam() { + return ram; + } + + public Hypervisor.Type getHypervisorType() { + return hypervisorType; + } + + public VmCharacteristics(int core, int speed, long ram, Hypervisor.Type type, Map params) { + this.core = core; + this.speed = speed; + this.ram = ram; + this.hypervisorType = type; + this.params = params; + } +} \ No newline at end of file diff --git a/build.xml b/build.xml index a676328389a..f03c94b4734 100755 --- a/build.xml +++ b/build.xml @@ -10,15 +10,7 @@ - - - - - - - - - + @@ -29,57 +21,57 @@ - + - + - + - + - + - + - + - + - + - + - - - - - - + + + + + + diff --git a/build/build-cloud.xml b/build/build-cloud.xml index 416ab36ee1b..fde4bb491f6 100755 --- a/build/build-cloud.xml +++ b/build/build-cloud.xml @@ -60,9 +60,7 @@ - - - + @@ -201,8 +199,7 @@ - - + @@ -223,6 +220,7 @@ + @@ -236,6 +234,7 @@ + diff --git a/build/package.xml b/build/package.xml index 4d15dd2021a..2476eea3bbe 100755 --- a/build/package.xml +++ b/build/package.xml @@ -132,7 +132,11 @@ + + + + @@ -232,8 +236,7 @@ - - + diff --git a/cloud.spec b/cloud.spec index 00065cc3583..7f8ee77a47a 100644 --- a/cloud.spec +++ b/cloud.spec @@ -237,6 +237,7 @@ Requires: augeas >= 0.7.1 Requires: rsync Requires: /bin/egrep Requires: /sbin/ip +Requires: vconfig Group: System Environment/Libraries %description agent The Cloud.com agent is in charge of managing shared computing resources in @@ -450,14 +451,18 @@ fi %doc %{_docdir}/%{name}-%{version}/version-info %doc %{_docdir}/%{name}-%{version}/configure-info %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files client-ui %defattr(0644,root,root,0755) %{_datadir}/%{name}/management/webapps/client/* %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files server @@ -465,7 +470,9 @@ fi %{_javadir}/%{name}-server.jar %{_sysconfdir}/%{name}/server/* %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %if %{_premium} @@ -475,7 +482,9 @@ fi %{_libdir}/%{name}/agent/scripts/* %{_libdir}/%{name}/agent/vms/systemvm.zip %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %else @@ -485,19 +494,23 @@ fi %{_libdir}/%{name}/agent/scripts/installer/* %{_libdir}/%{name}/agent/scripts/network/domr/*.sh %{_libdir}/%{name}/agent/scripts/storage/*.sh +%{_libdir}/%{name}/agent/scripts/storage/zfs/* 
%{_libdir}/%{name}/agent/scripts/storage/qcow2/* %{_libdir}/%{name}/agent/scripts/storage/secondary/* %{_libdir}/%{name}/agent/scripts/util/* %{_libdir}/%{name}/agent/scripts/vm/*.sh %{_libdir}/%{name}/agent/scripts/vm/storage/nfs/* +%{_libdir}/%{name}/agent/scripts/vm/storage/iscsi/* %{_libdir}/%{name}/agent/scripts/vm/network/* %{_libdir}/%{name}/agent/scripts/vm/hypervisor/*.sh %{_libdir}/%{name}/agent/scripts/vm/hypervisor/kvm/* +%{_libdir}/%{name}/agent/scripts/vm/hypervisor/xen/* %{_libdir}/%{name}/agent/vms/systemvm.zip %{_libdir}/%{name}/agent/scripts/vm/hypervisor/xenserver/* -%{_libdir}/%{name}/agent/vms/systemvm-premium.zip %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %endif @@ -506,7 +519,9 @@ fi %defattr(-,root,root,-) %attr(755,root,root) %{_bindir}/%{name}-daemonize %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files deps @@ -529,13 +544,16 @@ fi %{_javadir}/%{name}-xmlrpc-common-3.*.jar %{_javadir}/%{name}-xmlrpc-client-3.*.jar %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files core %defattr(0644,root,root,0755) %{_javadir}/%{name}-core.jar %doc README +%doc INSTALL %doc HACKING %doc debian/copyright @@ -545,14 +563,18 @@ fi %attr(0755,root,root) %{_sbindir}/%{name}-vn %attr(0755,root,root) %{_initrddir}/%{name}-vnetd %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files python %defattr(0644,root,root,0755) %{_prefix}/lib*/python*/site-packages/%{name}* %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files setup @@ -572,7 +594,9 @@ fi %{_datadir}/%{name}/setup/postprocess-20to21.sql %{_datadir}/%{name}/setup/schema-20to21.sql %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files client @@ -614,13 +638,16 @@ fi %dir %attr(770,root,%{name}) %{_localstatedir}/log/%{name}/management %dir %attr(770,root,%{name}) %{_localstatedir}/log/%{name}/agent %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files agent-libs %defattr(0644,root,root,0755) %{_javadir}/%{name}-agent.jar %doc README +%doc INSTALL %doc HACKING %doc debian/copyright @@ -639,7 +666,9 @@ fi %attr(0755,root,root) %{_bindir}/%{name}-setup-agent %dir %attr(770,root,root) %{_localstatedir}/log/%{name}/agent %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files console-proxy @@ -654,7 +683,9 @@ fi %attr(0755,root,root) %{_bindir}/%{name}-setup-console-proxy %dir %attr(770,root,root) %{_localstatedir}/log/%{name}/console-proxy %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %if %{_premium} @@ -667,14 +698,18 @@ fi %{_libdir}/%{name}/test/* %{_sysconfdir}/%{name}/test/* %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files premium-deps %defattr(0644,root,root,0755) %{_javadir}/%{name}-premium/*.jar %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files premium @@ -688,7 +723,9 @@ fi %{_datadir}/%{name}/setup/create-database-premium.sql %{_datadir}/%{name}/setup/create-schema-premium.sql %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %files usage @@ -701,7 +738,9 @@ fi %config(noreplace) %{_sysconfdir}/%{name}/usage/log4j-%{name}_usage.xml %config(noreplace) %attr(640,root,%{name}) %{_sysconfdir}/%{name}/usage/db.properties %doc README +%doc INSTALL %doc HACKING +%doc README.html %doc debian/copyright %endif diff --git 
a/console-proxy/libexec/console-proxy-runner.in b/console-proxy/libexec/console-proxy-runner.in index 8ff9987b795..4c01f7070d9 100755 --- a/console-proxy/libexec/console-proxy-runner.in +++ b/console-proxy/libexec/console-proxy-runner.in @@ -33,7 +33,7 @@ for x in private public ; do defaultroute=`ip route | grep ^default | cut -d ' ' -f 5` test -n "$defaultroute" echo "Using auto-discovered network device $defaultroute which is the default route" - SERVICEARGS="$SERVICEARGS -D$x.network.device="$defaultroute + SERVICEARGS="$SERVICEARGS $x.network.device="$defaultroute fi done @@ -52,7 +52,7 @@ function termagent() { trap termagent TERM while true ; do - java -Xms128M -Xmx384M -cp "$CLASSPATH" $SERVICEARGS "$@" com.cloud.agent.AgentShell & + java -Xms128M -Xmx384M -cp "$CLASSPATH" "$@" com.cloud.agent.AgentShell $SERVICEARGS & agentpid=$! echo "Console Proxy started. PID: $!" >&2 wait $agentpid diff --git a/core/.classpath b/core/.classpath index d6c04820681..426e2e6c1b0 100644 --- a/core/.classpath +++ b/core/.classpath @@ -18,5 +18,26 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/core/src/com/cloud/agent/api/BackupSnapshotCommand.java b/core/src/com/cloud/agent/api/BackupSnapshotCommand.java index edf68518eb2..ccf45d71115 100644 --- a/core/src/com/cloud/agent/api/BackupSnapshotCommand.java +++ b/core/src/com/cloud/agent/api/BackupSnapshotCommand.java @@ -30,6 +30,7 @@ public class BackupSnapshotCommand extends SnapshotCommand { private boolean isFirstSnapshotOfRootVolume; private boolean isVolumeInactive; private String firstBackupUuid; + private String vmName; protected BackupSnapshotCommand() { @@ -56,7 +57,8 @@ public class BackupSnapshotCommand extends SnapshotCommand { String prevBackupUuid, String firstBackupUuid, boolean isFirstSnapshotOfRootVolume, - boolean isVolumeInactive) + boolean isVolumeInactive, + String vmName) { super(primaryStoragePoolNameLabel, secondaryStoragePoolURL, snapshotUuid, snapshotName, dcId, accountId, volumeId); this.prevSnapshotUuid = prevSnapshotUuid; @@ -64,6 +66,7 @@ public class BackupSnapshotCommand extends SnapshotCommand { this.firstBackupUuid = firstBackupUuid; this.isFirstSnapshotOfRootVolume = isFirstSnapshotOfRootVolume; this.isVolumeInactive = isVolumeInactive; + this.vmName = vmName; } public String getPrevSnapshotUuid() { @@ -86,4 +89,7 @@ public class BackupSnapshotCommand extends SnapshotCommand { return isVolumeInactive; } + public String getVmName() { + return vmName; + } } \ No newline at end of file diff --git a/core/src/com/cloud/agent/api/storage/CreateCommand.java b/core/src/com/cloud/agent/api/storage/CreateCommand.java index b3ff5f6c312..2c0ade671a9 100644 --- a/core/src/com/cloud/agent/api/storage/CreateCommand.java +++ b/core/src/com/cloud/agent/api/storage/CreateCommand.java @@ -29,6 +29,7 @@ public class CreateCommand extends Command { private StoragePoolTO pool; private DiskCharacteristicsTO diskCharacteristics; private String templateUrl; + private long size; protected CreateCommand() { super(); @@ -44,7 +45,7 @@ public class CreateCommand extends Command { * @param pool */ public CreateCommand(VolumeVO vol, VMInstanceVO vm, DiskCharacteristicsTO diskCharacteristics, String templateUrl, StoragePoolVO pool) { - this(vol, vm, diskCharacteristics, pool); + this(vol, vm, diskCharacteristics, pool, 0); this.templateUrl = templateUrl; } @@ -56,11 +57,12 @@ public class CreateCommand extends Command { * @param diskCharacteristics * @param pool */ - public CreateCommand(VolumeVO vol, VMInstanceVO vm, 
DiskCharacteristicsTO diskCharacteristics, StoragePoolVO pool) { + public CreateCommand(VolumeVO vol, VMInstanceVO vm, DiskCharacteristicsTO diskCharacteristics, StoragePoolVO pool, long size) { this.volId = vol.getId(); this.diskCharacteristics = diskCharacteristics; this.pool = new StoragePoolTO(pool); this.templateUrl = null; + this.size = size; } @Override @@ -83,4 +85,8 @@ public class CreateCommand extends Command { public long getVolumeId() { return volId; } + + public long getSize(){ + return this.size; + } } diff --git a/core/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/core/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java new file mode 100644 index 00000000000..0df9c7df36c --- /dev/null +++ b/core/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -0,0 +1,122 @@ +package com.cloud.hypervisor.vmware.resource; + +import java.util.Map; + +import javax.naming.ConfigurationException; + +import com.cloud.agent.IAgentControl; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.PingCommand; +import com.cloud.agent.api.StartupCommand; +import com.cloud.agent.api.storage.CopyVolumeAnswer; +import com.cloud.agent.api.storage.CopyVolumeCommand; +import com.cloud.agent.api.storage.CreateAnswer; +import com.cloud.agent.api.storage.CreateCommand; +import com.cloud.agent.api.storage.DestroyCommand; +import com.cloud.agent.api.storage.DownloadAnswer; +import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; +import com.cloud.agent.api.storage.ShareAnswer; +import com.cloud.agent.api.storage.ShareCommand; +import com.cloud.host.Host.Type; +import com.cloud.resource.ServerResource; +import com.cloud.storage.resource.StoragePoolResource; + +public class VmwareResource implements StoragePoolResource, ServerResource { + + @Override + public DownloadAnswer execute(PrimaryStorageDownloadCommand cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public Answer execute(DestroyCommand cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public ShareAnswer execute(ShareCommand cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public CopyVolumeAnswer execute(CopyVolumeCommand cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public CreateAnswer execute(CreateCommand cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public void disconnected() { + // TODO Auto-generated method stub + + } + + @Override + public Answer executeRequest(Command cmd) { + // TODO Auto-generated method stub + return null; + } + + @Override + public IAgentControl getAgentControl() { + // TODO Auto-generated method stub + return null; + } + + @Override + public PingCommand getCurrentStatus(long id) { + // TODO Auto-generated method stub + return null; + } + + @Override + public Type getType() { + // TODO Auto-generated method stub + return null; + } + + @Override + public StartupCommand[] initialize() { + // TODO Auto-generated method stub + return null; + } + + @Override + public void setAgentControl(IAgentControl agentControl) { + // TODO Auto-generated method stub + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + + // TODO Auto-generated method stub + return true; + } + + @Override + public String getName() { + // TODO Auto-generated method stub + return null; + } + + @Override + public boolean start() { + // TODO Auto-generated method stub + 
return false; + } + + @Override + public boolean stop() { + // TODO Auto-generated method stub + return false; + } +} diff --git a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 06ad2291007..1f5ca0f6827 100644 --- a/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/core/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -148,13 +148,13 @@ import com.cloud.exception.InternalErrorException; import com.cloud.host.Host.Type; import com.cloud.hypervisor.Hypervisor; import com.cloud.resource.ServerResource; -import com.cloud.storage.StorageLayer; -import com.cloud.storage.StoragePoolVO; -import com.cloud.storage.VolumeVO; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageLayer; +import com.cloud.storage.StoragePoolVO; import com.cloud.storage.Volume.StorageResourceType; import com.cloud.storage.Volume.VolumeType; +import com.cloud.storage.VolumeVO; import com.cloud.storage.resource.StoragePoolResource; import com.cloud.storage.template.TemplateInfo; import com.cloud.utils.NumbersUtil; @@ -171,6 +171,7 @@ import com.cloud.vm.State; import com.cloud.vm.VirtualMachineName; import com.trilead.ssh2.SCPClient; import com.xensource.xenapi.APIVersion; +import com.xensource.xenapi.Bond; import com.xensource.xenapi.Connection; import com.xensource.xenapi.Console; import com.xensource.xenapi.Host; @@ -183,6 +184,10 @@ import com.xensource.xenapi.Pool; import com.xensource.xenapi.SR; import com.xensource.xenapi.Session; import com.xensource.xenapi.Types; +import com.xensource.xenapi.Types.BadServerResponse; +import com.xensource.xenapi.Types.IpConfigurationMode; +import com.xensource.xenapi.Types.VmPowerState; +import com.xensource.xenapi.Types.XenAPIException; import com.xensource.xenapi.VBD; import com.xensource.xenapi.VDI; import com.xensource.xenapi.VIF; @@ -190,9 +195,6 @@ import com.xensource.xenapi.VLAN; import com.xensource.xenapi.VM; import com.xensource.xenapi.VMGuestMetrics; import com.xensource.xenapi.XenAPIObject; -import com.xensource.xenapi.Types.BadServerResponse; -import com.xensource.xenapi.Types.VmPowerState; -import com.xensource.xenapi.Types.XenAPIException; /** * Encapsulates the interface to the XenServer API. @@ -2792,6 +2794,7 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR protected String startSystemVM(String vmName, String vlanId, Network nw0, List vols, String bootArgs, String guestMacAddr, String privateIp, String privateMacAddr, String publicMacAddr, int cmdPort, long ramSize) { + setupLinkLocalNetwork(); VM vm = null; List> mounts = null; Connection conn = getConnection(); @@ -3163,6 +3166,29 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR if (s_logger.isDebugEnabled()) { s_logger.debug("Found a network called " + name + " on host=" + _host.ip + "; Network=" + nr.uuid + "; pif=" + pr.uuid); } + if (pr.bondMasterOf != null && pr.bondMasterOf.size() > 0) { + if (pr.bondMasterOf.size() > 1) { + String msg = new StringBuilder("Unsupported configuration. Network " + name + " has more than one bond. 
Network=").append(nr.uuid) + .append("; pif=").append(pr.uuid).toString(); + s_logger.warn(msg); + return null; + } + Bond bond = pr.bondMasterOf.iterator().next(); + Set slaves = bond.getSlaves(conn); + for (PIF slave : slaves) { + PIF.Record spr = slave.getRecord(conn); + if (spr.management) { + Host host = Host.getByUuid(conn, _host.uuid); + if (!transferManagementNetwork(conn, host, slave, spr, pif)) { + String msg = new StringBuilder("Unable to transfer management network. slave=" + spr.uuid + "; master=" + pr.uuid + "; host=" + + _host.uuid).toString(); + s_logger.warn(msg); + return null; + } + break; + } + } + } return new Nic(network, nr, pif, pr); } @@ -3520,7 +3546,6 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR } _host.privatePif = privateNic.pr.uuid; _host.privateNetwork = privateNic.nr.uuid; - _privateNetworkName = privateNic.nr.nameLabel; Nic guestNic = null; if (_guestNetworkName != null && !_guestNetworkName.equals(_privateNetworkName)) { @@ -3532,7 +3557,6 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR } else { guestNic = privateNic; } - _guestNetworkName = guestNic.nr.nameLabel; _host.guestNetwork = guestNic.nr.uuid; _host.guestPif = guestNic.pr.uuid; @@ -3548,7 +3572,6 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR } _host.publicPif = publicNic.pr.uuid; _host.publicNetwork = publicNic.nr.uuid; - _publicNetworkName = publicNic.nr.nameLabel; Nic storageNic1 = getLocalNetwork(conn, _storageNetworkName1); @@ -3656,6 +3679,35 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR } } + protected boolean transferManagementNetwork(Connection conn, Host host, PIF src, PIF.Record spr, PIF dest) throws XmlRpcException, XenAPIException { + dest.reconfigureIp(conn, spr.ipConfigurationMode, spr.IP, spr.netmask, spr.gateway, spr.DNS); + Host.managementReconfigure(conn, dest); + String hostUuid = null; + int count = 0; + while (count < 10) { + try { + Thread.sleep(10000); + hostUuid = host.getUuid(conn); + if (hostUuid != null) { + break; + } + } catch (XmlRpcException e) { + s_logger.debug("Waiting for host to come back: " + e.getMessage()); + } catch (XenAPIException e) { + s_logger.debug("Waiting for host to come back: " + e.getMessage()); + } catch (InterruptedException e) { + s_logger.debug("Gotta run"); + return false; + } + } + if (hostUuid == null) { + s_logger.warn("Unable to transfer the management network from " + spr.uuid); + return false; + } + + src.reconfigureIp(conn, IpConfigurationMode.NONE, null, null, null, null); + return true; + } @Override public StartupCommand[] initialize() throws IllegalArgumentException{ @@ -3668,8 +3720,6 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR return null; } - setupLinkLocalNetwork(); - destroyStoppedVm(); StartupRoutingCommand cmd = new StartupRoutingCommand(); fillHostInfo(cmd); @@ -4016,9 +4066,15 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR if (details == null) { details = new HashMap(); } + if (_privateNetworkName != null) { details.put("private.network.device", _privateNetworkName); + } + if (_publicNetworkName != null) { details.put("public.network.device", _publicNetworkName); + } + if (_guestNetworkName != null) { details.put("guest.network.device", _guestNetworkName); + } details.put("can_bridge_firewall", Boolean.toString(_canBridgeFirewall)); cmd.setHostDetails(details); cmd.setName(hr.nameLabel); @@ -4225,7 
+4281,11 @@ public abstract class CitrixResourceBase implements StoragePoolResource, ServerR vdir.nameLabel = dskch.getName(); vdir.SR = poolSr; vdir.type = Types.VdiType.USER; - vdir.virtualSize = dskch.getSize(); + + if(cmd.getSize()!=0) + vdir.virtualSize = cmd.getSize(); + else + vdir.virtualSize = dskch.getSize(); vdi = VDI.create(conn, vdir); } diff --git a/core/src/com/cloud/network/ExteralIpAddressAllocator.java b/core/src/com/cloud/network/ExteralIpAddressAllocator.java index c4b040da4dc..b9506b34885 100644 --- a/core/src/com/cloud/network/ExteralIpAddressAllocator.java +++ b/core/src/com/cloud/network/ExteralIpAddressAllocator.java @@ -48,12 +48,12 @@ public class ExteralIpAddressAllocator implements IpAddrAllocator{ @Inject IPAddressDao _ipAddressDao = null; @Inject VlanDao _vlanDao; private boolean _isExternalIpAllocatorEnabled = false; - private String _externalIpAllocatorUrl; + private String _externalIpAllocatorUrl = null; @Override public IpAddr getPrivateIpAddress(String macAddr, long dcId, long podId) { - if (this._externalIpAllocatorUrl.equalsIgnoreCase("")) { + if (_externalIpAllocatorUrl == null || this._externalIpAllocatorUrl.equalsIgnoreCase("")) { return new IpAddr(); } String urlString = this._externalIpAllocatorUrl + "?command=getIpAddr&mac=" + macAddr + "&dc=" + dcId + "&pod=" + podId; @@ -102,10 +102,12 @@ public class ExteralIpAddressAllocator implements IpAddrAllocator{ @Override public boolean releasePrivateIpAddress(String ip, long dcId, long podId) { /*TODO: call API to release the ip address from external DHCP server*/ - String urlString = this._externalIpAllocatorUrl + "?command=releaseIpAddr&ip=" + ip + "&dc=" + dcId + "&pod=" + podId; - if (this._externalIpAllocatorUrl.equalsIgnoreCase("")) { + if (_externalIpAllocatorUrl == null || this._externalIpAllocatorUrl.equalsIgnoreCase("")) { return false; } + + String urlString = this._externalIpAllocatorUrl + "?command=releaseIpAddr&ip=" + ip + "&dc=" + dcId + "&pod=" + podId; + s_logger.debug("releaseIP:" + urlString); BufferedReader in = null; try { diff --git a/core/src/com/cloud/server/ManagementServer.java b/core/src/com/cloud/server/ManagementServer.java index f6cc1abe1e0..6de98c7b83d 100644 --- a/core/src/com/cloud/server/ManagementServer.java +++ b/core/src/com/cloud/server/ManagementServer.java @@ -481,10 +481,11 @@ public interface ManagementServer { * @param name - name for the volume * @param zoneId - id of the zone to create this volume on * @param diskOfferingId - id of the disk offering to create this volume with + * @param size - size of the volume * @return true if success, false if not */ - VolumeVO createVolume(long accountId, long userId, String name, long zoneId, long diskOfferingId, long startEventId) throws InternalErrorException; - long createVolumeAsync(long accountId, long userId, String name, long zoneId, long diskOfferingId) throws InvalidParameterValueException, InternalErrorException, ResourceAllocationException; + VolumeVO createVolume(long accountId, long userId, String name, long zoneId, long diskOfferingId, long startEventId, long size) throws InternalErrorException; + long createVolumeAsync(long accountId, long userId, String name, long zoneId, long diskOfferingId, long size) throws InvalidParameterValueException, InternalErrorException, ResourceAllocationException; /** * Finds the root volume of the VM @@ -643,14 +644,15 @@ public interface ManagementServer { * @param displayName user-supplied name to be shown in the UI or returned in the API * @param groupName 
user-supplied groupname to be shown in the UI or returned in the API * @param userData user-supplied base64-encoded data that can be retrieved by the instance from the virtual router + * @param size -- size to be used for volume creation in case the disk offering is private (i.e. size=0) * @return VirtualMachine if successfully deployed, null otherwise * @throws InvalidParameterValueException if the parameter values are incorrect. * @throws ExecutionException * @throws StorageUnavailableException * @throws ConcurrentOperationException */ - UserVm deployVirtualMachine(long userId, long accountId, long dataCenterId, long serviceOfferingId, long templateId, Long diskOfferingId, String domain, String password, String displayName, String group, String userData, String [] groups, long startEventId) throws ResourceAllocationException, InvalidParameterValueException, InternalErrorException, InsufficientStorageCapacityException, PermissionDeniedException, ExecutionException, StorageUnavailableException, ConcurrentOperationException; - long deployVirtualMachineAsync(long userId, long accountId, long dataCenterId, long serviceOfferingId, long templateId, Long diskOfferingId, String domain, String password, String displayName, String group, String userData, String [] groups) throws InvalidParameterValueException, PermissionDeniedException; + UserVm deployVirtualMachine(long userId, long accountId, long dataCenterId, long serviceOfferingId, long templateId, Long diskOfferingId, String domain, String password, String displayName, String group, String userData, String [] groups, long startEventId, long size) throws ResourceAllocationException, InvalidParameterValueException, InternalErrorException, InsufficientStorageCapacityException, PermissionDeniedException, ExecutionException, StorageUnavailableException, ConcurrentOperationException; + long deployVirtualMachineAsync(long userId, long accountId, long dataCenterId, long serviceOfferingId, long templateId, Long diskOfferingId, String domain, String password, String displayName, String group, String userData, String [] groups, long size) throws InvalidParameterValueException, PermissionDeniedException; /** * Starts a Virtual Machine @@ -712,8 +714,9 @@ public interface ManagementServer { * Recovers a destroyed virtual machine. * @param vmId * @return true if recovered, false otherwise + * @throws InternalErrorException */ - boolean recoverVirtualMachine(long vmId) throws ResourceAllocationException; + boolean recoverVirtualMachine(long vmId) throws ResourceAllocationException, InternalErrorException; /** * Upgrade the virtual machine to a new service offering @@ -1766,6 +1769,12 @@ public interface ManagementServer { */ DiskOfferingVO findDiskOfferingById(long diskOffering); + /** + * Finds the obj associated with the private disk offering + * @return -- vo obj for private disk offering + */ + List findPrivateDiskOffering(); + /** * Update the permissions on a template. A private template can be made public, or individual accounts can be granted permission to launch instances from the template. 
* @param templateId @@ -2173,4 +2182,6 @@ public interface ManagementServer { boolean checkLocalStorageConfigVal(); boolean addConfig(String instance, String component, String category, String name, String value, String description); + + boolean validateCustomVolumeSizeRange(long size) throws InvalidParameterValueException; } diff --git a/core/src/com/cloud/storage/DiskOfferingVO.java b/core/src/com/cloud/storage/DiskOfferingVO.java index e6f7d408072..5b972e817a0 100644 --- a/core/src/com/cloud/storage/DiskOfferingVO.java +++ b/core/src/com/cloud/storage/DiskOfferingVO.java @@ -155,7 +155,11 @@ public class DiskOfferingVO implements DiskOffering { public void setDisplayText(String displayText) { this.displayText = displayText; } - + + public long getDiskSize(){ + return diskSize; + } + public long getDiskSizeInBytes() { return diskSize * 1024 * 1024; } diff --git a/core/src/com/cloud/storage/StorageManager.java b/core/src/com/cloud/storage/StorageManager.java index 0cb04e20eaa..7b7ed9c1aef 100644 --- a/core/src/com/cloud/storage/StorageManager.java +++ b/core/src/com/cloud/storage/StorageManager.java @@ -34,6 +34,7 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.service.ServiceOfferingVO; import com.cloud.user.Account; +import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; import com.cloud.utils.exception.ExecutionException; @@ -92,9 +93,10 @@ public interface StorageManager extends Manager { * @param offering service offering of the vm. * @param diskOffering disk offering of the vm. * @param avoids storage pools to avoid. + * @param size : size of the volume if defined * @return List of VolumeVO */ - List create(Account account, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, ServiceOfferingVO offering, DiskOfferingVO diskOffering) throws StorageUnavailableException, ExecutionException; + List create(Account account, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, ServiceOfferingVO offering, DiskOfferingVO diskOffering, long size) throws StorageUnavailableException, ExecutionException; /** * Create StoragePool based on uri @@ -156,7 +158,7 @@ public interface StorageManager extends Manager { public long createUserVM(Account account, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, ServiceOfferingVO offering, DiskOfferingVO diskOffering, - List avoids); + List avoids, long size); /** * This method sends the given command on all the hosts in the primary storage pool given until is succeeds on any one. @@ -168,7 +170,7 @@ public interface StorageManager extends Manager { * @return The answer for that command, could be success or failure. 
*/ Answer sendToHostsOnStoragePool(Long poolId, Command cmd, String basicErrMsg); - Answer sendToHostsOnStoragePool(Long poolId, Command cmd, String basicErrMsg, int retriesPerHost, int pauseBeforeRetry, boolean shouldBeSnapshotCapable); + Answer sendToHostsOnStoragePool(Long poolId, Command cmd, String basicErrMsg, int retriesPerHost, int pauseBeforeRetry, boolean shouldBeSnapshotCapable, Long vmId ); /** @@ -195,9 +197,10 @@ public interface StorageManager extends Manager { * @param name * @param dc * @param diskOffering + * @param size * @return VolumeVO */ - VolumeVO createVolume(long accountId, long userId, String name, DataCenterVO dc, DiskOfferingVO diskOffering, long startEventId); + VolumeVO createVolume(long accountId, long userId, String name, DataCenterVO dc, DiskOfferingVO diskOffering, long startEventId, long size); /** * Marks the specified volume as destroyed in the management server database. The expunge thread will delete the volume from its storage pool. @@ -230,6 +233,8 @@ public interface StorageManager extends Manager { */ boolean volumeInactive(VolumeVO volume); + String getVmNameOnVolume(VolumeVO volume); + List> isStoredOn(VMInstanceVO vm); /** diff --git a/core/src/com/cloud/storage/dao/DiskOfferingDao.java b/core/src/com/cloud/storage/dao/DiskOfferingDao.java index cbb7e8f21b0..0cfe20e3aea 100644 --- a/core/src/com/cloud/storage/dao/DiskOfferingDao.java +++ b/core/src/com/cloud/storage/dao/DiskOfferingDao.java @@ -24,5 +24,7 @@ import com.cloud.storage.DiskOfferingVO; import com.cloud.utils.db.GenericDao; public interface DiskOfferingDao extends GenericDao { - List listByDomainId(long domainId); + List listByDomainId(long domainId); + List findPrivateDiskOffering(); + } diff --git a/core/src/com/cloud/storage/dao/DiskOfferingDaoImpl.java b/core/src/com/cloud/storage/dao/DiskOfferingDaoImpl.java index edc797e1541..76811aa0eec 100644 --- a/core/src/com/cloud/storage/dao/DiskOfferingDaoImpl.java +++ b/core/src/com/cloud/storage/dao/DiskOfferingDaoImpl.java @@ -38,6 +38,7 @@ public class DiskOfferingDaoImpl extends GenericDaoBase im private static final Logger s_logger = Logger.getLogger(DiskOfferingDaoImpl.class); private final SearchBuilder DomainIdSearch; + private final SearchBuilder PrivateDiskOfferingSearch; private final Attribute _typeAttr; protected DiskOfferingDaoImpl() { @@ -45,6 +46,10 @@ public class DiskOfferingDaoImpl extends GenericDaoBase im DomainIdSearch.and("domainId", DomainIdSearch.entity().getDomainId(), SearchCriteria.Op.EQ); DomainIdSearch.done(); + PrivateDiskOfferingSearch = createSearchBuilder(); + PrivateDiskOfferingSearch.and("diskSize", PrivateDiskOfferingSearch.entity().getDiskSize(), SearchCriteria.Op.EQ); + PrivateDiskOfferingSearch.done(); + _typeAttr = _allAttributes.get("type"); } @@ -56,6 +61,13 @@ public class DiskOfferingDaoImpl extends GenericDaoBase im return listActiveBy(sc); } + @Override + public List findPrivateDiskOffering() { + SearchCriteria sc = PrivateDiskOfferingSearch.create(); + sc.setParameters("diskSize", 0); + return listActiveBy(sc); + } + @Override public List searchAll(SearchCriteria sc, final Filter filter, final Boolean lock, final boolean cache) { sc.addAnd(_typeAttr, Op.EQ, Type.Disk); diff --git a/core/src/com/cloud/storage/dao/VMTemplateDaoImpl.java b/core/src/com/cloud/storage/dao/VMTemplateDaoImpl.java index 7e8c7a8c966..a02d33bdb53 100644 --- a/core/src/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/core/src/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -130,7 +130,6 @@ public class 
VMTemplateDaoImpl extends GenericDaoBase implem public List listByAccountId(long accountId) { SearchCriteria sc = AccountIdSearch.create(); sc.setParameters("accountId", accountId); - sc.setParameters("publicTemplate", false); return listActiveBy(sc); } diff --git a/core/src/com/cloud/storage/dao/VMTemplateHostDao.java b/core/src/com/cloud/storage/dao/VMTemplateHostDao.java index 4fd5b58053a..5a2b26cd5c2 100644 --- a/core/src/com/cloud/storage/dao/VMTemplateHostDao.java +++ b/core/src/com/cloud/storage/dao/VMTemplateHostDao.java @@ -27,6 +27,8 @@ public interface VMTemplateHostDao extends GenericDao { List listByHostId(long id); List listByTemplateId(long templateId); + + List listByOnlyTemplateId(long templateId); VMTemplateHostVO findByHostTemplate(long hostId, long templateId); diff --git a/core/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java b/core/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java index b2becd0e141..48f3d939624 100644 --- a/core/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java +++ b/core/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java @@ -144,6 +144,13 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase listByOnlyTemplateId(long templateId) { + SearchCriteria sc = TemplateSearch.create(); + sc.setParameters("template_id", templateId); + return listBy(sc); } @Override diff --git a/core/src/com/cloud/vm/UserVmVO.java b/core/src/com/cloud/vm/UserVmVO.java index fd4b9a69dd7..0943ff72f2b 100755 --- a/core/src/com/cloud/vm/UserVmVO.java +++ b/core/src/com/cloud/vm/UserVmVO.java @@ -155,6 +155,24 @@ public class UserVmVO extends VMInstanceVO implements UserVm { this.group = group; } + public UserVmVO(long id, + String instanceName, + String displayName, + long templateId, + long guestOsId, + boolean haEnabled, + long domainId, + long accountId, + long serviceOfferingId, + String group, + String userData) { + super(id, displayName, instanceName, Type.User, templateId, guestOsId, haEnabled); + this.group = group; + this.userData = userData; + this.displayName = displayName; + + } + public UserVmVO(long id, String name, long templateId, diff --git a/core/src/com/cloud/vm/VMInstanceVO.java b/core/src/com/cloud/vm/VMInstanceVO.java index 2e9296b136c..84c91116737 100644 --- a/core/src/com/cloud/vm/VMInstanceVO.java +++ b/core/src/com/cloud/vm/VMInstanceVO.java @@ -120,7 +120,26 @@ public class VMInstanceVO implements VirtualMachine { @Column(name="update_time", updatable=true) @Temporal(value=TemporalType.TIMESTAMP) - Date updateTime; + Date updateTime; + + public VMInstanceVO(long id, + String name, + String instanceName, + Type type, + Long vmTemplateId, + long guestOSId, + boolean haEnabled) { + this.id = id; + this.name = name; + if (vmTemplateId != null) { + this.templateId = vmTemplateId; + } + this.instanceName = instanceName; + this.type = type; + this.guestOSId = guestOSId; + this.haEnabled = haEnabled; + } + public VMInstanceVO(long id, String name, diff --git a/core/test/com/cloud/vmware/TestVMWare.java b/core/test/com/cloud/vmware/TestVMWare.java new file mode 100644 index 00000000000..1c7f4aa633b --- /dev/null +++ b/core/test/com/cloud/vmware/TestVMWare.java @@ -0,0 +1,651 @@ +package com.cloud.vmware; + +import java.io.File; +import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.log4j.xml.DOMConfigurator; + +import com.cloud.utils.PropertiesUtil; +import com.vmware.apputils.AppUtil; +import com.vmware.vim.ArrayOfManagedObjectReference; 
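(Illustrative sketch, not part of this patch: the TestVMWare class added in this hunk exercises the vSphere PropertyCollector. Each lookup pairs a PropertySpec naming the properties to fetch with an ObjectSpec giving the starting object and the TraversalSpecs used to recurse through the inventory, wraps both in a PropertyFilterSpec, and passes it to retrieveProperties(). Assuming the same com.vmware.vim classes and the AppUtil connection held in cb that the rest of this file uses, the minimal pattern is roughly:

    // Recurse Folder.childEntity so everything under the root folder is visited.
    TraversalSpec folder2child = new TraversalSpec();
    folder2child.setName("folder2childEntity");
    folder2child.setType("Folder");
    folder2child.setPath("childEntity");
    folder2child.setSelectSet(new SelectionSpec[] { new SelectionSpec(null, null, "folder2childEntity") });

    // Ask for the "name" property of every Datacenter reachable from the root folder.
    PropertySpec pSpec = new PropertySpec();
    pSpec.setType("Datacenter");
    pSpec.setPathSet(new String[] { "name" });

    ObjectSpec oSpec = new ObjectSpec();
    oSpec.setObj(cb.getConnection().getRootFolder());  // starting point of the traversal
    oSpec.setSkip(Boolean.TRUE);                        // do not collect on the folder itself
    oSpec.setSelectSet(new SelectionSpec[] { folder2child });

    PropertyFilterSpec pfSpec = new PropertyFilterSpec();
    pfSpec.setPropSet(new PropertySpec[] { pSpec });
    pfSpec.setObjectSet(new ObjectSpec[] { oSpec });

    ObjectContent[] ocs = cb.getConnection().getService().retrieveProperties(
        cb.getConnection().getServiceContent().getPropertyCollector(),
        new PropertyFilterSpec[] { pfSpec });

getDataCenterMors() below follows this shape; the other helpers only swap the starting object, the property set, and the traversal specs to reach clusters, hosts, datastores and VMs.)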
+import com.vmware.vim.DatastoreInfo; +import com.vmware.vim.DynamicProperty; +import com.vmware.vim.InvalidProperty; +import com.vmware.vim.ManagedObjectReference; +import com.vmware.vim.ObjectContent; +import com.vmware.vim.ObjectSpec; +import com.vmware.vim.PropertyFilterSpec; +import com.vmware.vim.PropertySpec; +import com.vmware.vim.RuntimeFault; +import com.vmware.vim.SelectionSpec; +import com.vmware.vim.TraversalSpec; + +public class TestVMWare { + private static AppUtil cb; + + private static void setupLog4j() { + File file = PropertiesUtil.findConfigFile("log4j-cloud.xml"); + + if(file != null) { + System.out.println("Log4j configuration from : " + file.getAbsolutePath()); + DOMConfigurator.configureAndWatch(file.getAbsolutePath(), 10000); + } else { + System.out.println("Configure log4j with default properties"); + } + } + + private void getAndPrintInventoryContents() throws Exception { + TraversalSpec resourcePoolTraversalSpec = new TraversalSpec(); + resourcePoolTraversalSpec.setName("resourcePoolTraversalSpec"); + resourcePoolTraversalSpec.setType("ResourcePool"); + resourcePoolTraversalSpec.setPath("resourcePool"); + resourcePoolTraversalSpec.setSkip(new Boolean(false)); + resourcePoolTraversalSpec.setSelectSet( + new SelectionSpec [] { new SelectionSpec(null,null,"resourcePoolTraversalSpec") }); + + TraversalSpec computeResourceRpTraversalSpec = new TraversalSpec(); + computeResourceRpTraversalSpec.setName("computeResourceRpTraversalSpec"); + computeResourceRpTraversalSpec.setType("ComputeResource"); + computeResourceRpTraversalSpec.setPath("resourcePool"); + computeResourceRpTraversalSpec.setSkip(new Boolean(false)); + computeResourceRpTraversalSpec.setSelectSet( + new SelectionSpec [] { new SelectionSpec(null,null,"resourcePoolTraversalSpec") }); + + TraversalSpec computeResourceHostTraversalSpec = new TraversalSpec(); + computeResourceHostTraversalSpec.setName("computeResourceHostTraversalSpec"); + computeResourceHostTraversalSpec.setType("ComputeResource"); + computeResourceHostTraversalSpec.setPath("host"); + computeResourceHostTraversalSpec.setSkip(new Boolean(false)); + + TraversalSpec datacenterHostTraversalSpec = new TraversalSpec(); + datacenterHostTraversalSpec.setName("datacenterHostTraversalSpec"); + datacenterHostTraversalSpec.setType("Datacenter"); + datacenterHostTraversalSpec.setPath("hostFolder"); + datacenterHostTraversalSpec.setSkip(new Boolean(false)); + datacenterHostTraversalSpec.setSelectSet( + new SelectionSpec [] { new SelectionSpec(null,null,"folderTraversalSpec") }); + + TraversalSpec datacenterVmTraversalSpec = new TraversalSpec(); + datacenterVmTraversalSpec.setName("datacenterVmTraversalSpec"); + datacenterVmTraversalSpec.setType("Datacenter"); + datacenterVmTraversalSpec.setPath("vmFolder"); + datacenterVmTraversalSpec.setSkip(new Boolean(false)); + datacenterVmTraversalSpec.setSelectSet( + new SelectionSpec [] { new SelectionSpec(null,null,"folderTraversalSpec") }); + + TraversalSpec folderTraversalSpec = new TraversalSpec(); + folderTraversalSpec.setName("folderTraversalSpec"); + folderTraversalSpec.setType("Folder"); + folderTraversalSpec.setPath("childEntity"); + folderTraversalSpec.setSkip(new Boolean(false)); + folderTraversalSpec.setSelectSet( + new SelectionSpec [] { new SelectionSpec(null,null,"folderTraversalSpec"), + datacenterHostTraversalSpec, + datacenterVmTraversalSpec, + computeResourceRpTraversalSpec, + computeResourceHostTraversalSpec, + resourcePoolTraversalSpec }); + + PropertySpec[] propspecary = new PropertySpec[] { new 
PropertySpec() }; + propspecary[0].setAll(new Boolean(false)); + propspecary[0].setPathSet(new String[] { "name" }); + propspecary[0].setType("ManagedEntity"); + + PropertyFilterSpec spec = new PropertyFilterSpec(); + spec.setPropSet(propspecary); + spec.setObjectSet(new ObjectSpec[] { new ObjectSpec() }); + spec.getObjectSet(0).setObj(cb.getConnection().getRootFolder()); + spec.getObjectSet(0).setSkip(new Boolean(false)); + spec.getObjectSet(0).setSelectSet( + new SelectionSpec[] { folderTraversalSpec }); + + // Recursively get all ManagedEntity ManagedObjectReferences + // and the "name" property for all ManagedEntities retrieved + ObjectContent[] ocary = + cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] { spec } + ); + + // If we get contents back. print them out. + if (ocary != null) { + ObjectContent oc = null; + ManagedObjectReference mor = null; + DynamicProperty[] pcary = null; + DynamicProperty pc = null; + for (int oci = 0; oci < ocary.length; oci++) { + oc = ocary[oci]; + mor = oc.getObj(); + pcary = oc.getPropSet(); + + System.out.println("Object Type : " + mor.getType()); + System.out.println("Reference Value : " + mor.get_value()); + + if (pcary != null) { + for (int pci = 0; pci < pcary.length; pci++) { + pc = pcary[pci]; + System.out.println(" Property Name : " + pc.getName()); + if (pc != null) { + if (!pc.getVal().getClass().isArray()) { + System.out.println(" Property Value : " + pc.getVal()); + } + else { + Object[] ipcary = (Object[])pc.getVal(); + System.out.println("Val : " + pc.getVal()); + for (int ii = 0; ii < ipcary.length; ii++) { + Object oval = ipcary[ii]; + if (oval.getClass().getName().indexOf("ManagedObjectReference") >= 0) { + ManagedObjectReference imor = (ManagedObjectReference)oval; + + System.out.println("Inner Object Type : " + imor.getType()); + System.out.println("Inner Reference Value : " + imor.get_value()); + } + else { + System.out.println("Inner Property Value : " + oval); + } + } + } + } + } + } + } + } else { + System.out.println("No Managed Entities retrieved!"); + } + } + + private void listDataCenters() { + try { + ManagedObjectReference[] morDatacenters = getDataCenterMors(); + if(morDatacenters != null) { + for(ManagedObjectReference mor : morDatacenters) { + System.out.println("Datacenter : " + mor.get_value()); + + Map properites = new HashMap(); + properites.put("name", null); + properites.put("vmFolder", null); + properites.put("hostFolder", null); + + getProperites(mor, properites); + for(Map.Entry entry : properites.entrySet()) { + if(entry.getValue() instanceof ManagedObjectReference) { + ManagedObjectReference morProp = (ManagedObjectReference)entry.getValue(); + System.out.println("\t" + entry.getKey() + ":(" + morProp.getType() + ", " + morProp.get_value() + ")"); + } else { + System.out.println("\t" + entry.getKey() + ":" + entry.getValue()); + } + } + + System.out.println("Datacenter clusters"); + ManagedObjectReference[] clusters = getDataCenterClusterMors(mor); + if(clusters != null) { + for(ManagedObjectReference morCluster : clusters) { + Object[] props = this.getProperties(morCluster, new String[] {"name"}); + System.out.println("cluster : " + props[0]); + + System.out.println("cluster hosts"); + ManagedObjectReference[] hosts = getClusterHostMors(morCluster); + if(hosts != null) { + for(ManagedObjectReference morHost : hosts) { + Object[] props2 = this.getProperties(morHost, new String[] {"name"}); + System.out.println("host 
: " + props2[0]); + } + } + } + } + + System.out.println("Datacenter standalone hosts"); + ManagedObjectReference[] hosts = getDataCenterStandaloneHostMors(mor); + if(hosts != null) { + for(ManagedObjectReference morHost : hosts) { + Object[] props = this.getProperties(morHost, new String[] {"name"}); + System.out.println("host : " + props[0]); + } + } + + System.out.println("Datacenter datastores"); + ManagedObjectReference[] stores = getDataCenterDatastoreMors(mor); + if(stores != null) { + for(ManagedObjectReference morStore : stores) { + // data store name property does not work for some reason + Object[] props = getProperties(morStore, new String[] {"info" }); + + System.out.println(morStore.getType() + ": " + ((DatastoreInfo)props[0]).getName()); + } + } + + System.out.println("Datacenter VMs"); + ManagedObjectReference[] vms = getDataCenterVMMors(mor); + if(stores != null) { + for(ManagedObjectReference morVm : vms) { + Object[] props = this.getProperties(morVm, new String[] {"name"}); + System.out.println("VM name: " + props[0] + ", ref val: " + morVm.get_value()); + } + } + } + } + } catch(RuntimeFault e) { + e.printStackTrace(); + } catch(RemoteException e) { + e.printStackTrace(); + } + } + + private void listInventoryFolders() { + TraversalSpec folderTraversalSpec = new TraversalSpec(); + folderTraversalSpec.setName("folderTraversalSpec"); + folderTraversalSpec.setType("Folder"); + folderTraversalSpec.setPath("childEntity"); + folderTraversalSpec.setSkip(new Boolean(false)); + folderTraversalSpec.setSelectSet( + new SelectionSpec [] { new SelectionSpec(null, null, "folderTraversalSpec")} + ); + + PropertySpec[] propSpecs = new PropertySpec[] { new PropertySpec() }; + propSpecs[0].setAll(new Boolean(false)); + propSpecs[0].setPathSet(new String[] { "name" }); + propSpecs[0].setType("ManagedEntity"); + + PropertyFilterSpec filterSpec = new PropertyFilterSpec(); + filterSpec.setPropSet(propSpecs); + filterSpec.setObjectSet(new ObjectSpec[] { new ObjectSpec() }); + filterSpec.getObjectSet(0).setObj(cb.getConnection().getRootFolder()); + filterSpec.getObjectSet(0).setSkip(new Boolean(false)); + filterSpec.getObjectSet(0).setSelectSet( + new SelectionSpec[] { folderTraversalSpec } + ); + + try { + ObjectContent[] objContent = cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] { filterSpec } + ); + printContent(objContent); + } catch (InvalidProperty e) { + e.printStackTrace(); + } catch (RuntimeFault e) { + e.printStackTrace(); + } catch (RemoteException e) { + e.printStackTrace(); + } + } + + private TraversalSpec getFolderRecursiveTraversalSpec() { + SelectionSpec recurseFolders = new SelectionSpec(); + recurseFolders.setName("folder2childEntity"); + + TraversalSpec folder2childEntity = new TraversalSpec(); + folder2childEntity.setType("Folder"); + folder2childEntity.setPath("childEntity"); + folder2childEntity.setName(recurseFolders.getName()); + folder2childEntity.setSelectSet(new SelectionSpec[] { recurseFolders }); + + return folder2childEntity; + } + + private ManagedObjectReference[] getDataCenterMors() throws RuntimeFault, RemoteException { + PropertySpec pSpec = new PropertySpec(); + pSpec.setType("Datacenter"); + pSpec.setPathSet(new String[] { "name"} ); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(cb.getConnection().getRootFolder()); + oSpec.setSkip(Boolean.TRUE); + oSpec.setSelectSet(new SelectionSpec[] { getFolderRecursiveTraversalSpec() }); + + PropertyFilterSpec 
pfSpec = new PropertyFilterSpec(); + pfSpec.setPropSet(new PropertySpec[] { pSpec }); + pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + + ObjectContent[] ocs = cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] { pfSpec }); + + if(ocs != null) { + ManagedObjectReference[] morDatacenters = new ManagedObjectReference[ocs.length]; + for(int i = 0; i < ocs.length; i++) + morDatacenters[i] = ocs[i].getObj(); + + return morDatacenters; + } + return null; + } + + private ManagedObjectReference[] getDataCenterVMMors(ManagedObjectReference morDatacenter) throws RuntimeFault, RemoteException { + PropertySpec pSpec = new PropertySpec(); + pSpec.setType("VirtualMachine"); + pSpec.setPathSet(new String[] { "name"} ); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(morDatacenter); + oSpec.setSkip(Boolean.TRUE); + + TraversalSpec tSpec = new TraversalSpec(); + tSpec.setName("dc2VMFolder"); + tSpec.setType("Datacenter"); + tSpec.setPath("vmFolder"); + tSpec.setSelectSet(new SelectionSpec[] { getFolderRecursiveTraversalSpec() } ); + + oSpec.setSelectSet(new SelectionSpec[] { tSpec }); + + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); + pfSpec.setPropSet(new PropertySpec[] { pSpec }); + pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + + ObjectContent[] ocs = cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] { pfSpec }); + + if(ocs != null) { + ManagedObjectReference[] morVMs = new ManagedObjectReference[ocs.length]; + for(int i = 0; i < ocs.length; i++) + morVMs[i] = ocs[i].getObj(); + + return morVMs; + } + return null; + } + + private ManagedObjectReference[] getDataCenterDatastoreMors(ManagedObjectReference morDatacenter) throws RuntimeFault, RemoteException { + Object[] stores = getProperties(morDatacenter, new String[] { "datastore" }); + if(stores != null && stores.length == 1) { + return ((ArrayOfManagedObjectReference)stores[0]).getManagedObjectReference(); + } + return null; + } + + private ManagedObjectReference[] getDataCenterClusterMors(ManagedObjectReference morDatacenter) throws RuntimeFault, RemoteException { + PropertySpec pSpec = new PropertySpec(); + pSpec.setType("ClusterComputeResource"); + pSpec.setPathSet(new String[] { "name"} ); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(morDatacenter); + oSpec.setSkip(Boolean.TRUE); + + TraversalSpec tSpec = new TraversalSpec(); + tSpec.setName("traversalHostFolder"); + tSpec.setType("Datacenter"); + tSpec.setPath("hostFolder"); + tSpec.setSkip(false); + tSpec.setSelectSet(new SelectionSpec[] { getFolderRecursiveTraversalSpec() }); + + oSpec.setSelectSet(new TraversalSpec[] { tSpec }); + + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); + pfSpec.setPropSet(new PropertySpec[] { pSpec }); + pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + + ObjectContent[] ocs = cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] { pfSpec }); + + if(ocs != null) { + ManagedObjectReference[] morDatacenters = new ManagedObjectReference[ocs.length]; + for(int i = 0; i < ocs.length; i++) + morDatacenters[i] = ocs[i].getObj(); + + return morDatacenters; + } + return null; + } + + private ManagedObjectReference[] getDataCenterStandaloneHostMors(ManagedObjectReference morDatacenter) throws RuntimeFault, RemoteException { + PropertySpec pSpec = new 
PropertySpec(); + pSpec.setType("ComputeResource"); + pSpec.setPathSet(new String[] { "name"} ); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(morDatacenter); + oSpec.setSkip(Boolean.TRUE); + + TraversalSpec tSpec = new TraversalSpec(); + tSpec.setName("traversalHostFolder"); + tSpec.setType("Datacenter"); + tSpec.setPath("hostFolder"); + tSpec.setSkip(false); + tSpec.setSelectSet(new SelectionSpec[] { getFolderRecursiveTraversalSpec() }); + + oSpec.setSelectSet(new TraversalSpec[] { tSpec }); + + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); + pfSpec.setPropSet(new PropertySpec[] { pSpec }); + pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + + ObjectContent[] ocs = cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] { pfSpec }); + + if(ocs != null) { + List listComputeResources = new ArrayList(); + for(ObjectContent oc : ocs) { + if(oc.getObj().getType().equalsIgnoreCase("ComputeResource")) + listComputeResources.add(oc.getObj()); + } + + List listHosts = new ArrayList(); + for(ManagedObjectReference morComputeResource : listComputeResources) { + ManagedObjectReference[] hosts = getComputeResourceHostMors(morComputeResource); + if(hosts != null) { + for(ManagedObjectReference host: hosts) + listHosts.add(host); + } + } + + return listHosts.toArray(new ManagedObjectReference[0]); + } + return null; + } + + private ManagedObjectReference[] getComputeResourceHostMors(ManagedObjectReference morCompute) throws RuntimeFault, RemoteException { + PropertySpec pSpec = new PropertySpec(); + pSpec.setType("HostSystem"); + pSpec.setPathSet(new String[] { "name"} ); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(morCompute); + oSpec.setSkip(true); + + TraversalSpec tSpec = new TraversalSpec(); + tSpec.setName("computeResource2Host"); + tSpec.setType("ComputeResource"); + tSpec.setPath("host"); + tSpec.setSkip(false); + oSpec.setSelectSet(new TraversalSpec[] { tSpec }); + + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); + pfSpec.setPropSet(new PropertySpec[] { pSpec }); + pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + + ObjectContent[] ocs = cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] { pfSpec }); + + if(ocs != null) { + ManagedObjectReference[] morDatacenters = new ManagedObjectReference[ocs.length]; + for(int i = 0; i < ocs.length; i++) + morDatacenters[i] = ocs[i].getObj(); + + return morDatacenters; + } + return null; + } + + private ManagedObjectReference[] getClusterHostMors(ManagedObjectReference morCluster) throws RuntimeFault, RemoteException { + // ClusterComputeResource inherits from ComputeResource + return getComputeResourceHostMors(morCluster); + } + + private ObjectContent[] getDataCenterProperites(String[] properites) throws RuntimeFault, RemoteException { + PropertySpec pSpec = new PropertySpec(); + pSpec.setType("Datacenter"); + pSpec.setPathSet(properites ); + + SelectionSpec recurseFolders = new SelectionSpec(); + recurseFolders.setName("folder2childEntity"); + + TraversalSpec folder2childEntity = new TraversalSpec(); + folder2childEntity.setType("Folder"); + folder2childEntity.setPath("childEntity"); + folder2childEntity.setName(recurseFolders.getName()); + folder2childEntity.setSelectSet(new SelectionSpec[] { recurseFolders }); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(cb.getConnection().getRootFolder()); + oSpec.setSkip(Boolean.TRUE); + 
oSpec.setSelectSet(new SelectionSpec[] { folder2childEntity }); + + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); + pfSpec.setPropSet(new PropertySpec[] { pSpec }); + pfSpec.setObjectSet(new ObjectSpec[] { oSpec }); + + return cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] { pfSpec }); + } + + private void printContent(ObjectContent[] objContent) { + if(objContent != null) { + for(ObjectContent oc : objContent) { + ManagedObjectReference mor = oc.getObj(); + DynamicProperty[] objProps = oc.getPropSet(); + + System.out.println("Object type: " + mor.getType()); + if(objProps != null) { + for(DynamicProperty objProp : objProps) { + if(!objProp.getClass().isArray()) { + System.out.println("\t" + objProp.getName() + "=" + objProp.getVal()); + } else { + Object[] ipcary = (Object[])objProp.getVal(); + System.out.print("\t" + objProp.getName() + "=["); + int i = 0; + for(Object item : ipcary) { + if (item.getClass().getName().indexOf("ManagedObjectReference") >= 0) { + ManagedObjectReference imor = (ManagedObjectReference)item; + System.out.print("(" + imor.getType() + "," + imor.get_value() + ")"); + } else { + System.out.print(item); + } + + if(i < ipcary.length - 1) + System.out.print(", "); + i++; + } + + System.out.println("]"); + } + } + } + } + } + } + + private void getProperites(ManagedObjectReference mor, Map properties) throws RuntimeFault, RemoteException { + PropertySpec pSpec = new PropertySpec(); + pSpec.setType(mor.getType()); + pSpec.setPathSet(properties.keySet().toArray(new String[0])); + + ObjectSpec oSpec = new ObjectSpec(); + oSpec.setObj(mor); + + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); + pfSpec.setPropSet(new PropertySpec[] {pSpec} ); + pfSpec.setObjectSet(new ObjectSpec[] {oSpec} ); + + ObjectContent[] ocs = cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] {pfSpec} ); + + if(ocs != null) { + for(ObjectContent oc : ocs) { + DynamicProperty[] propSet = oc.getPropSet(); + if(propSet != null) { + for(DynamicProperty prop : propSet) { + properties.put(prop.getName(), prop.getVal()); + } + } + } + } + } + + private Object[] getProperties(ManagedObjectReference moRef, String[] properties) throws RuntimeFault, RemoteException { + PropertySpec pSpec = new PropertySpec(); + pSpec.setType(moRef.getType()); + pSpec.setPathSet(properties); + + ObjectSpec oSpec = new ObjectSpec(); + // Set the starting object + oSpec.setObj(moRef); + + PropertyFilterSpec pfSpec = new PropertyFilterSpec(); + pfSpec.setPropSet(new PropertySpec[] {pSpec} ); + pfSpec.setObjectSet(new ObjectSpec[] {oSpec} ); + ObjectContent[] ocs = cb.getConnection().getService().retrieveProperties( + cb.getConnection().getServiceContent().getPropertyCollector(), + new PropertyFilterSpec[] {pfSpec} ); + + Object[] ret = new Object[properties.length]; + if(ocs != null) { + for(int i = 0; i< ocs.length; ++i) { + ObjectContent oc = ocs[i]; + DynamicProperty[] dps = oc.getPropSet(); + if(dps != null) { + for(int j = 0; j < dps.length; ++j) { + DynamicProperty dp = dps[j]; + for(int p = 0; p < ret.length; ++p) { + if(properties[p].equals(dp.getName())) { + ret[p] = dp.getVal(); + } + } + } + } + } + } + return ret; + } + + private void powerOnVm() throws Exception { + ManagedObjectReference morVm = new ManagedObjectReference(); + morVm.setType("VirtualMachine"); + morVm.set_value("vm-66"); + + 
cb.getConnection().getService().powerOnVM_Task(morVm, null); + } + + private void powerOffVm() throws Exception { + ManagedObjectReference morVm = new ManagedObjectReference(); + morVm.setType("VirtualMachine"); + morVm.set_value("vm-66"); + + cb.getConnection().getService().powerOffVM_Task(morVm); + } + + public static void main(String[] args) throws Exception { + setupLog4j(); + TestVMWare client = new TestVMWare(); + + // skip certificate check + System.setProperty("axis.socketSecureFactory", "org.apache.axis.components.net.SunFakeTrustSocketFactory"); + + String serviceUrl = "https://vsphere-1.lab.vmops.com/sdk/vimService"; + try { + String[] params = new String[] {"--url", serviceUrl, "--username", "Administrator", "--password", "Suite219" }; + + cb = AppUtil.initialize("Connect", params); + cb.connect(); + System.out.println("Connection Succesful."); + + // client.listInventoryFolders(); + // client.listDataCenters(); + client.powerOnVm(); + + cb.disConnect(); + } catch (Exception e) { + System.out.println("Failed to connect to " + serviceUrl); + } + } +} diff --git a/debian/rules b/debian/rules index 7ab70329fd8..c99b62b85a7 100755 --- a/debian/rules +++ b/debian/rules @@ -91,7 +91,7 @@ binary-common: dh_testdir dh_testroot dh_installchangelogs - dh_installdocs -A README HACKING + dh_installdocs -A README INSTALL HACKING README.html # dh_installexamples # dh_installmenu # dh_installdebconf diff --git a/python/lib/cloud_utils.py b/python/lib/cloud_utils.py index d6ee34d9bfc..a8d6cfbd72f 100644 --- a/python/lib/cloud_utils.py +++ b/python/lib/cloud_utils.py @@ -790,13 +790,14 @@ class SetupFirewall2(ConfigTask): def execute(self): - yield "Permitting traffic in the bridge interface and for VNC ports" + yield "Permitting traffic in the bridge interface, migration port and for VNC ports" if distro in (Fedora , CentOS): for rule in ( "-I FORWARD -i %s -o %s -j ACCEPT"%(self.brname,self.brname), "-I INPUT 1 -p tcp --dport 5900:6100 -j ACCEPT", + "-I INPUT 1 -p tcp --dport 49152:49216 -j ACCEPT", ): args = rule.split() o = iptables(*args) @@ -813,6 +814,7 @@ class SetupFirewall2(ConfigTask): newtext.append(line) file("/etc/ufw/before.rules","w").writelines(newtext) ufw.allow.proto.tcp("from","any","to","any","port","5900:6100") + ufw.allow.proto.tcp("from","any","to","any","port","49152:49216") stop_service("ufw") start_service("ufw") @@ -924,7 +926,7 @@ def setup_agent_config(configfile): zoneandpod = prompt_for_hostpods(x) if zoneandpod: confopts["zone"],confopts["pod"] = zoneandpod - stderr("You selected zone %s pod %s",e,confopts["zone"],confopts["pod"]) + stderr("You selected zone %s pod %s",confopts["zone"],confopts["pod"]) else: stderr("Skipped -- using the previous zone %s pod %s",confopts["zone"],confopts["pod"]) except (urllib2.URLError,urllib2.HTTPError),e: diff --git a/scripts/.project b/scripts/.project index 6f05a336478..8d1ff9adbeb 100644 --- a/scripts/.project +++ b/scripts/.project @@ -6,12 +6,12 @@ - org.eclipse.jdt.core.javabuilder + org.python.pydev.PyDevBuilder - org.eclipse.jdt.core.javanature + org.python.pydev.pythonNature diff --git a/scripts/.pydevproject b/scripts/.pydevproject new file mode 100644 index 00000000000..f8c0075c765 --- /dev/null +++ b/scripts/.pydevproject @@ -0,0 +1,7 @@ + + + + +Default +python 2.6 + diff --git a/scripts/storage/qcow2/createtmplt.sh b/scripts/storage/qcow2/createtmplt.sh index cc235e0d7ac..550901882b1 100755 --- a/scripts/storage/qcow2/createtmplt.sh +++ b/scripts/storage/qcow2/createtmplt.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# 
$Id: createtmplt.sh 11474 2010-08-06 05:53:02Z edison $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/storage/qcow2/createtmplt.sh $ +# $Id: createtmplt.sh 11601 2010-08-11 17:26:15Z kris $ $HeadURL: svn://svn.lab.vmops.com/repos/branches/2.1.refactor/java/scripts/storage/qcow2/createtmplt.sh $ # createtmplt.sh -- install a template usage() { @@ -142,6 +142,11 @@ then exit 3 fi +tmpltimg=$(uncompress $tmpltimg) +if [ $? -ne 0 ] +then + printf "failed to uncompress $tmpltimg\n" +fi create_from_file $tmpltfs $tmpltimg $tmpltname diff --git a/scripts/storage/qcow2/managesnapshot.sh b/scripts/storage/qcow2/managesnapshot.sh index 5890a360f4f..eb689a20ebe 100755 --- a/scripts/storage/qcow2/managesnapshot.sh +++ b/scripts/storage/qcow2/managesnapshot.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# $Id: managesnapshot.sh 11474 2010-08-06 05:53:02Z edison $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/storage/qcow2/managesnapshot.sh $ +# $Id: managesnapshot.sh 11601 2010-08-11 17:26:15Z kris $ $HeadURL: svn://svn.lab.vmops.com/repos/branches/2.1.refactor/java/scripts/storage/qcow2/managesnapshot.sh $ # managesnapshot.sh -- manage snapshots for a single disk (create, destroy, rollback) # @@ -34,16 +34,19 @@ create_snapshot() { } destroy_snapshot() { - local disk=$1 + local backupSnapDir=$1 local snapshotname=$2 local failed=0 - qemu-img snapshot -d $snapshotname $disk - - if [ $? -gt 0 ] + if [ -f $backupSnapDir/$snapshotname ] then - printf "***Failed to delete snapshot $snapshotname for path $disk\n" >&2 - failed=1 + rm -f $backupSnapDir/$snapshotname + + if [ $? -gt 0 ] + then + printf "***Failed to delete snapshot $snapshotname for path $backupSnapDir\n" >&2 + failed=1 + fi fi return $failed diff --git a/scripts/vm/hypervisor/xenserver/launch_hb.sh b/scripts/vm/hypervisor/xenserver/launch_hb.sh index a16d3f4bcbc..849565e1e71 100755 --- a/scripts/vm/hypervisor/xenserver/launch_hb.sh +++ b/scripts/vm/hypervisor/xenserver/launch_hb.sh @@ -26,5 +26,5 @@ for psid in `ps -ef | grep xenheartbeat | grep -v grep | awk '{print $2}'`; do kill $psid done -nohup /opt/xensource/bin/heartbeat.sh $1 $2 >/var/log/heartbeat.log 2>&1 & +nohup /opt/xensource/bin/xenheartbeat.sh $1 $2 >/var/log/heartbeat.log 2>&1 & echo "======> DONE <======" diff --git a/scripts/vm/hypervisor/xenserver/setupxenserver.sh b/scripts/vm/hypervisor/xenserver/setupxenserver.sh index e8831142415..0a4b45f6173 100755 --- a/scripts/vm/hypervisor/xenserver/setupxenserver.sh +++ b/scripts/vm/hypervisor/xenserver/setupxenserver.sh @@ -23,5 +23,10 @@ iptables-save > /etc/sysconfig/iptables sed -i 's/127\.0\.0\.1/0\.0\.0\.0/' /opt/xensource/libexec/vncterm-wrapper 2>&1 sed -i 's/127\.0\.0\.1/0\.0\.0\.0/' /opt/xensource/libexec/qemu-dm-wrapper 2>&1 +# disable the default link local on xenserver +sed -i /NOZEROCONF/d /etc/sysconfig/network +echo "NOZEROCONF=yes" >> /etc/sysconfig/network + + echo "success" diff --git a/scripts/vm/network/vnet/modifyvlan.sh b/scripts/vm/network/vnet/modifyvlan.sh index 6806f244b95..930d5218d28 100644 --- a/scripts/vm/network/vnet/modifyvlan.sh +++ b/scripts/vm/network/vnet/modifyvlan.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# $Id: modifyvlan.sh 11388 2010-08-02 17:04:13Z edison $ $HeadURL: svn://svn.lab.vmops.com/repos/vmdev/java/scripts/vm/network/vnet/modifyvlan.sh $ +# $Id: modifyvlan.sh 11601 2010-08-11 17:26:15Z kris $ $HeadURL: svn://svn.lab.vmops.com/repos/branches/2.1.refactor/java/scripts/vm/network/vnet/modifyvlan.sh $ # modifyvlan.sh -- adds and deletes VLANs from a Routing Server # # 
@@ -128,6 +128,13 @@ then exit 2 fi +# Vlan module is loaded? +lsmod|grep ^8021q >& /dev/null +if [ $? -gt 0 ] +then + modprobe 8021q >& /dev/null +fi + if [ "$op" == "add" ] then # Add the vlan diff --git a/server/src/com/cloud/api/commands/CreateTemplateCmd.java b/server/src/com/cloud/api/commands/CreateTemplateCmd.java index 9b567b4f46d..ec09f0c8f09 100644 --- a/server/src/com/cloud/api/commands/CreateTemplateCmd.java +++ b/server/src/com/cloud/api/commands/CreateTemplateCmd.java @@ -219,14 +219,6 @@ public class CreateTemplateCmd extends BaseCmd { c.addCriteria(Criteria.NAME, name); c.addCriteria(Criteria.CREATED_BY, Long.valueOf(volume.getAccountId())); List templates = getManagementServer().searchForTemplates(c); - if ((templates != null) && !templates.isEmpty()) { - for (VMTemplateVO template : templates) { - if (template.getName().equalsIgnoreCase(name)) { - throw new ServerApiException(BaseCmd.PARAM_ERROR, "a private template with name " + name + " already exists for account " + - volume.getAccountId() + ", please try again with a different name"); - } - } - } // If command is executed via 8096 port, set userId to the id of System account (1) if (userId == null) { diff --git a/server/src/com/cloud/api/commands/CreateVolumeCmd.java b/server/src/com/cloud/api/commands/CreateVolumeCmd.java index 25bec40d0df..d16f41c5072 100644 --- a/server/src/com/cloud/api/commands/CreateVolumeCmd.java +++ b/server/src/com/cloud/api/commands/CreateVolumeCmd.java @@ -28,7 +28,9 @@ import com.cloud.api.BaseCmd; import com.cloud.api.Parameter; import com.cloud.api.ServerApiException; import com.cloud.async.executor.VolumeOperationResultObject; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.serializer.SerializerHelper; +import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.Snapshot; import com.cloud.user.Account; import com.cloud.utils.Pair; @@ -48,6 +50,7 @@ public class CreateVolumeCmd extends BaseCmd { s_properties.add(new Pair(BaseCmd.Properties.NAME, Boolean.TRUE)); s_properties.add(new Pair(BaseCmd.Properties.SNAPSHOT_ID, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.ZONE_ID, Boolean.FALSE)); + s_properties.add(new Pair(BaseCmd.Properties.SIZE, Boolean.FALSE)); } ///////////////////////////////////////////////////// @@ -128,6 +131,7 @@ public class CreateVolumeCmd extends BaseCmd { Long zoneId = (Long) params.get(BaseCmd.Properties.ZONE_ID.getName()); Long diskOfferingId = (Long) params.get(BaseCmd.Properties.DISK_OFFERING_ID.getName()); Long snapshotId = (Long)params.get(BaseCmd.Properties.SNAPSHOT_ID.getName()); + Long size = (Long)params.get(BaseCmd.Properties.SIZE.getName()); if (account == null) { // Admin API call @@ -167,12 +171,45 @@ public class CreateVolumeCmd extends BaseCmd { userId = Long.valueOf(Account.ACCOUNT_ID_SYSTEM); } + if(size==null){ + size = Long.valueOf(0); + } + boolean useSnapshot = false; - if (snapshotId == null) { - if ((zoneId == null) || (diskOfferingId == null)) { - throw new ServerApiException(BaseCmd.PARAM_ERROR, "Missing parameter(s), both zoneid and diskofferingid must be specified."); + if (snapshotId == null) + { + if ((zoneId == null)) + { + throw new ServerApiException(BaseCmd.PARAM_ERROR, "Missing parameter,zoneid must be specified."); } - } else { + + if(diskOfferingId == null && size == 0) + { + throw new ServerApiException(BaseCmd.PARAM_ERROR, "Missing parameter(s),either a positive volume size or a valid disk offering id must be specified."); + } + else if(diskOfferingId == null && size != 0) + 
{ + //validate the size to ensure between min and max size range + try + { + boolean ok = getManagementServer().validateCustomVolumeSizeRange(size); + + if(!ok) + throw new ServerApiException(BaseCmd.PARAM_ERROR, "Invalid size for custom volume creation:"); + + } catch (InvalidParameterValueException e) + { + s_logger.warn("Invalid size for custom volume creation"); + throw new ServerApiException(BaseCmd.PARAM_ERROR, "Invalid size for custom volume creation:"+e.getMessage()); + } + + //this is the case of creating var size vol with private disk offering + List privateTemplateList = getManagementServer().findPrivateDiskOffering(); + diskOfferingId = privateTemplateList.get(0).getId(); //we use this id for creating volume + } + } + else + { useSnapshot = true; //Verify parameters Snapshot snapshotCheck = getManagementServer().findSnapshotById(snapshotId); @@ -197,7 +234,7 @@ public class CreateVolumeCmd extends BaseCmd { if (useSnapshot) { jobId = getManagementServer().createVolumeFromSnapshotAsync(userId, account.getId(), snapshotId, name); } else { - jobId = getManagementServer().createVolumeAsync(userId, account.getId(), name, zoneId, diskOfferingId); + jobId = getManagementServer().createVolumeAsync(userId, account.getId(), name, zoneId, diskOfferingId, size); } if (jobId == 0) { diff --git a/server/src/com/cloud/api/commands/DeployVMCmd.java b/server/src/com/cloud/api/commands/DeployVMCmd.java index 195aaf15229..2f3ee1272d7 100644 --- a/server/src/com/cloud/api/commands/DeployVMCmd.java +++ b/server/src/com/cloud/api/commands/DeployVMCmd.java @@ -52,10 +52,10 @@ public class DeployVMCmd extends BaseCmd { s_properties.add(new Pair(BaseCmd.Properties.GROUP, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.NETWORK_GROUP_LIST, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.SERVICE_OFFERING_ID, Boolean.TRUE)); + s_properties.add(new Pair(BaseCmd.Properties.SIZE, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.TEMPLATE_ID, Boolean.TRUE)); s_properties.add(new Pair(BaseCmd.Properties.USER_DATA, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.ZONE_ID, Boolean.TRUE)); - s_properties.add(new Pair(BaseCmd.Properties.ACCOUNT_OBJ, Boolean.FALSE)); s_properties.add(new Pair(BaseCmd.Properties.USER_ID, Boolean.FALSE)); } @@ -85,6 +85,9 @@ public class DeployVMCmd extends BaseCmd { @Parameter(name="serviceofferingid", type=CommandType.LONG, required=true) private Long serviceOfferingId; + @Parameter(name="size", type=CommandType.LONG) + private Long size; + @Parameter(name="templateid", type=CommandType.LONG, required=true) private Long templateId; @@ -127,6 +130,10 @@ public class DeployVMCmd extends BaseCmd { return serviceOfferingId; } + public Long getSize() { + return size; + } + public Long getTemplateId() { return templateId; } @@ -172,10 +179,13 @@ public class DeployVMCmd extends BaseCmd { String group = (String)params.get(BaseCmd.Properties.GROUP.getName()); String userData = (String) params.get(BaseCmd.Properties.USER_DATA.getName()); String networkGroupList = (String)params.get(BaseCmd.Properties.NETWORK_GROUP_LIST.getName()); - + Long size = (Long)params.get(BaseCmd.Properties.SIZE.getName()); String password = null; Long accountId = null; - + + if(size == null) + size = Long.valueOf(0); + VMTemplateVO template = getManagementServer().findTemplateById(templateId); if (template == null) { throw new ServerApiException(BaseCmd.VM_INVALID_PARAM_ERROR, "Unable to find template with id " + templateId); @@ -253,7 +263,7 @@ public class 
DeployVMCmd extends BaseCmd { long jobId = mgr.deployVirtualMachineAsync(userId.longValue(), accountId.longValue(), zoneId.longValue(), serviceOfferingId.longValue(), templateId.longValue(), diskOfferingId, - null, password, displayName, group, userData, groups); + null, password, displayName, group, userData, groups,size); long vmId = 0; if (jobId == 0) { diff --git a/server/src/com/cloud/api/commands/ListTemplatesCmd.java b/server/src/com/cloud/api/commands/ListTemplatesCmd.java index be511abea1c..9faec83e3dc 100644 --- a/server/src/com/cloud/api/commands/ListTemplatesCmd.java +++ b/server/src/com/cloud/api/commands/ListTemplatesCmd.java @@ -214,10 +214,14 @@ public class ListTemplatesCmd extends BaseCmd { templateData.add(new Pair(BaseCmd.Properties.DISPLAY_TEXT.getName(), template.getDisplayText())); templateData.add(new Pair(BaseCmd.Properties.IS_PUBLIC.getName(), Boolean.valueOf(template.isPublicTemplate()).toString())); templateData.add(new Pair(BaseCmd.Properties.CREATED.getName(), getDateString(templateHostRef.getCreated()))); + if(template.getRemoved() != null){ + templateData.add(new Pair(BaseCmd.Properties.REMOVED.getName(), getDateString(template.getRemoved()))); + } templateData.add(new Pair(BaseCmd.Properties.IS_READY.getName(), Boolean.valueOf(templateHostRef.getDownloadState()==Status.DOWNLOADED).toString())); templateData.add(new Pair(BaseCmd.Properties.IS_FEATURED.getName(), Boolean.valueOf(template.isFeatured()).toString())); templateData.add(new Pair(BaseCmd.Properties.PASSWORD_ENABLED.getName(), Boolean.valueOf(template.getEnablePassword()).toString())); templateData.add(new Pair(BaseCmd.Properties.CROSS_ZONES.getName(), Boolean.valueOf(template.isCrossZones()).toString())); + templateData.add(new Pair(BaseCmd.Properties.FORMAT.getName(), template.getFormat())); GuestOS os = getManagementServer().findGuestOSById(template.getGuestOSId()); if (os != null) { diff --git a/server/src/com/cloud/api/commands/RecoverVMCmd.java b/server/src/com/cloud/api/commands/RecoverVMCmd.java index 3a6d8d4d419..0a65792f4d5 100644 --- a/server/src/com/cloud/api/commands/RecoverVMCmd.java +++ b/server/src/com/cloud/api/commands/RecoverVMCmd.java @@ -27,6 +27,7 @@ import org.apache.log4j.Logger; import com.cloud.api.BaseCmd; import com.cloud.api.Parameter; import com.cloud.api.ServerApiException; +import com.cloud.exception.InternalErrorException; import com.cloud.exception.ResourceAllocationException; import com.cloud.user.Account; import com.cloud.utils.Pair; @@ -100,6 +101,8 @@ public class RecoverVMCmd extends BaseCmd { return returnValues; } catch (ResourceAllocationException ex) { throw new ServerApiException(BaseCmd.VM_RECOVER_ERROR, "Failed to recover virtual machine with id " + vmId + "; " + ex.getMessage()); - } + } catch (InternalErrorException e) { + throw new ServerApiException(BaseCmd.VM_RECOVER_ERROR, "Failed to recover virtual machine with id " + vmId + "; " + e.getMessage()); + } } } diff --git a/server/src/com/cloud/api/commands/RegisterTemplateCmd.java b/server/src/com/cloud/api/commands/RegisterTemplateCmd.java index 887f7b92927..adba348abe5 100644 --- a/server/src/com/cloud/api/commands/RegisterTemplateCmd.java +++ b/server/src/com/cloud/api/commands/RegisterTemplateCmd.java @@ -269,7 +269,8 @@ public class RegisterTemplateCmd extends BaseCmd { listForEmbeddedObject.add(new Pair(BaseCmd.Properties.ID.getName(), template.getId().toString())); listForEmbeddedObject.add(new Pair(BaseCmd.Properties.NAME.getName(), template.getName())); listForEmbeddedObject.add(new 
Pair(BaseCmd.Properties.DISPLAY_TEXT.getName(), template.getDisplayText())); - listForEmbeddedObject.add(new Pair(BaseCmd.Properties.IS_PUBLIC.getName(), Boolean.valueOf(template.isPublicTemplate()).toString())); + listForEmbeddedObject.add(new Pair(BaseCmd.Properties.IS_PUBLIC.getName(), Boolean.valueOf(template.isPublicTemplate()).toString())); + listForEmbeddedObject.add(new Pair(BaseCmd.Properties.CROSS_ZONES.getName(), Boolean.valueOf(template.isCrossZones()).toString())); if (templateHostRef != null) { listForEmbeddedObject.add(new Pair(BaseCmd.Properties.CREATED.getName(), getDateString(templateHostRef.getCreated()))); diff --git a/server/src/com/cloud/api/commands/UpdateTemplateCmd.java b/server/src/com/cloud/api/commands/UpdateTemplateCmd.java index b8fc35dbf5f..46d5ee9b8f5 100644 --- a/server/src/com/cloud/api/commands/UpdateTemplateCmd.java +++ b/server/src/com/cloud/api/commands/UpdateTemplateCmd.java @@ -162,6 +162,7 @@ public class UpdateTemplateCmd extends BaseCmd { templateData.add(new Pair(BaseCmd.Properties.FORMAT.getName(), updatedTemplate.getFormat())); templateData.add(new Pair(BaseCmd.Properties.OS_TYPE_ID.getName(), updatedTemplate.getGuestOSId())); templateData.add(new Pair(BaseCmd.Properties.PASSWORD_ENABLED.getName(), updatedTemplate.getEnablePassword())); + templateData.add(new Pair(BaseCmd.Properties.CROSS_ZONES.getName(), Boolean.valueOf(updatedTemplate.isCrossZones()).toString())); return templateData; } else { throw new ServerApiException(BaseCmd.INTERNAL_ERROR, "internal error updating template"); diff --git a/server/src/com/cloud/api/commands/UpdateTemplateOrIsoPermissionsCmd.java b/server/src/com/cloud/api/commands/UpdateTemplateOrIsoPermissionsCmd.java index 4e8d5b3d13e..c33f639e794 100644 --- a/server/src/com/cloud/api/commands/UpdateTemplateOrIsoPermissionsCmd.java +++ b/server/src/com/cloud/api/commands/UpdateTemplateOrIsoPermissionsCmd.java @@ -131,6 +131,12 @@ public abstract class UpdateTemplateOrIsoPermissionsCmd extends BaseCmd { } } + // If the template is removed throw an error. 
+ if (template.getRemoved() != null){ + s_logger.error("unable to update permissions for " + getMediaType() + " with id " + id + " as it is removed "); + throw new ServerApiException(BaseCmd.ACCOUNT_ERROR, "unable to update permissions for " + getMediaType() + " with id " + id + " as it is removed "); + } + if (id == Long.valueOf(1)) { throw new ServerApiException(BaseCmd.PARAM_ERROR, "unable to update permissions for " + getMediaType() + " with id " + id); } diff --git a/server/src/com/cloud/async/executor/CreatePrivateTemplateExecutor.java b/server/src/com/cloud/async/executor/CreatePrivateTemplateExecutor.java index 2a44c5e6650..32fda8b7ac9 100644 --- a/server/src/com/cloud/async/executor/CreatePrivateTemplateExecutor.java +++ b/server/src/com/cloud/async/executor/CreatePrivateTemplateExecutor.java @@ -188,6 +188,7 @@ public class CreatePrivateTemplateExecutor extends VolumeOperationExecutor { resultObject.setCreated(templateHostRef.getCreated()); resultObject.setReady(templateHostRef != null && templateHostRef.getDownloadState() == Status.DOWNLOADED); resultObject.setPasswordEnabled(template.getEnablePassword()); + resultObject.setCrossZones(template.isCrossZones()); ManagementServer managerServer = getAsyncJobMgr().getExecutorContext().getManagementServer(); GuestOS os = managerServer.findGuestOSById(template.getGuestOSId()); if (os != null) { diff --git a/server/src/com/cloud/async/executor/CreatePrivateTemplateResultObject.java b/server/src/com/cloud/async/executor/CreatePrivateTemplateResultObject.java index c18ef4d3180..beee9b6c247 100644 --- a/server/src/com/cloud/async/executor/CreatePrivateTemplateResultObject.java +++ b/server/src/com/cloud/async/executor/CreatePrivateTemplateResultObject.java @@ -52,6 +52,9 @@ public class CreatePrivateTemplateResultObject { @Param(name="passwordenabled") private boolean passwordEnabled; + @Param(name="crossZones") + private boolean crossZones; + @Param(name="ostypeid") private Long osTypeId; @@ -197,6 +200,14 @@ public class CreatePrivateTemplateResultObject { this.passwordEnabled = passwordEnabled; } + public boolean isCrossZones() { + return crossZones; + } + + public void setCrossZones(boolean crossZones) { + this.crossZones = crossZones; + } + public long getDomainId() { return domainId; } diff --git a/server/src/com/cloud/async/executor/DeployVMExecutor.java b/server/src/com/cloud/async/executor/DeployVMExecutor.java index b0da90c7b32..23fcc8c7493 100644 --- a/server/src/com/cloud/async/executor/DeployVMExecutor.java +++ b/server/src/com/cloud/async/executor/DeployVMExecutor.java @@ -60,7 +60,7 @@ public class DeployVMExecutor extends VMOperationExecutor { param.getUserId(), param.getAccountId(), param.getDataCenterId(), param.getServiceOfferingId(), param.getTemplateId(), param.getDiskOfferingId(), param.getDomain(), - param.getPassword(), param.getDisplayName(), param.getGroup(), param.getUserData(), param.getNetworkGroup(), param.getEventId()); + param.getPassword(), param.getDisplayName(), param.getGroup(), param.getUserData(), param.getNetworkGroup(), param.getEventId(), param.getSize()); asyncMgr.completeAsyncJob(getJob().getId(), AsyncJobResult.STATUS_SUCCEEDED, 0, composeResultObject(param.getUserId(), vm, param)); diff --git a/server/src/com/cloud/async/executor/DeployVMParam.java b/server/src/com/cloud/async/executor/DeployVMParam.java index d857b5eaab3..4727b51142c 100644 --- a/server/src/com/cloud/async/executor/DeployVMParam.java +++ b/server/src/com/cloud/async/executor/DeployVMParam.java @@ -31,6 +31,7 @@ public class 
DeployVMParam extends VMOperationParam { private String userData; private long domainId; private String [] networkGroups; + private long size; public DeployVMParam() { } @@ -58,7 +59,7 @@ public class DeployVMParam extends VMOperationParam { long serviceOfferingId, long templateId, Long diskOfferingId, String domain, String password, String displayName, String group, String userData, - String [] networkGroups, long eventId) { + String [] networkGroups, long eventId, long size) { setUserId(userId); setAccountId(accountId); @@ -73,7 +74,12 @@ public class DeployVMParam extends VMOperationParam { this.userData = userData; this.setNetworkGroups(networkGroups); this.eventId = eventId; - } + this.size = size; + } + + public long getSize(){ + return size; + } public long getDataCenterId() { return dataCenterId; diff --git a/server/src/com/cloud/async/executor/VolumeOperationExecutor.java b/server/src/com/cloud/async/executor/VolumeOperationExecutor.java index 10f6b1c0dbc..9f32126c568 100644 --- a/server/src/com/cloud/async/executor/VolumeOperationExecutor.java +++ b/server/src/com/cloud/async/executor/VolumeOperationExecutor.java @@ -62,7 +62,7 @@ public class VolumeOperationExecutor extends BaseAsyncJobExecutor { if (op == VolumeOp.Create) { eventType = EventTypes.EVENT_VOLUME_CREATE; failureDescription = "Failed to create volume"; - volume = asyncMgr.getExecutorContext().getManagementServer().createVolume(param.getUserId(), param.getAccountId(), param.getName(), param.getZoneId(), param.getDiskOfferingId(), param.getEventId()); + volume = asyncMgr.getExecutorContext().getManagementServer().createVolume(param.getUserId(), param.getAccountId(), param.getName(), param.getZoneId(), param.getDiskOfferingId(), param.getEventId(), param.getSize()); if (volume.getStatus() == AsyncInstanceCreateStatus.Corrupted) { asyncMgr.completeAsyncJob(getJob().getId(), AsyncJobResult.STATUS_FAILED, BaseCmd.INTERNAL_ERROR, "Failed to create volume."); } else { diff --git a/server/src/com/cloud/async/executor/VolumeOperationParam.java b/server/src/com/cloud/async/executor/VolumeOperationParam.java index 86c10aa53ed..4ee39e087a0 100644 --- a/server/src/com/cloud/async/executor/VolumeOperationParam.java +++ b/server/src/com/cloud/async/executor/VolumeOperationParam.java @@ -31,7 +31,8 @@ public class VolumeOperationParam { private long zoneId; private String name; private long diskOfferingId; - + private long size; + // Used for Attach and Detach private long vmId; @@ -124,4 +125,11 @@ public class VolumeOperationParam { return deviceId; } + public long getSize(){ + return size; + } + + public void setSize(long size){ + this.size = size; + } } diff --git a/server/src/com/cloud/configuration/ConfigurationManager.java b/server/src/com/cloud/configuration/ConfigurationManager.java index b9fc95ba032..2acf9ee9e27 100644 --- a/server/src/com/cloud/configuration/ConfigurationManager.java +++ b/server/src/com/cloud/configuration/ConfigurationManager.java @@ -96,6 +96,7 @@ public interface ConfigurationManager extends Manager { * @param description * @param numGibibytes * @param mirrored + * @param size * @return ID */ DiskOfferingVO createDiskOffering(long domainId, String name, String description, int numGibibytes, String tags); diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index f5efe1c0438..52a7b58dfc3 100644 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ 
b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -142,7 +142,7 @@ public class ConfigurationManagerImpl implements ConfigurationManager { saveConfigurationEvent(userId, null, EventTypes.EVENT_CONFIGURATION_VALUE_EDIT, "Successfully edited configuration value.", "name=" + name, "value=" + value); } - private String validateConfigurationValue(String name, String value) { + private String validateConfigurationValue(String name, String value) throws InvalidParameterValueException { if (value == null) { return null; } @@ -170,11 +170,18 @@ public class ConfigurationManagerImpl implements ConfigurationManager { } if(type.equals(String.class)) { - if (range.equals("privateip")) { - if (!NetUtils.isSiteLocalAddress(value)) { - s_logger.error("privateip range " + value - + " is not a site local address for configuration variable " + name); - return "Please enter a site local IP address."; + if (range.equals("privateip")) + { + try { + if (!NetUtils.isSiteLocalAddress(value)) { + s_logger.error("privateip range " + value + + " is not a site local address for configuration variable " + name); + return "Please enter a site local IP address."; + } + } catch (NullPointerException e) + { + s_logger.error("Error parsing ip address for " + name); + throw new InvalidParameterValueException("Error parsing ip address"); } } else if (range.equals("netmask")) { if (!NetUtils.isValidNetmask(value)) { diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index c0bb446a6e3..3cfbc1dacaf 100644 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -985,7 +985,7 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, final AccountVO account = _accountDao.findById(Account.ACCOUNT_ID_SYSTEM); try { - List vols = _storageMgr.create(account, proxy, _template, dc, pod, _serviceOffering, null); + List vols = _storageMgr.create(account, proxy, _template, dc, pod, _serviceOffering, null,0); if (vols == null) { s_logger.error("Unable to alloc storage for console proxy"); return null; @@ -1383,10 +1383,9 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, if (s_logger.isTraceEnabled()) s_logger.trace("Begin console proxy capacity scan"); - //config var for consoleproxy.restart check + // config var for consoleproxy.restart check String restart = _configDao.getValue("consoleproxy.restart"); - - if(restart.equalsIgnoreCase("false")) + if(restart != null && restart.equalsIgnoreCase("false")) { s_logger.debug("Capacity scan disabled purposefully, consoleproxy.restart = false. 
This happens when the primarystorage is in maintenance mode"); return; diff --git a/server/src/com/cloud/hypervisor/vmware/discoverer/VmwareServerDiscoverer.java b/server/src/com/cloud/hypervisor/vmware/discoverer/VmwareServerDiscoverer.java new file mode 100644 index 00000000000..a7f186a19e0 --- /dev/null +++ b/server/src/com/cloud/hypervisor/vmware/discoverer/VmwareServerDiscoverer.java @@ -0,0 +1,40 @@ +package com.cloud.hypervisor.vmware.discoverer; + +import java.net.URI; +import java.util.List; +import java.util.Map; + +import javax.ejb.Local; +import javax.naming.ConfigurationException; + +import com.cloud.exception.DiscoveryException; +import com.cloud.host.HostVO; +import com.cloud.resource.Discoverer; +import com.cloud.resource.DiscovererBase; +import com.cloud.resource.ServerResource; + +@Local(value=Discoverer.class) +public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer { + + @Override + public Map> find(long dcId, Long podId, Long clusterId, URI url, + String username, String password) throws DiscoveryException { + + // ??? + return null; + } + + @Override + public void postDiscovery(List hosts, long msId) { + //do nothing + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + + // TODO + return true; + } +} + diff --git a/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java b/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java index d5ae622d890..ecdf216f5df 100644 --- a/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java +++ b/server/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java @@ -214,17 +214,17 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L details.put(HostInfo.HOST_OS_KERNEL_VERSION, hostKernelVer); details.put(HostInfo.HYPERVISOR_VERSION, xenVersion); - if (!params.containsKey("public.network.device")) { + if (!params.containsKey("public.network.device") && _publicNic != null) { params.put("public.network.device", _publicNic); details.put("public.network.device", _publicNic); } - if (!params.containsKey("guest.network.device")) { + if (!params.containsKey("guest.network.device") && _guestNic != null) { params.put("guest.network.device", _guestNic); details.put("guest.network.device", _guestNic); } - if (!params.containsKey("private.network.device")) { + if (!params.containsKey("private.network.device") && _privateNic != null) { params.put("private.network.device", _privateNic); details.put("private.network.device", _privateNic); } diff --git a/server/src/com/cloud/network/NetworkManagerImpl.java b/server/src/com/cloud/network/NetworkManagerImpl.java index 9381bfb5c87..d8c4980bf71 100755 --- a/server/src/com/cloud/network/NetworkManagerImpl.java +++ b/server/src/com/cloud/network/NetworkManagerImpl.java @@ -94,6 +94,7 @@ import com.cloud.ha.HighAvailabilityManager; import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.LoadBalancerDao; @@ -430,7 +431,7 @@ public class NetworkManagerImpl implements NetworkManager, VirtualMachineManager txn.commit(); - List vols = _storageMgr.create(account, router, rtrTemplate, dc, pod, _offering, null); + List vols = _storageMgr.create(account, router, rtrTemplate, dc, pod, _offering, null,0); if (vols == null){ 
_ipAddressDao.unassignIpAddress(guestIp); _routerDao.delete(router.getId()); @@ -603,7 +604,7 @@ public class NetworkManagerImpl implements NetworkManager, VirtualMachineManager router.setLastHostId(pod.second()); router = _routerDao.persist(router); - List vols = _storageMgr.create(account, router, template, dc, pod.first(), _offering, null); + List vols = _storageMgr.create(account, router, template, dc, pod.first(), _offering, null,0); if(vols != null) { found = true; break; @@ -2231,7 +2232,7 @@ public class NetworkManagerImpl implements NetworkManager, VirtualMachineManager final HashSet avoid = new HashSet(); final HostVO fromHost = _hostDao.findById(router.getHostId()); - if (fromHost.getClusterId() == null) { + if (fromHost.getHypervisorType() != Hypervisor.Type.KVM && fromHost.getClusterId() == null) { s_logger.debug("The host is not in a cluster"); return null; } diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index ba45f291447..b07169a1a62 100644 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -145,7 +145,8 @@ public class ConfigurationServerImpl implements ConfigurationServer { _configMgr.createDiskOffering(DomainVO.ROOT_DOMAIN, "Small", "Small Disk, 5 GB", 5, null); _configMgr.createDiskOffering(DomainVO.ROOT_DOMAIN, "Medium", "Medium Disk, 20 GB", 20, null); _configMgr.createDiskOffering(DomainVO.ROOT_DOMAIN, "Large", "Large Disk, 100 GB", 100, null); - + _configMgr.createDiskOffering(DomainVO.ROOT_DOMAIN, "Private", "Private Disk", 0, null); + //Add default manual snapshot policy SnapshotPolicyVO snapPolicy = new SnapshotPolicyVO(0L, "00", "GMT", (short)4, 0); _snapPolicyDao.persist(snapPolicy); diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index cbcfad832b8..7fbac046b9a 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -922,9 +922,7 @@ public class ManagementServerImpl implements ManagementServer { if (!_vmMgr.destroyVirtualMachine(userId, vm.getId())) { s_logger.error("Unable to destroy vm: " + vm.getId()); accountCleanupNeeded = true; - } else { - //_vmMgr.releaseGuestIpAddress(vm); FIXME FIXME bug 5561 - } + } } // Mark the account's volumes as destroyed @@ -1656,7 +1654,12 @@ public class ManagementServerImpl implements ManagementServer { if (!vlan.getVlanType().equals(VlanType.VirtualNetwork)) { throw new IllegalArgumentException("only ip addresses that belong to a virtual network may be disassociated."); } - + + //Check for account wide pool. It will have an entry for account_vlan_map. 
+ if (_accountVlanMapDao.findAccountVlanMap(accountId,ipVO.getVlanDbId()) != null){ + throw new PermissionDeniedException(publicIPAddress + " belongs to Account wide IP pool and cannot be disassociated"); + } + txn.start(); boolean success = _networkMgr.releasePublicIpAddress(userId, publicIPAddress); if (success) @@ -1727,11 +1730,11 @@ public class ManagementServerImpl implements ManagementServer { } @Override - public VolumeVO createVolume(long userId, long accountId, String name, long zoneId, long diskOfferingId, long startEventId) throws InternalErrorException { + public VolumeVO createVolume(long userId, long accountId, String name, long zoneId, long diskOfferingId, long startEventId, long size) throws InternalErrorException { saveStartedEvent(userId, accountId, EventTypes.EVENT_VOLUME_CREATE, "Creating volume", startEventId); DataCenterVO zone = _dcDao.findById(zoneId); DiskOfferingVO diskOffering = _diskOfferingDao.findById(diskOfferingId); - VolumeVO createdVolume = _storageMgr.createVolume(accountId, userId, name, zone, diskOffering, startEventId); + VolumeVO createdVolume = _storageMgr.createVolume(accountId, userId, name, zone, diskOffering, startEventId,size); if (createdVolume != null) return createdVolume; @@ -1740,7 +1743,7 @@ public class ManagementServerImpl implements ManagementServer { } @Override - public long createVolumeAsync(long userId, long accountId, String name, long zoneId, long diskOfferingId) throws InvalidParameterValueException, InternalErrorException, ResourceAllocationException { + public long createVolumeAsync(long userId, long accountId, String name, long zoneId, long diskOfferingId, long size) throws InvalidParameterValueException, InternalErrorException, ResourceAllocationException { // Check that the account is valid AccountVO account = _accountDao.findById(accountId); if (account == null) { @@ -1795,7 +1798,8 @@ public class ManagementServerImpl implements ManagementServer { param.setZoneId(zoneId); param.setDiskOfferingId(diskOfferingId); param.setEventId(eventId); - + param.setSize(size); + Gson gson = GsonHelper.getBuilder().create(); AsyncJobVO job = new AsyncJobVO(); @@ -2188,7 +2192,7 @@ public class ManagementServerImpl implements ManagementServer { @Override public UserVm deployVirtualMachine(long userId, long accountId, long dataCenterId, long serviceOfferingId, long templateId, Long diskOfferingId, - String domain, String password, String displayName, String group, String userData, String [] networkGroups, long startEventId) throws ResourceAllocationException, InvalidParameterValueException, InternalErrorException, + String domain, String password, String displayName, String group, String userData, String [] networkGroups, long startEventId, long size) throws ResourceAllocationException, InvalidParameterValueException, InternalErrorException, InsufficientStorageCapacityException, PermissionDeniedException, ExecutionException, StorageUnavailableException, ConcurrentOperationException { saveStartedEvent(userId, accountId, EventTypes.EVENT_VM_CREATE, "Deploying Vm", startEventId); @@ -2272,7 +2276,7 @@ public class ManagementServerImpl implements ManagementServer { ArrayList a = new ArrayList(avoids.values()); if (_directAttachNetworkExternalIpAllocator) { try { - created = _vmMgr.createDirectlyAttachedVMExternal(vmId, userId, account, dc, offering, template, diskOffering, displayName, group, userData, a, networkGroupVOs, startEventId); + created = _vmMgr.createDirectlyAttachedVMExternal(vmId, userId, account, dc, offering, template, 
diskOffering, displayName, group, userData, a, networkGroupVOs, startEventId, size); } catch (ResourceAllocationException rae) { throw rae; } @@ -2293,13 +2297,13 @@ public class ManagementServerImpl implements ManagementServer { } try { - created = _vmMgr.createVirtualMachine(vmId, userId, account, dc, offering, template, diskOffering, displayName, group, userData, a, startEventId); + created = _vmMgr.createVirtualMachine(vmId, userId, account, dc, offering, template, diskOffering, displayName, group, userData, a, startEventId, size); } catch (ResourceAllocationException rae) { throw rae; } } else { try { - created = _vmMgr.createDirectlyAttachedVM(vmId, userId, account, dc, offering, template, diskOffering, displayName, group, userData, a, networkGroupVOs, startEventId); + created = _vmMgr.createDirectlyAttachedVM(vmId, userId, account, dc, offering, template, diskOffering, displayName, group, userData, a, networkGroupVOs, startEventId, size); } catch (ResourceAllocationException rae) { throw rae; } @@ -2418,7 +2422,7 @@ public class ManagementServerImpl implements ManagementServer { @Override public long deployVirtualMachineAsync(long userId, long accountId, long dataCenterId, long serviceOfferingId, long templateId, - Long diskOfferingId, String domain, String password, String displayName, String group, String userData, String [] networkGroups) throws InvalidParameterValueException, PermissionDeniedException { + Long diskOfferingId, String domain, String password, String displayName, String group, String userData, String [] networkGroups, long size) throws InvalidParameterValueException, PermissionDeniedException { AccountVO account = _accountDao.findById(accountId); if (account == null) { @@ -2512,7 +2516,7 @@ public class ManagementServerImpl implements ManagementServer { long eventId = saveScheduledEvent(userId, accountId, EventTypes.EVENT_VM_CREATE, "deploying Vm"); DeployVMParam param = new DeployVMParam(userId, accountId, dataCenterId, serviceOfferingId, templateId, diskOfferingId, domain, password, - displayName, group, userData, networkGroups, eventId); + displayName, group, userData, networkGroups, eventId, size); Gson gson = GsonHelper.getBuilder().create(); AsyncJobVO job = new AsyncJobVO(); @@ -2623,7 +2627,7 @@ public class ManagementServerImpl implements ManagementServer { } @Override - public boolean recoverVirtualMachine(long vmId) throws ResourceAllocationException { + public boolean recoverVirtualMachine(long vmId) throws ResourceAllocationException, InternalErrorException { return _vmMgr.recoverVirtualMachine(vmId); } @@ -4228,6 +4232,14 @@ public class ManagementServerImpl implements ManagementServer { template = _templateDao.findById(templateId); if (template == null) { throw new InvalidParameterValueException("Please specify a valid template ID."); + }// If ISO requested then it should be ISO. + if (isIso && template.getFormat() != ImageFormat.ISO){ + s_logger.error("Template Id " + templateId + " is not an ISO"); + throw new InvalidParameterValueException("Template Id " + templateId + " is not an ISO"); + }// If ISO not requested then it shouldn't be an ISO. 
+ if (!isIso && template.getFormat() == ImageFormat.ISO){ + s_logger.error("Incorrect format of the template id " + templateId); + throw new InvalidParameterValueException("Incorrect format " + template.getFormat() + " of the template id " + templateId); } } @@ -4267,7 +4279,7 @@ public class ManagementServerImpl implements ManagementServer { return _templateHostDao.listByHostTemplate(secondaryStorageHost.getId(), templateId); } } else { - return _templateHostDao.listByTemplateId(templateId); + return _templateHostDao.listByOnlyTemplateId(templateId); } } @@ -4761,20 +4773,6 @@ public class ManagementServerImpl implements ManagementServer { VMTemplateVO template = _templateDao.createForUpdate(id); if (name != null) { - // Check for duplicate name - VMTemplateVO foundTemplate = _templateDao.findByTemplateName(name); - if (foundTemplate != null) - { - if(foundTemplate.getId()==id) - { - //do nothing, you are updating the same template you own - } - else - { - s_logger.error("updateTemplate - Template name " + name + " already exists "); - return false; - } - } template.setName(name); } @@ -6657,6 +6655,11 @@ public class ManagementServerImpl implements ManagementServer { return _diskOfferingDao.findById(diskOfferingId); } + @Override + public List findPrivateDiskOffering() { + return _diskOfferingDao.findPrivateDiskOffering(); + } + @Override @DB public boolean updateTemplatePermissions(long templateId, String operation, Boolean isPublic, Boolean isFeatured, List accountNames) throws InvalidParameterValueException, @@ -6810,7 +6813,7 @@ public class ManagementServerImpl implements ManagementServer { @Override public DiskOfferingVO createDiskOffering(long domainId, String name, String description, int numGibibytes, String tags) throws InvalidParameterValueException { - if (numGibibytes < 1) { + if (numGibibytes!=0 && numGibibytes < 1) { throw new InvalidParameterValueException("Please specify a disk size of at least 1 Gb."); } else if (numGibibytes > _maxVolumeSizeInGb) { throw new InvalidParameterValueException("The maximum size for a disk is " + _maxVolumeSizeInGb + " Gb."); @@ -7584,7 +7587,10 @@ public class ManagementServerImpl implements ManagementServer { return; } + Transaction txn = null; try { + txn = Transaction.open(Transaction.CLOUD_DB); + List accounts = _accountDao.findCleanups(); s_logger.info("Found " + accounts.size() + " accounts to cleanup"); for (AccountVO account : accounts) { @@ -7598,6 +7604,9 @@ public class ManagementServerImpl implements ManagementServer { } catch (Exception e) { s_logger.error("Exception ", e); } finally { + if(txn != null) + txn.close(); + lock.unlock(); } } catch (Exception e) { @@ -8352,5 +8361,16 @@ public class ManagementServerImpl implements ManagementServer { return _asyncMgr.submitAsyncJob(job); } + + @Override + public boolean validateCustomVolumeSizeRange(long size) throws InvalidParameterValueException { + if (size<0 || (size>0 && size < 1)) { + throw new InvalidParameterValueException("Please specify a size of at least 1 Gb."); + } else if (size > _maxVolumeSizeInGb) { + throw new InvalidParameterValueException("The maximum size allowed is " + _maxVolumeSizeInGb + " Gb."); + } + + return true; + } } diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 312f2c02d2c..a25e42c3f0c 100644 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -41,10 +41,12 @@ import org.apache.log4j.Logger; import 
com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; +import com.cloud.agent.api.BackupSnapshotCommand; import com.cloud.agent.api.Command; import com.cloud.agent.api.CreateVolumeFromSnapshotAnswer; import com.cloud.agent.api.CreateVolumeFromSnapshotCommand; import com.cloud.agent.api.DeleteStoragePoolCommand; +import com.cloud.agent.api.ManageSnapshotCommand; import com.cloud.agent.api.ModifyStoragePoolAnswer; import com.cloud.agent.api.ModifyStoragePoolCommand; import com.cloud.agent.api.storage.CopyVolumeAnswer; @@ -203,7 +205,7 @@ public class StorageManagerImpl implements StorageManager { private int _totalRetries; private int _pauseInterval; private final boolean _shouldBeSnapshotCapable = true; - private String _hypervisorType; + private Hypervisor.Type _hypervisorType; @Override public boolean share(VMInstanceVO vm, List vols, HostVO host, boolean cancelPreviousShare) { @@ -266,7 +268,7 @@ public class StorageManagerImpl implements StorageManager { } else { offering = _offeringDao.findById(vol.getDiskOfferingId()); } - VolumeVO created = createVolume(create, vm, template, dc, pod, host.getClusterId(), offering, diskOffering, new ArrayList()); + VolumeVO created = createVolume(create, vm, template, dc, pod, host.getClusterId(), offering, diskOffering, new ArrayList(),0); if (created == null) { break; } @@ -671,7 +673,7 @@ public class StorageManagerImpl implements StorageManager { basicErrMsg, _totalRetries, _pauseInterval, - _shouldBeSnapshotCapable); + _shouldBeSnapshotCapable, null); if (answer != null && answer.getResult()) { vdiUUID = answer.getVdi(); } @@ -684,7 +686,7 @@ public class StorageManagerImpl implements StorageManager { @DB protected VolumeVO createVolume(VolumeVO volume, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, Long clusterId, - ServiceOfferingVO offering, DiskOfferingVO diskOffering, List avoids) { + ServiceOfferingVO offering, DiskOfferingVO diskOffering, List avoids, long size) { StoragePoolVO pool = null; final HashSet avoidPools = new HashSet(avoids); @@ -741,7 +743,7 @@ public class StorageManagerImpl implements StorageManager { } cmd = new CreateCommand(volume, vm, dskCh, tmpltStoredOn.getLocalDownloadPath(), pool); } else { - cmd = new CreateCommand(volume, vm, dskCh, pool); + cmd = new CreateCommand(volume, vm, dskCh, pool, size); } Answer answer = sendToPool(pool, cmd); @@ -778,21 +780,21 @@ public class StorageManagerImpl implements StorageManager { } @Override - public List create(Account account, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, ServiceOfferingVO offering, DiskOfferingVO diskOffering) throws StorageUnavailableException, ExecutionException { + public List create(Account account, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, ServiceOfferingVO offering, DiskOfferingVO diskOffering, long size) throws StorageUnavailableException, ExecutionException { List avoids = new ArrayList(); - return create(account, vm, template, dc, pod, offering, diskOffering, avoids); + return create(account, vm, template, dc, pod, offering, diskOffering, avoids, size); } @DB protected List create(Account account, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, - ServiceOfferingVO offering, DiskOfferingVO diskOffering, List avoids) { + ServiceOfferingVO offering, DiskOfferingVO diskOffering, List avoids, long size) { ArrayList vols = new ArrayList(2); VolumeVO dataVol = null; VolumeVO rootVol = null; Transaction txn = Transaction.currentTxn(); 
txn.start(); if (Storage.ImageFormat.ISO == template.getFormat()) { - rootVol = new VolumeVO(VolumeType.ROOT, vm.getId(), vm.getInstanceName() + "-ROOT", dc.getId(), pod.getId(), account.getId(), account.getDomainId(), diskOffering.getDiskSizeInBytes()); + rootVol = new VolumeVO(VolumeType.ROOT, vm.getId(), vm.getInstanceName() + "-ROOT", dc.getId(), pod.getId(), account.getId(), account.getDomainId(),(size>0)? size : diskOffering.getDiskSizeInBytes()); rootVol.setDiskOfferingId(diskOffering.getId()); rootVol.setDeviceId(0l); rootVol = _volsDao.persist(rootVol); @@ -804,7 +806,7 @@ public class StorageManagerImpl implements StorageManager { rootVol = _volsDao.persist(rootVol); if (diskOffering != null && diskOffering.getDiskSizeInBytes() > 0) { - dataVol = new VolumeVO(VolumeType.DATADISK, vm.getId(), vm.getInstanceName() + "-DATA", dc.getId(), pod.getId(), account.getId(), account.getDomainId(), diskOffering.getDiskSizeInBytes()); + dataVol = new VolumeVO(VolumeType.DATADISK, vm.getId(), vm.getInstanceName() + "-DATA", dc.getId(), pod.getId(), account.getId(), account.getDomainId(), (size>0)? size : diskOffering.getDiskSizeInBytes()); dataVol.setDiskOfferingId(diskOffering.getId()); dataVol.setDeviceId(1l); dataVol = _volsDao.persist(dataVol); @@ -815,7 +817,7 @@ public class StorageManagerImpl implements StorageManager { VolumeVO dataCreated = null; VolumeVO rootCreated = null; try { - rootCreated = createVolume(rootVol, vm, template, dc, pod, null, offering, diskOffering, avoids); + rootCreated = createVolume(rootVol, vm, template, dc, pod, null, offering, diskOffering, avoids,size); if (rootCreated == null) { throw new CloudRuntimeException("Unable to create " + rootVol); } @@ -824,7 +826,7 @@ public class StorageManagerImpl implements StorageManager { if (dataVol != null) { StoragePoolVO pool = _storagePoolDao.findById(rootCreated.getPoolId()); - dataCreated = createVolume(dataVol, vm, template, dc, pod, pool.getClusterId(), offering, diskOffering, avoids); + dataCreated = createVolume(dataVol, vm, template, dc, pod, pool.getClusterId(), offering, diskOffering, avoids,size); if (dataCreated == null) { throw new CloudRuntimeException("Unable to create " + dataVol); } @@ -845,8 +847,8 @@ public class StorageManagerImpl implements StorageManager { @Override public long createUserVM(Account account, VMInstanceVO vm, VMTemplateVO template, DataCenterVO dc, HostPodVO pod, ServiceOfferingVO offering, DiskOfferingVO diskOffering, - List avoids) { - List volumes = create(account, vm, template, dc, pod, offering, diskOffering, avoids); + List avoids, long size) { + List volumes = create(account, vm, template, dc, pod, offering, diskOffering, avoids, size); for (VolumeVO v : volumes) { long volumeId = v.getId(); @@ -866,13 +868,22 @@ public class StorageManagerImpl implements StorageManager { return volumes.get(0).getPoolId(); } - public StoragePoolHostVO chooseHostForStoragePool(StoragePoolVO poolVO, List avoidHosts) { + public Long chooseHostForStoragePool(StoragePoolVO poolVO, List avoidHosts, boolean sendToVmResidesOn, Long vmId) { + if (sendToVmResidesOn) { + if (vmId != null) { + VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId); + if (vmInstance != null) { + return vmInstance.getHostId(); + } + } + return null; + } List poolHosts = _poolHostDao.listByHostStatus(poolVO.getId(), Status.Up); Collections.shuffle(poolHosts); if (poolHosts != null && poolHosts.size() > 0) { for (StoragePoolHostVO sphvo : poolHosts) { if (!avoidHosts.contains(sphvo.getHostId())) { - return sphvo; + return 
sphvo.getHostId(); } } } @@ -982,8 +993,10 @@ public class StorageManagerImpl implements StorageManager { _totalRetries = NumbersUtil.parseInt(configDao.getValue("total.retries"), 4); _pauseInterval = 2*NumbersUtil.parseInt(configDao.getValue("ping.interval"), 60); - _hypervisorType = configDao.getValue("hypervisor.type"); - + String hypervisoType = configDao.getValue("hypervisor.type"); + if (hypervisoType.equalsIgnoreCase("KVM")) { + _hypervisorType = Hypervisor.Type.KVM; + } _agentMgr.registerForHostEvents(new StoragePoolMonitor(this, _hostDao, _storagePoolDao), true, false, true); String storageCleanupEnabled = configs.get("storage.cleanup.enabled"); @@ -1064,6 +1077,19 @@ public class StorageManagerImpl implements StorageManager { return true; } + + public String getVmNameOnVolume(VolumeVO volume) { + Long vmId = volume.getInstanceId(); + if (vmId != null) { + VMInstanceVO vm = _vmInstanceDao.findById(vmId); + + if (vm == null) { + return null; + } + return vm.getInstanceName(); + } + return null; + } public String getAbsoluteIsoPath(long templateId, long dataCenterId) { String isoPath = null; @@ -1162,7 +1188,7 @@ public class StorageManagerImpl implements StorageManager { } } if (hypervisorType == null) { - if (_hypervisorType.equalsIgnoreCase("KVM")) { + if (_hypervisorType == Hypervisor.Type.KVM) { hypervisorType = Hypervisor.Type.KVM; } else { s_logger.debug("Couldn't find a host to serve in the server pool"); @@ -1244,7 +1270,7 @@ public class StorageManagerImpl implements StorageManager { // perhaps do this on demand, or perhaps mount on a couple of hosts per // pod List allHosts = _hostDao.listBy(Host.Type.Routing, clusterId, podId, zoneId); - if (allHosts.isEmpty() && !_hypervisorType.equalsIgnoreCase("KVM")) { + if (allHosts.isEmpty() && _hypervisorType != Hypervisor.Type.KVM) { throw new ResourceAllocationException("No host exists to associate a storage pool with"); } long poolId = _storagePoolDao.getNextInSequence(Long.class, "id"); @@ -1258,7 +1284,7 @@ public class StorageManagerImpl implements StorageManager { pool.setClusterId(clusterId); pool.setStatus(Status.Up); pool = _storagePoolDao.persist(pool, details); - if (_hypervisorType.equalsIgnoreCase("KVM") && allHosts.isEmpty()) { + if (_hypervisorType == Hypervisor.Type.KVM && allHosts.isEmpty()) { return pool; } s_logger.debug("In createPool Adding the pool to each of the hosts"); @@ -1477,7 +1503,7 @@ public class StorageManagerImpl implements StorageManager { @Override @DB - public VolumeVO createVolume(long accountId, long userId, String userSpecifiedName, DataCenterVO dc, DiskOfferingVO diskOffering, long startEventId) + public VolumeVO createVolume(long accountId, long userId, String userSpecifiedName, DataCenterVO dc, DiskOfferingVO diskOffering, long startEventId, long size) { String volumeName = ""; VolumeVO createdVolume = null; @@ -1520,7 +1546,7 @@ public class StorageManagerImpl implements StorageManager { Pair pod = null; while ((pod = _agentMgr.findPod(null, null, dc, account.getId(), podsToAvoid)) != null) { - if ((createdVolume = createVolume(volume, null, null, dc, pod.first(), null, null, diskOffering, poolsToAvoid)) != null) { + if ((createdVolume = createVolume(volume, null, null, dc, pod.first(), null, null, diskOffering, poolsToAvoid, size)) != null) { break; } else { podsToAvoid.add(pod.first().getId()); @@ -1643,18 +1669,21 @@ public class StorageManagerImpl implements StorageManager { @Override public Answer sendToHostsOnStoragePool(Long poolId, Command cmd, String basicErrMsg) { - return 
sendToHostsOnStoragePool(poolId, cmd, basicErrMsg, 1, 0, false); + return sendToHostsOnStoragePool(poolId, cmd, basicErrMsg, 1, 0, false, null); } @Override - public Answer sendToHostsOnStoragePool(Long poolId, Command cmd, String basicErrMsg, int totalRetries, int pauseBeforeRetry, boolean shouldBeSnapshotCapable) { + public Answer sendToHostsOnStoragePool(Long poolId, Command cmd, String basicErrMsg, int totalRetries, int pauseBeforeRetry, boolean shouldBeSnapshotCapable, + Long vmId) { Answer answer = null; Long hostId = null; StoragePoolVO storagePool = _storagePoolDao.findById(poolId); List hostsToAvoid = new ArrayList(); - StoragePoolHostVO storagePoolHost; + int tryCount = 0; - if (chooseHostForStoragePool(storagePool, hostsToAvoid) == null) { + boolean sendToVmHost = sendToVmResidesOn(cmd); + + if (chooseHostForStoragePool(storagePool, hostsToAvoid, sendToVmHost, vmId) == null) { // Don't just fail. The host could be reconnecting. // wait for some time for it to get connected // Wait for 3*ping.interval, since the code attempts a manual @@ -1666,10 +1695,9 @@ public class StorageManagerImpl implements StorageManager { // continue. } } - while ((storagePoolHost = chooseHostForStoragePool(storagePool, hostsToAvoid)) != null && tryCount++ < totalRetries) { + while ((hostId = chooseHostForStoragePool(storagePool, hostsToAvoid, sendToVmHost, vmId)) != null && tryCount++ < totalRetries) { String errMsg = basicErrMsg + " on host: " + hostId + " try: " + tryCount + ", reason: "; try { - hostId = storagePoolHost.getHostId(); HostVO hostVO = _hostDao.findById(hostId); if (shouldBeSnapshotCapable) { if (hostVO == null ) { @@ -2147,4 +2175,14 @@ public class StorageManagerImpl implements StorageManager { return true; } + + private boolean sendToVmResidesOn(Command cmd) { + if ((_hypervisorType == Hypervisor.Type.KVM) && + ((cmd instanceof ManageSnapshotCommand) || + (cmd instanceof BackupSnapshotCommand))) { + return true; + } else { + return false; + } + } } diff --git a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java index dadef9db822..815ead3bc0f 100644 --- a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java +++ b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java @@ -784,7 +784,7 @@ public class SecondaryStorageManagerImpl implements SecondaryStorageVmManager, V final AccountVO account = _accountDao.findById(Account.ACCOUNT_ID_SYSTEM); try { - List vols = _storageMgr.create(account, secStorageVm, _template, dc, pod, _serviceOffering, null); + List vols = _storageMgr.create(account, secStorageVm, _template, dc, pod, _serviceOffering, null,0); if( vols == null ){ s_logger.error("Unable to alloc storage for secondary storage vm"); return null; diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index ec5e9f63963..a372e17b6e8 100644 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -86,6 +86,7 @@ import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; +import com.cloud.uservm.UserVm; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ComponentLocator; @@ -134,6 +135,7 @@ public class SnapshotManagerImpl implements SnapshotManager { protected 
SearchBuilder PolicySnapshotSearch; protected SearchBuilder PoliciesForSnapSearch; + private String _hypervisorType; private final boolean _shouldBeSnapshotCapable = true; // all methods here should be snapshot capable. @Override @DB @@ -344,9 +346,12 @@ public class SnapshotManagerImpl implements SnapshotManager { txn.commit(); // Send a ManageSnapshotCommand to the agent - ManageSnapshotCommand cmd = new ManageSnapshotCommand(ManageSnapshotCommand.CREATE_SNAPSHOT, id, volume.getPath(), snapshotName, _vmDao.findById(volume.getInstanceId()).getInstanceName()); + String vmName = _storageMgr.getVmNameOnVolume(volume); + + ManageSnapshotCommand cmd = new ManageSnapshotCommand(ManageSnapshotCommand.CREATE_SNAPSHOT, id, volume.getPath(), snapshotName, vmName); String basicErrMsg = "Failed to create snapshot for volume: " + volume.getId(); - ManageSnapshotAnswer answer = (ManageSnapshotAnswer) _storageMgr.sendToHostsOnStoragePool(volume.getPoolId(), cmd, basicErrMsg, _totalRetries, _pauseInterval, _shouldBeSnapshotCapable); + ManageSnapshotAnswer answer = (ManageSnapshotAnswer) _storageMgr.sendToHostsOnStoragePool(volume.getPoolId(), cmd, basicErrMsg, _totalRetries, _pauseInterval, + _shouldBeSnapshotCapable, volume.getInstanceId()); txn = Transaction.currentTxn(); txn.start(); @@ -521,6 +526,7 @@ public class SnapshotManagerImpl implements SnapshotManager { } String firstBackupUuid = volume.getFirstSnapshotBackupUuid(); boolean isVolumeInactive = _storageMgr.volumeInactive(volume); + String vmName = _storageMgr.getVmNameOnVolume(volume); BackupSnapshotCommand backupSnapshotCommand = new BackupSnapshotCommand(primaryStoragePoolNameLabel, secondaryStoragePoolUrl, @@ -533,7 +539,8 @@ public class SnapshotManagerImpl implements SnapshotManager { prevBackupUuid, firstBackupUuid, isFirstSnapshotOfRootVolume, - isVolumeInactive); + isVolumeInactive, + vmName); String backedUpSnapshotUuid = null; // By default, assume failed. @@ -544,7 +551,8 @@ public class SnapshotManagerImpl implements SnapshotManager { basicErrMsg, _totalRetries, _pauseInterval, - _shouldBeSnapshotCapable); + _shouldBeSnapshotCapable, + volume.getInstanceId()); if (answer != null && answer.getResult()) { backedUpSnapshotUuid = answer.getBackupSnapshotName(); if (backedUpSnapshotUuid != null) { @@ -734,7 +742,8 @@ public class SnapshotManagerImpl implements SnapshotManager { basicErrMsg, _totalRetries, _pauseInterval, - _shouldBeSnapshotCapable); + _shouldBeSnapshotCapable, + volume.getInstanceId()); } return answer; @@ -893,7 +902,7 @@ public class SnapshotManagerImpl implements SnapshotManager { details, _totalRetries, _pauseInterval, - _shouldBeSnapshotCapable); + _shouldBeSnapshotCapable, volume.getInstanceId()); if ((answer != null) && answer.getResult()) { // This is not the last snapshot. @@ -1020,7 +1029,7 @@ public class SnapshotManagerImpl implements SnapshotManager { Long poolId = volume.getPoolId(); if (poolId != null) { // Retry only once for this command. There's low chance of failure because of a connection problem. - answer = _storageMgr.sendToHostsOnStoragePool(poolId, cmd, basicErrMsg, 1, _pauseInterval, _shouldBeSnapshotCapable); + answer = _storageMgr.sendToHostsOnStoragePool(poolId, cmd, basicErrMsg, 1, _pauseInterval, _shouldBeSnapshotCapable, volume.getInstanceId()); } else { s_logger.info("Pool id for volume id: " + volumeId + " belonging to account id: " + accountId + " is null. 
Assuming the snapshotsDir for the account has already been deleted"); @@ -1230,7 +1239,7 @@ public class SnapshotManagerImpl implements SnapshotManager { snapshot.getName(), backupOfNextSnapshot); String basicErrMsg = "Failed to destroy snapshot id: " + snapshotId + " for volume id: " + volumeId; - Answer answer = _storageMgr.sendToHostsOnStoragePool(volume.getPoolId(), cmd, basicErrMsg, _totalRetries, _pauseInterval, _shouldBeSnapshotCapable); + Answer answer = _storageMgr.sendToHostsOnStoragePool(volume.getPoolId(), cmd, basicErrMsg, _totalRetries, _pauseInterval, _shouldBeSnapshotCapable, volume.getInstanceId()); if ((answer != null) && answer.getResult()) { success = true; @@ -1287,6 +1296,8 @@ public class SnapshotManagerImpl implements SnapshotManager { throw new ConfigurationException("Unable to get the configuration dao."); } + _hypervisorType = configDao.getValue("hypervisor.type"); + DateUtil.IntervalType.HOURLY.setMax(NumbersUtil.parseInt(configDao.getValue("snapshot.max.hourly"), HOURLYMAX)); DateUtil.IntervalType.DAILY.setMax(NumbersUtil.parseInt(configDao.getValue("snapshot.max.daily"), DAILYMAX)); DateUtil.IntervalType.WEEKLY.setMax(NumbersUtil.parseInt(configDao.getValue("snapshot.max.weekly"), WEEKLYMAX)); @@ -1329,5 +1340,7 @@ public class SnapshotManagerImpl implements SnapshotManager { public boolean stop() { return true; } + + } diff --git a/server/src/com/cloud/vm/UserVmManager.java b/server/src/com/cloud/vm/UserVmManager.java index b56d6a7273a..4c2a3e98e3d 100644 --- a/server/src/com/cloud/vm/UserVmManager.java +++ b/server/src/com/cloud/vm/UserVmManager.java @@ -29,12 +29,14 @@ import com.cloud.async.executor.StopVMExecutor; import com.cloud.async.executor.VMOperationParam; import com.cloud.dc.DataCenterVO; import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientStorageCapacityException; import com.cloud.exception.InternalErrorException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; import com.cloud.network.security.NetworkGroupVO; +import com.cloud.offerings.NetworkOfferingVO; import com.cloud.service.ServiceOfferingVO; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.SnapshotVO; @@ -52,7 +54,8 @@ import com.cloud.vm.VirtualMachine.Event; * */ public interface UserVmManager extends Manager, VirtualMachineManager { - + + UserVmVO allocate(String displayName, VMTemplateVO template, ServiceOfferingVO serviceOffering, NetworkOfferingVO[] networkOfferings, DiskOfferingVO[] diskOfferings, AccountVO owner, long userId) throws InsufficientCapacityException; static final int MAX_USER_DATA_LENGTH_BYTES = 2048; /** @@ -77,11 +80,11 @@ public interface UserVmManager extends Manager, VirtualMachineManager * @param diskOffering the disk offering for the root disk (deploying from ISO) or the data disk (deploying from a normal template) * @return UserVmVO if created; null if not. 
*/ - UserVmVO createVirtualMachine(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List avoids, long startEventId) throws InsufficientStorageCapacityException, InternalErrorException, ResourceAllocationException; + UserVmVO createVirtualMachine(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List avoids, long startEventId, long size) throws InsufficientStorageCapacityException, InternalErrorException, ResourceAllocationException; - UserVmVO createDirectlyAttachedVM(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List a, List networkGroupVO, long startEventId) throws InternalErrorException, ResourceAllocationException; + UserVmVO createDirectlyAttachedVM(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List a, List networkGroupVO, long startEventId, long size) throws InternalErrorException, ResourceAllocationException; - UserVmVO createDirectlyAttachedVMExternal(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List a, List networkGroupVO, long startEventId) throws InternalErrorException, ResourceAllocationException; + UserVmVO createDirectlyAttachedVMExternal(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List a, List networkGroupVO, long startEventId, long size) throws InternalErrorException, ResourceAllocationException; /** * Destroys one virtual machine @@ -181,7 +184,7 @@ public interface UserVmManager extends Manager, VirtualMachineManager boolean rebootVirtualMachine(long userId, long vmId); OperationResponse executeRebootVM(RebootVMExecutor executor, VMOperationParam param); - boolean recoverVirtualMachine(long vmId) throws ResourceAllocationException; + boolean recoverVirtualMachine(long vmId) throws ResourceAllocationException, InternalErrorException; VMTemplateVO createPrivateTemplateRecord(Long userId, long vmId, String name, String description, long guestOsId, Boolean requiresHvm, Integer bits, Boolean passwordEnabled, boolean isPublic, boolean featured) throws InvalidParameterValueException; diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index b4876bc7566..89ae3351502 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -82,8 +82,8 @@ import com.cloud.configuration.dao.ConfigurationDao; import com.cloud.configuration.dao.ResourceLimitDao; import com.cloud.dc.DataCenterVO; import com.cloud.dc.HostPodVO; -import com.cloud.dc.VlanVO; import com.cloud.dc.Vlan.VlanType; +import com.cloud.dc.VlanVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.VlanDao; @@ -94,6 +94,7 @@ import com.cloud.event.EventVO; import com.cloud.event.dao.EventDao; import 
com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InternalErrorException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.OperationTimedoutException; @@ -121,24 +122,25 @@ import com.cloud.network.security.NetworkGroupManager; import com.cloud.network.security.NetworkGroupVO; import com.cloud.offering.ServiceOffering; import com.cloud.offering.ServiceOffering.GuestIpType; +import com.cloud.offerings.NetworkOfferingVO; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOSVO; import com.cloud.storage.Snapshot; +import com.cloud.storage.Snapshot.SnapshotType; import com.cloud.storage.SnapshotVO; import com.cloud.storage.Storage; +import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolVO; import com.cloud.storage.VMTemplateHostVO; -import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.Volume; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.Snapshot.SnapshotType; -import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VirtualMachineTemplate.BootloaderType; +import com.cloud.storage.Volume; import com.cloud.storage.Volume.VolumeType; +import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.DiskTemplateDao; import com.cloud.storage.dao.GuestOSCategoryDao; @@ -1199,11 +1201,180 @@ public class UserVmManagerImpl implements UserVmManager { } userVm.setGuestIpAddress(null); - //_vmDao.update(userVm.getId(), userVm); FIXME need an updateIf + _vmDao.update(userVm.getId(), userVm); + } + + @Override + public UserVmVO allocate(String displayName, VMTemplateVO template, ServiceOfferingVO serviceOffering, NetworkOfferingVO[] networkOfferings, DiskOfferingVO[] diskOfferings, AccountVO owner, long userId) throws InsufficientCapacityException { + /* + long accountId = account.getId(); + long dataCenterId = dc.getId(); + long serviceOfferingId = offering.getId(); + UserVmVO vm = new UserVmVO(); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Creating vm for account id=" + account.getId() + + ", name="+ account.getAccountName() + "; dc=" + dc.getName() + + "; offering=" + offering.getId() + "; diskOffering=" + ((diskOffering != null) ? 
diskOffering.getName() : "none") + + "; template=" + template.getId()); + } + + DomainRouterVO router = _routerDao.findBy(accountId, dataCenterId, Role.DHCP_FIREWALL_LB_PASSWD_USERDATA); + if (router == null) { + throw new InternalErrorException("Cannot find a router for account (" + accountId + "/" + + account.getAccountName() + ") in " + dataCenterId); + } + + // Determine the Guest OS Id + long guestOSId; + if (template != null) { + guestOSId = template.getGuestOSId(); + } else { + throw new InternalErrorException("No template or ISO was specified for the VM."); + } + long numVolumes = -1; + Transaction txn = Transaction.currentTxn(); + long routerId = router.getId(); + + String name; + txn.start(); + + account = _accountDao.lock(accountId, true); + if (account == null) { + throw new InternalErrorException("Unable to lock up the account: " + accountId); + } + + // First check that the maximum number of UserVMs for the given accountId will not be exceeded + if (_accountMgr.resourceLimitExceeded(account, ResourceType.user_vm)) { + ResourceAllocationException rae = new ResourceAllocationException("Maximum number of virtual machines for account: " + account.getAccountName() + " has been exceeded."); + rae.setResourceType("vm"); + throw rae; + } + + boolean isIso = Storage.ImageFormat.ISO.equals(template.getFormat()); + numVolumes = (isIso || (diskOffering == null)) ? 1 : 2; + _accountMgr.incrementResourceCount(account.getId(), ResourceType.user_vm); + _accountMgr.incrementResourceCount(account.getId(), ResourceType.volume, numVolumes); + txn.commit(); + + name = VirtualMachineName.getVmName(vmId, accountId, _instance); + + String diskOfferingIdentifier = (diskOffering != null) ? String.valueOf(diskOffering.getId()) : "-1"; + String eventParams = "id=" + vmId + "\nvmName=" + name + "\nsoId=" + serviceOfferingId + "\ndoId=" + diskOfferingIdentifier + "\ntId=" + template.getId() + "\ndcId=" + dataCenterId; + EventVO event = new EventVO(); + event.setUserId(userId); + event.setAccountId(accountId); + event.setStartId(startEventId); + event.setState(EventState.Completed); + event.setType(EventTypes.EVENT_VM_CREATE); + event.setParameters(eventParams); + + try { + Pair pod = null; + long poolid = 0; + Set podsToAvoid = new HashSet(); + + while ((pod = _agentMgr.findPod(template, offering, dc, account.getId(), podsToAvoid)) != null) { + if (vm == null) { + vm = new UserVmVO(vmId, name, template.getId(), guestOSId, accountId, account.getDomainId().longValue(), + serviceOfferingId, null, null, router.getGuestNetmask(), + null,null,null, + routerId, pod.first().getId(), dataCenterId, + offering.getOfferHA(), displayName, group, userData); + + if (diskOffering != null) { + vm.setMirroredVols(diskOffering.isMirrored()); + } + + vm.setLastHostId(pod.second()); + + vm = _vmDao.persist(vm); + } else { + vm.setPodId(pod.first().getId()); + _vmDao.updateIf(vm, Event.OperationRetry, null); + } + + String ipAddressStr = acquireGuestIpAddress(dataCenterId, accountId, vm); + if (ipAddressStr == null) { + s_logger.warn("Failed user vm creation : no guest ip address available"); + releaseGuestIpAddress(vm); + ResourceAllocationException rae = new ResourceAllocationException("No guest ip addresses available for " + account.getAccountName() + " (try destroying some instances)"); + rae.setResourceType("vm"); + throw rae; + } + + poolid = _storageMgr.createUserVM(account, vm, template, dc, pod.first(), offering, diskOffering, avoids); + if ( poolid != 0) { + break; + } + if (s_logger.isDebugEnabled()) { + 
s_logger.debug("Unable to find storage host in pod " + pod.first().getName() + " (id:" + pod.first().getId() + ") while creating " + vm.toString() + ", checking other pods"); + } + + // if it fails at storage allocation round, reset lastHostId to "release" + // the CPU/memory allocation on the candidate host + vm.setLastHostId(null); + _vmDao.update(vm.getId(), vm); + + podsToAvoid.add(pod.first().getId()); + } + + if ((vm == null) || (poolid == 0)) { + throw new ResourceAllocationException("Create VM " + ((vm == null) ? vmId : vm.toString()) + " failed due to no Storage Pool is available"); + } + + txn.start(); + if(vm != null && vm.getName() != null && vm.getDisplayName() != null) + { + if(!vm.getName().equals(vm.getDisplayName())) + event.setDescription("successfully created VM instance : " + vm.getName()+"("+vm.getDisplayName()+")"); + else + event.setDescription("successfully created VM instance : " + vm.getName()); + } + else + { + event.setDescription("successfully created VM instance :"+name); + } + + _eventDao.persist(event); + + _vmDao.updateIf(vm, Event.OperationSucceeded, null); + if (s_logger.isDebugEnabled()) { + s_logger.debug("vm created " + vmId); + } + txn.commit(); + + return _vmDao.findById(vmId); + } catch (Throwable th) { + s_logger.error("Unable to create vm", th); + if (vm != null) { + _vmDao.delete(vmId); + } + _accountMgr.decrementResourceCount(account.getId(), ResourceType.user_vm); + _accountMgr.decrementResourceCount(account.getId(), ResourceType.volume, numVolumes); + + String eventDescription = "Failed to create VM: "; + if (vm == null) { + eventDescription += "new instance"; + } else { + eventDescription += vm.getName(); + if (!vm.getName().equals(vm.getDisplayName())) { + eventDescription += " (" + vm.getDisplayName() + ")"; + } + } + + if (th instanceof ResourceAllocationException) { + throw (ResourceAllocationException)th; + } + throw new CloudRuntimeException("Unable to create vm", th); + } + */ + + return null; } @Override @DB - public UserVmVO createVirtualMachine(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List avoids, long startEventId) throws InternalErrorException, ResourceAllocationException { + public UserVmVO createVirtualMachine(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List avoids, long startEventId, long size) throws InternalErrorException, ResourceAllocationException { long accountId = account.getId(); long dataCenterId = dc.getId(); long serviceOfferingId = offering.getId(); @@ -1300,7 +1471,7 @@ public class UserVmManagerImpl implements UserVmManager { throw rae; } - poolid = _storageMgr.createUserVM(account, vm, template, dc, pod.first(), offering, diskOffering, avoids); + poolid = _storageMgr.createUserVM(account, vm, template, dc, pod.first(), offering, diskOffering, avoids,size); if ( poolid != 0) { break; } @@ -1526,8 +1697,9 @@ public class UserVmManagerImpl implements UserVmManager { } @Override @DB - public boolean recoverVirtualMachine(long vmId) throws ResourceAllocationException { + public boolean recoverVirtualMachine(long vmId) throws ResourceAllocationException, InternalErrorException { UserVmVO vm = _vmDao.findById(vmId); + if (vm == null || vm.getRemoved() != null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Unable to find vm 
or vm is removed: " + vmId); @@ -1558,6 +1730,10 @@ public class UserVmManagerImpl implements UserVmManager { account = _accountDao.lock(vm.getAccountId(), true); + //if the account is deleted, throw error + if(account.getRemoved()!=null) + throw new InternalErrorException("Unable to recover VM as the account is deleted"); + // First check that the maximum number of UserVMs for the given accountId will not be exceeded if (_accountMgr.resourceLimitExceeded(account, ResourceType.user_vm)) { ResourceAllocationException rae = new ResourceAllocationException("Maximum number of virtual machines for account: " + account.getAccountName() + " has been exceeded."); @@ -2382,7 +2558,7 @@ public class UserVmManagerImpl implements UserVmManager { @DB @Override - public UserVmVO createDirectlyAttachedVM(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List a, List networkGroups, long startEventId) throws InternalErrorException, ResourceAllocationException { + public UserVmVO createDirectlyAttachedVM(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List a, List networkGroups, long startEventId, long size) throws InternalErrorException, ResourceAllocationException { long accountId = account.getId(); long dataCenterId = dc.getId(); @@ -2509,7 +2685,7 @@ public class UserVmManagerImpl implements UserVmManager { vm = _vmDao.findById(vmId); try { - poolId = _storageMgr.createUserVM(account, vm, template, dc, pod.first(), offering, diskOffering, a); + poolId = _storageMgr.createUserVM(account, vm, template, dc, pod.first(), offering, diskOffering, a,size); } catch (CloudRuntimeException e) { _vmDao.delete(vmId); _ipAddressDao.unassignIpAddress(guestIp); @@ -2582,7 +2758,7 @@ public class UserVmManagerImpl implements UserVmManager { @DB @Override - public UserVmVO createDirectlyAttachedVMExternal(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List a, List networkGroups, long startEventId) throws InternalErrorException, ResourceAllocationException { + public UserVmVO createDirectlyAttachedVMExternal(Long vmId, long userId, AccountVO account, DataCenterVO dc, ServiceOfferingVO offering, VMTemplateVO template, DiskOfferingVO diskOffering, String displayName, String group, String userData, List a, List networkGroups, long startEventId, long size) throws InternalErrorException, ResourceAllocationException { long accountId = account.getId(); long dataCenterId = dc.getId(); long serviceOfferingId = offering.getId(); @@ -2668,7 +2844,7 @@ public class UserVmManagerImpl implements UserVmManager { vm = _vmDao.findById(vmId); try { - poolId = _storageMgr.createUserVM(account, vm, template, dc, pod.first(), offering, diskOffering, a); + poolId = _storageMgr.createUserVM(account, vm, template, dc, pod.first(), offering, diskOffering,a,size); } catch (CloudRuntimeException e) { _vmDao.delete(vmId); _accountMgr.decrementResourceCount(account.getId(), ResourceType.user_vm); diff --git a/ui/content/tab_accounts.html b/ui/content/tab_accounts.html index 91857da6f63..5a63bb419c5 100644 --- a/ui/content/tab_accounts.html +++ b/ui/content/tab_accounts.html @@ -1,3 +1,5 @@ + +
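
The storage-related hunks above follow two recurring patterns: an optional explicit volume size that falls back to the disk offering's size when no override is given, and routing of snapshot commands on KVM to the host where the VM currently resides instead of an arbitrary host attached to the storage pool. The sketch below restates those two decisions in isolation so they can be read outside the diff. It is illustrative only, not code from the patch; the stand-in types (HypervisorType, Command, the two nested snapshot command classes) are simplified assumptions used in place of the real CloudStack classes shown in the hunks.

    // Illustrative sketch only -- not part of the patch. Restates two decisions from the
    // StorageManagerImpl changes above with minimal stand-in types so the logic is self-contained.
    public class StorageChangeSketch {

        enum HypervisorType { KVM, XEN_SERVER, VMWARE }            // stand-in for Hypervisor.Type
        interface Command {}                                       // stand-in for the agent Command class
        static class ManageSnapshotCommand implements Command {}   // stand-ins for the snapshot commands
        static class BackupSnapshotCommand implements Command {}

        // Effective size of a new volume: an explicit request wins, otherwise the offering's size.
        static long effectiveVolumeSize(long requestedSize, long offeringSizeInBytes) {
            return (requestedSize > 0) ? requestedSize : offeringSizeInBytes;
        }

        // On KVM, snapshot management/backup commands are sent to the host the VM resides on
        // (compare sendToVmResidesOn and chooseHostForStoragePool in the diff above).
        static boolean sendToVmResidesOn(HypervisorType hypervisorType, Command cmd) {
            return hypervisorType == HypervisorType.KVM
                    && (cmd instanceof ManageSnapshotCommand || cmd instanceof BackupSnapshotCommand);
        }

        public static void main(String[] args) {
            System.out.println(effectiveVolumeSize(0, 5368709120L));   // no override: falls back to the offering size
            System.out.println(sendToVmResidesOn(HypervisorType.KVM, new BackupSnapshotCommand())); // true
        }
    }

In the actual change, the same fallback appears as (size>0)? size : diskOffering.getDiskSizeInBytes() when the root and data volumes are built, and the routing decision is made by sendToVmResidesOn(Command), which chooseHostForStoragePool and sendToHostsOnStoragePool consult before falling back to any Up host attached to the pool.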