diff --git a/INSTALL.md b/INSTALL.md index 37415dc25ec..893f5551e44 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -96,25 +96,22 @@ Clean and build: $ mvn clean install -P systemvm,developer -In case you want support for VMWare, SRX and other non-Apache (referred to as nonoss) -compliant libs, you may download the following jar artifacts from respective vendors: +CloudStack supports several plugins that depend on libraries with distribution restrictions. +Because of this, they are not included in the default build. To enable these additional plugins, +activate their respective profiles. For convenience, adding -Dnoredist will enable all plugins +that depend on libraries with distribution restrictions. The build procedure expects that the +required libraries are present in the maven repository. - deps/cloud-iControl.jar - deps/cloud-manageontap.jar - deps/cloud-netscaler-sdx.jar - deps/cloud-netscaler.jar - deps/vmware-apputils.jar - deps/vmware-vim.jar - deps/vmware-vim25.jar - -Install them to ~/.m2 so maven can get them as dependencies: +The following procedure can be used to add the libraries to the local maven repository. Details +on obtaining the required libraries can be found in this file. Note that this will vary between +releases of CloudStack. $ cd deps $ ./install-non-oss.sh -To build with nonoss components, use the build command with the nonoss flag: +To build all non-redistributable components, add the noredist flag to the build command: - $ mvn clean install -P systemvm,developer -Dnonoss + $ mvn clean install -P systemvm,developer -Dnoredist Clear old database (if any) and deploy the database schema: @@ -153,7 +150,7 @@ This section describes packaging and installation. 
To create debs: - $ mvn -P deps # -D nonoss, for nonoss as described in the "Building" section above + $ mvn -P deps # -D noredist, for noredist as described in the "Building" section above $ dpkg-buildpackage All the deb packages will be created in ../$PWD @@ -183,15 +180,15 @@ Install needed packages, apt-get upgrade for upgrading: To create rpms: - $ mvn -P deps # -D nonoss, for nonoss as described in the "Building" section above - $ ./waf rpm + $ cd packaging/centos63 + $ bash packaging.sh [ -p NOREDIST ] -All the rpm packages will be create in artifacts/rpmbuild/RPMS/x86_64 +All the rpm packages will be create in dist/rpmbuild/RPMS/x86_64 To create a yum repo: (assuming appropriate user privileges) $ path=/path/to/your/webserver/cloudstack - $ cd artifacts/rpmbuild/RPMS/x86_64 + $ cd dist/rpmbuild/RPMS/x86_64 $ mv *.rpm $path $ createrepo $path @@ -208,10 +205,10 @@ Installation: Install needed packages: $ yum update - $ yum install cloud-client # management server + $ yum install cloudstack-management # management server $ yum install mysql-server # mysql server - $ yum install cloud-agent # agent (kvm) - $ yum install cloud-usage # usage server + $ yum install cloudstack-agent # agent (kvm) + $ yum install cloudstack-usage # usage server ## Installing CloudMonkey CLI diff --git a/agent/bindir/cloudstack-agent-upgrade.in b/agent/bindir/cloudstack-agent-upgrade.in new file mode 100644 index 00000000000..72b0fae5853 --- /dev/null +++ b/agent/bindir/cloudstack-agent-upgrade.in @@ -0,0 +1,63 @@ +#!/usr/bin/python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from cloudutils.networkConfig import networkConfig +from cloudutils.utilities import bash +import logging +import re +def isOldStyleBridge(brName): + if brName.find("cloudVirBr") == 0: + return True + else: + return False +def upgradeBridgeName(brName, enslavedDev): + print("upgrade bridge: %s, %s"%(brName, enslavedDev)) + vlanId = brName.replace("cloudVirBr", "") + print("find vlan Id: %s"%vlanId) + phyDev = enslavedDev.split(".")[0] + print("find physical device %s"%phyDev) + newBrName = "br" + phyDev + "-" + vlanId + print("new bridge name %s"%newBrName) + bash("ip link set %s down"%brName) + bash("ip link set %s name %s"%(brName, newBrName)) + bash("ip link set %s up" %newBrName) + cmd = "iptables-save | grep FORWARD | grep -w " + brName + rules = bash(cmd).stdout.split('\n') + rules.pop() + for rule in rules: + try: + delrule = re.sub("-A", "-D", rule) + newrule = re.sub(" " + brName + " ", " " + newBrName + " ", rule) + bash("iptables " + delrule) + bash("iptables " + newrule) + except: + logging.exception("Ignoring failure to update rules for rule " + rule + " on bridge " + brName) +if __name__ == '__main__': + netlib = networkConfig() + bridges = netlib.listNetworks() + bridges = filter(isOldStyleBridge, bridges) + for br in bridges: + enslavedDev = netlib.getEnslavedDev(br, 1) + if enslavedDev is not None: + upgradeBridgeName(br, enslavedDev) + + bridges = netlib.listNetworks() + bridges = filter(isOldStyleBridge, bridges) + if len(bridges) > 0: + print("Warning: upgrade is not finished, still some bridges have the old style name:" + str(bridges)) 
+ else: + print("Upgrade succeed") diff --git a/agent/bindir/libvirtqemuhook.in b/agent/bindir/libvirtqemuhook.in new file mode 100755 index 00000000000..7bf9634fdf5 --- /dev/null +++ b/agent/bindir/libvirtqemuhook.in @@ -0,0 +1,53 @@ +#!/usr/bin/python +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+import sys +from xml.dom.minidom import parse +from cloudutils.configFileOps import configFileOps +from cloudutils.networkConfig import networkConfig +def isOldStyleBridge(brName): + if brName.find("cloudVirBr") == 0: + return True + else: + return False +def getGuestNetworkDevice(): + netlib = networkConfig() + cfo = configFileOps("/etc/cloudstack/agent/agent.properties") + guestDev = cfo.getEntry("guest.network.device") + enslavedDev = netlib.getEnslavedDev(guestDev, 1) + return enslavedDev +def handleMigrateBegin(): + try: + domain = parse(sys.stdin) + for interface in domain.getElementsByTagName("interface"): + source = interface.getElementsByTagName("source")[0] + bridge = source.getAttribute("bridge") + if not isOldStyleBridge(bridge): + continue + vlanId = bridge.replace("cloudVirBr","") + phyDev = getGuestNetworkDevice() + newBrName="br" + phyDev + "-" + vlanId + source.setAttribute("bridge", newBrName) + print(domain.toxml()) + except: + pass +if __name__ == '__main__': + if len(sys.argv) != 5: + sys.exit(0) + + if sys.argv[2] == "migrate" and sys.argv[3] == "begin": + handleMigrateBegin() diff --git a/agent/pom.xml b/agent/pom.xml index 7b00a93963f..14133226053 100644 --- a/agent/pom.xml +++ b/agent/pom.xml @@ -36,6 +36,10 @@ cloud-utils ${project.version} + + commons-io + commons-io + commons-daemon commons-daemon diff --git a/agent/scripts/run.sh b/agent/scripts/run.sh deleted file mode 100755 index 1fa427539fd..00000000000 --- a/agent/scripts/run.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -#run.sh runs the agent client. -java $1 -Xms128M -Xmx384M -cp cglib-nodep-2.2.jar:trilead-ssh2-build213.jar:cloud-api.jar:cloud-core-extras.jar:cloud-utils.jar:cloud-agent.jar:cloud-console-proxy.jar:cloud-console-common.jar:freemarker.jar:log4j-1.2.15.jar:ws-commons-util-1.0.2.jar:xmlrpc-client-3.1.3.jar:cloud-core.jar:xmlrpc-common-3.1.3.jar:javaee-api-5.0-1.jar:gson-1.3.jar:commons-httpclient-3.1.jar:commons-logging-1.1.1.jar:commons-codec-1.4.jar:commons-collections-3.2.1.jar:commons-pool-1.4.jar:apache-log4j-extras-1.0.jar:libvirt-0.4.5.jar:jna.jar:.:/etc/cloud:./*:/usr/share/java/*:./conf com.cloud.agent.AgentShell diff --git a/agent/src/com/cloud/agent/Agent.java b/agent/src/com/cloud/agent/Agent.java index f309474fbc5..c4f17b24ae7 100755 --- a/agent/src/com/cloud/agent/Agent.java +++ b/agent/src/com/cloud/agent/Agent.java @@ -26,7 +26,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Timer; -import java.util.TimerTask; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.SynchronousQueue; @@ -36,6 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger; import javax.naming.ConfigurationException; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.log4j.Logger; import com.cloud.agent.api.AgentControlAnswer; @@ -45,7 +45,6 @@ import com.cloud.agent.api.Command; import com.cloud.agent.api.CronCommand; import com.cloud.agent.api.MaintainAnswer; import com.cloud.agent.api.MaintainCommand; -import 
com.cloud.agent.api.ModifySshKeysCommand; import com.cloud.agent.api.PingCommand; import com.cloud.agent.api.ReadyCommand; import com.cloud.agent.api.ShutdownCommand; @@ -731,7 +730,7 @@ public class Agent implements HandlerFactory, IAgentControl { } } - public class WatchTask extends TimerTask { + public class WatchTask extends ManagedContextTimerTask { protected Request _request; protected Agent _agent; protected Link _link; @@ -744,7 +743,7 @@ public class Agent implements HandlerFactory, IAgentControl { } @Override - public void run() { + protected void runInContext() { if (s_logger.isTraceEnabled()) { s_logger.trace("Scheduling " + (_request instanceof Response ? "Ping" : "Watch Task")); } @@ -760,7 +759,7 @@ public class Agent implements HandlerFactory, IAgentControl { } } - public class StartupTask extends TimerTask { + public class StartupTask extends ManagedContextTimerTask { protected Link _link; protected volatile boolean cancelled = false; @@ -782,7 +781,7 @@ public class Agent implements HandlerFactory, IAgentControl { } @Override - public synchronized void run() { + protected synchronized void runInContext() { if (!cancelled) { if (s_logger.isInfoEnabled()) { s_logger.info("The startup command is now cancelled"); diff --git a/agent/src/com/cloud/agent/AgentShell.java b/agent/src/com/cloud/agent/AgentShell.java index bf1e8180e44..900a13f4ab1 100644 --- a/agent/src/com/cloud/agent/AgentShell.java +++ b/agent/src/com/cloud/agent/AgentShell.java @@ -19,14 +19,12 @@ package com.cloud.agent; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.Collections; -import java.util.Date; import java.util.Enumeration; import java.util.HashMap; import java.util.List; @@ -39,9 +37,8 @@ import 
javax.naming.ConfigurationException; import org.apache.commons.daemon.Daemon; import org.apache.commons.daemon.DaemonContext; import org.apache.commons.daemon.DaemonInitException; -import org.apache.commons.httpclient.HttpClient; -import org.apache.commons.httpclient.MultiThreadedHttpConnectionManager; -import org.apache.commons.httpclient.methods.GetMethod; +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang.math.NumberUtils; import org.apache.log4j.Logger; import org.apache.log4j.xml.DOMConfigurator; @@ -56,12 +53,10 @@ import com.cloud.utils.PropertiesUtil; import com.cloud.utils.backoff.BackoffAlgorithm; import com.cloud.utils.backoff.impl.ConstantTimeBackoff; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.script.Script; public class AgentShell implements IAgentShell, Daemon { private static final Logger s_logger = Logger.getLogger(AgentShell.class .getName()); - private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager(); private final Properties _properties = new Properties(); private final Map _cmdLineProperties = new HashMap(); @@ -172,7 +167,7 @@ public class AgentShell implements IAgentShell, Daemon { _storage.persist(name, value); } - private void loadProperties() throws ConfigurationException { + void loadProperties() throws ConfigurationException { final File file = PropertiesUtil.findConfigFile("agent.properties"); if (file == null) { throw new ConfigurationException("Unable to find agent.properties."); @@ -180,14 +175,18 @@ public class AgentShell implements IAgentShell, Daemon { s_logger.info("agent.properties found at " + file.getAbsolutePath()); + InputStream propertiesStream = null; try { - _properties.load(new FileInputStream(file)); + propertiesStream = new FileInputStream(file); + _properties.load(propertiesStream); } catch (final FileNotFoundException ex) { throw new CloudRuntimeException("Cannot find the file: " + 
file.getAbsolutePath(), ex); } catch (final IOException ex) { throw new CloudRuntimeException("IOException in reading " + file.getAbsolutePath(), ex); + } finally { + IOUtils.closeQuietly(propertiesStream); } } @@ -199,30 +198,32 @@ public class AgentShell implements IAgentShell, Daemon { String zone = null; String pod = null; String guid = null; - for (int i = 0; i < args.length; i++) { - final String[] tokens = args[i].split("="); + for (String param : args) { + final String[] tokens = param.split("="); if (tokens.length != 2) { - System.out.println("Invalid Parameter: " + args[i]); + System.out.println("Invalid Parameter: " + param); continue; } + final String paramName = tokens[0]; + final String paramValue = tokens[1]; // save command line properties - _cmdLineProperties.put(tokens[0], tokens[1]); + _cmdLineProperties.put(paramName, paramValue); - if (tokens[0].equalsIgnoreCase("port")) { - port = tokens[1]; - } else if (tokens[0].equalsIgnoreCase("threads") || tokens[0].equalsIgnoreCase("workers")) { - workers = tokens[1]; - } else if (tokens[0].equalsIgnoreCase("host")) { - host = tokens[1]; - } else if (tokens[0].equalsIgnoreCase("zone")) { - zone = tokens[1]; - } else if (tokens[0].equalsIgnoreCase("pod")) { - pod = tokens[1]; - } else if (tokens[0].equalsIgnoreCase("guid")) { - guid = tokens[1]; - } else if (tokens[0].equalsIgnoreCase("eth1ip")) { - _privateIp = tokens[1]; + if (paramName.equalsIgnoreCase("port")) { + port = paramValue; + } else if (paramName.equalsIgnoreCase("threads") || paramName.equalsIgnoreCase("workers")) { + workers = paramValue; + } else if (paramName.equalsIgnoreCase("host")) { + host = paramValue; + } else if (paramName.equalsIgnoreCase("zone")) { + zone = paramValue; + } else if (paramName.equalsIgnoreCase("pod")) { + pod = paramValue; + } else if (paramName.equalsIgnoreCase("guid")) { + guid = paramValue; + } else if (paramName.equalsIgnoreCase("eth1ip")) { + _privateIp = paramValue; } } @@ -230,16 +231,16 @@ public class 
AgentShell implements IAgentShell, Daemon { port = getProperty(null, "port"); } - _port = NumbersUtil.parseInt(port, 8250); + _port = NumberUtils.toInt(port, 8250); - _proxyPort = NumbersUtil.parseInt( + _proxyPort = NumberUtils.toInt( getProperty(null, "consoleproxy.httpListenPort"), 443); if (workers == null) { workers = getProperty(null, "workers"); } - _workers = NumbersUtil.parseInt(workers, 5); + _workers = NumberUtils.toInt(workers, 5); if (host == null) { host = getProperty(null, "host"); @@ -309,7 +310,7 @@ public class AgentShell implements IAgentShell, Daemon { // For KVM agent, do it specially here File file = new File("/etc/cloudstack/agent/log4j-cloud.xml"); - if(file == null || !file.exists()) { + if(!file.exists()) { file = PropertiesUtil.findConfigFile("log4j-cloud.xml"); } DOMConfigurator.configureAndWatch(file.getAbsolutePath()); diff --git a/agent/src/com/cloud/agent/dao/impl/PropertiesStorage.java b/agent/src/com/cloud/agent/dao/impl/PropertiesStorage.java index 2bf26f48642..411d946a294 100755 --- a/agent/src/com/cloud/agent/dao/impl/PropertiesStorage.java +++ b/agent/src/com/cloud/agent/dao/impl/PropertiesStorage.java @@ -17,7 +17,6 @@ package com.cloud.agent.dao.impl; import java.io.File; -import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; @@ -26,6 +25,7 @@ import java.util.Properties; import javax.ejb.Local; +import org.apache.commons.io.IOUtils; import org.apache.log4j.Logger; import com.cloud.agent.dao.StorageComponent; @@ -59,18 +59,10 @@ public class PropertiesStorage implements StorageComponent { _properties.store(output, _name); output.flush(); output.close(); - } catch (FileNotFoundException e) { - s_logger.error("Who deleted the file? ", e); } catch (IOException e) { s_logger.error("Uh-oh: ", e); } finally { - if (output != null) { - try { - output.close(); - } catch (IOException e) { - // ignore. 
- } - } + IOUtils.closeQuietly(output); } } @@ -99,7 +91,7 @@ public class PropertiesStorage implements StorageComponent { } try { - _properties.load(new FileInputStream(file)); + PropertiesUtil.loadFromFile(_properties, file); _file = file; } catch (FileNotFoundException e) { s_logger.error("How did we get here? ", e); diff --git a/agent/src/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java b/agent/src/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java index ee5c36176c8..6f49f47a1ed 100644 --- a/agent/src/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java +++ b/agent/src/com/cloud/agent/resource/consoleproxy/ConsoleProxyResource.java @@ -32,6 +32,7 @@ import java.util.Properties; import javax.naming.ConfigurationException; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import com.cloud.agent.Agent.ExitStatus; @@ -357,8 +358,9 @@ public class ConsoleProxyResource extends ServerResourceBase implements private void launchConsoleProxy(final byte[] ksBits, final String ksPassword, final String encryptorPassword) { final Object resource = this; if (_consoleProxyMain == null) { - _consoleProxyMain = new Thread(new Runnable() { - public void run() { + _consoleProxyMain = new Thread(new ManagedContextRunnable() { + @Override + protected void runInContext() { try { Class consoleProxyClazz = Class.forName("com.cloud.consoleproxy.ConsoleProxy"); try { diff --git a/agent/test/com/cloud/agent/AgentShellTest.java b/agent/test/com/cloud/agent/AgentShellTest.java new file mode 100644 index 00000000000..d92accbd7e8 --- /dev/null +++ b/agent/test/com/cloud/agent/AgentShellTest.java @@ -0,0 +1,48 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.agent; + +import java.util.UUID; + +import javax.naming.ConfigurationException; + +import junit.framework.Assert; + +import org.junit.Test; + +public class AgentShellTest { + @Test + public void parseCommand() throws ConfigurationException { + AgentShell shell = new AgentShell(); + UUID anyUuid = UUID.randomUUID(); + shell.parseCommand(new String[] { "port=55555", "threads=4", + "host=localhost", "pod=pod1", "guid=" + anyUuid, "zone=zone1" }); + Assert.assertEquals(55555, shell.getPort()); + Assert.assertEquals(4, shell.getWorkers()); + Assert.assertEquals("localhost", shell.getHost()); + Assert.assertEquals(anyUuid.toString(), shell.getGuid()); + Assert.assertEquals("pod1", shell.getPod()); + Assert.assertEquals("zone1", shell.getZone()); + } + @Test + public void loadProperties() throws ConfigurationException { + AgentShell shell = new AgentShell(); + shell.loadProperties(); + Assert.assertNotNull(shell.getProperties()); + Assert.assertFalse(shell.getProperties().entrySet().isEmpty()); + } +} diff --git a/agent/test/com/cloud/agent/dao/impl/PropertiesStorageTest.java b/agent/test/com/cloud/agent/dao/impl/PropertiesStorageTest.java new file mode 100644 index 00000000000..adaebc61287 --- /dev/null +++ b/agent/test/com/cloud/agent/dao/impl/PropertiesStorageTest.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.agent.dao.impl; + +import java.io.File; +import java.io.IOException; +import java.util.HashMap; + +import junit.framework.Assert; + +import org.apache.commons.io.FileUtils; +import org.junit.Test; + +public class PropertiesStorageTest { + @Test + public void configureWithNotExistingFile() { + String fileName = "target/notyetexistingfile" + + System.currentTimeMillis(); + File file = new File(fileName); + + PropertiesStorage storage = new PropertiesStorage(); + HashMap params = new HashMap(); + params.put("path", fileName); + Assert.assertTrue(storage.configure("test", params)); + Assert.assertTrue(file.exists()); + storage.persist("foo", "bar"); + Assert.assertEquals("bar", storage.get("foo")); + + storage.stop(); + file.delete(); + } + + @Test + public void configureWithExistingFile() throws IOException { + String fileName = "target/existingfile" + + System.currentTimeMillis(); + File file = new File(fileName); + + FileUtils.writeStringToFile(file, "a=b\n\n"); + + PropertiesStorage storage = new PropertiesStorage(); + HashMap params = new HashMap(); + params.put("path", fileName); + Assert.assertTrue(storage.configure("test", params)); + Assert.assertEquals("b", storage.get("a")); + Assert.assertTrue(file.exists()); + 
storage.persist("foo", "bar"); + Assert.assertEquals("bar", storage.get("foo")); + + storage.stop(); + file.delete(); + } +} diff --git a/docs/runbook/publican.cfg b/api/resources/META-INF/cloudstack/api-planner/module.properties similarity index 76% rename from docs/runbook/publican.cfg rename to api/resources/META-INF/cloudstack/api-planner/module.properties index 72722cd8ab5..8eed8791149 100644 --- a/docs/runbook/publican.cfg +++ b/api/resources/META-INF/cloudstack/api-planner/module.properties @@ -1,13 +1,12 @@ -# Config::Simple 4.59 -# Fri May 25 12:50:59 2012 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# +# distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an @@ -15,8 +14,5 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
- -xml_lang: "en-US" -type: Book -brand: cloudstack - +name=api-planner +parent=planner \ No newline at end of file diff --git a/api/resources/META-INF/cloudstack/api-planner/spring-api-planner-context.xml b/api/resources/META-INF/cloudstack/api-planner/spring-api-planner-context.xml new file mode 100644 index 00000000000..2fd34a8ee0a --- /dev/null +++ b/api/resources/META-INF/cloudstack/api-planner/spring-api-planner-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/api/src/com/cloud/agent/api/to/DataStoreTO.java b/api/src/com/cloud/agent/api/to/DataStoreTO.java index 9014f8e2b81..b79ba7d64be 100644 --- a/api/src/com/cloud/agent/api/to/DataStoreTO.java +++ b/api/src/com/cloud/agent/api/to/DataStoreTO.java @@ -20,7 +20,7 @@ package com.cloud.agent.api.to; import com.cloud.storage.DataStoreRole; - public interface DataStoreTO { public DataStoreRole getRole(); + public String getUuid(); } diff --git a/api/src/com/cloud/agent/api/to/DiskTO.java b/api/src/com/cloud/agent/api/to/DiskTO.java index 556ccd4db46..a577689bdef 100644 --- a/api/src/com/cloud/agent/api/to/DiskTO.java +++ b/api/src/com/cloud/agent/api/to/DiskTO.java @@ -18,21 +18,35 @@ */ package com.cloud.agent.api.to; +import java.util.Map; + import com.cloud.storage.Volume; public class DiskTO { + public static final String CHAP_INITIATOR_USERNAME = "chapInitiatorUsername"; + public static final String CHAP_INITIATOR_SECRET = "chapInitiatorSecret"; + public static final String CHAP_TARGET_USERNAME = "chapTargetUsername"; + public static final String CHAP_TARGET_SECRET = "chapTargetSecret"; + public static final String MANAGED = "managed"; + public static final String IQN = "iqn"; + public static final String STORAGE_HOST = "storageHost"; + public static final String STORAGE_PORT = "storagePort"; + public static final String VOLUME_SIZE = "volumeSize"; + private DataTO data; private Long diskSeq; - private String vdiUuid; + private String path; private Volume.Type type; + private Map _details; + public 
DiskTO() { } - public DiskTO(DataTO data, Long diskSeq, String vdiUuid, Volume.Type type) { + public DiskTO(DataTO data, Long diskSeq, String path, Volume.Type type) { this.data = data; this.diskSeq = diskSeq; - this.vdiUuid = vdiUuid; + this.path = path; this.type = type; } @@ -52,12 +66,12 @@ public class DiskTO { this.diskSeq = diskSeq; } - public String getVdiUuid() { - return vdiUuid; + public String getPath() { + return path; } - public void setVdiUuid(String vdiUuid) { - this.vdiUuid = vdiUuid; + public void setPath(String path) { + this.path = path; } public Volume.Type getType() { @@ -67,4 +81,12 @@ public class DiskTO { public void setType(Volume.Type type) { this.type = type; } + + public void setDetails(Map details) { + _details = details; + } + + public Map getDetails() { + return _details; + } } diff --git a/api/src/com/cloud/agent/api/to/NfsTO.java b/api/src/com/cloud/agent/api/to/NfsTO.java index 415c95ce3f5..54683c7f410 100644 --- a/api/src/com/cloud/agent/api/to/NfsTO.java +++ b/api/src/com/cloud/agent/api/to/NfsTO.java @@ -22,6 +22,7 @@ public class NfsTO implements DataStoreTO { private String _url; private DataStoreRole _role; + private String uuid; public NfsTO() { @@ -55,6 +56,12 @@ public class NfsTO implements DataStoreTO { this._role = _role; } + @Override + public String getUuid() { + return uuid; + } - + public void setUuid(String uuid) { + this.uuid = uuid; + } } diff --git a/api/src/com/cloud/agent/api/to/S3TO.java b/api/src/com/cloud/agent/api/to/S3TO.java index b1b692a8bad..350b9ca8b60 100644 --- a/api/src/com/cloud/agent/api/to/S3TO.java +++ b/api/src/com/cloud/agent/api/to/S3TO.java @@ -39,6 +39,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO { private Integer socketTimeout; private Date created; private boolean enableRRS; + private long maxSingleUploadSizeInBytes; public S3TO() { @@ -50,7 +51,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO { final String secretKey, final String 
endPoint, final String bucketName, final Boolean httpsFlag, final Integer connectionTimeout, final Integer maxErrorRetry, - final Integer socketTimeout, final Date created, final boolean enableRRS) { + final Integer socketTimeout, final Date created, final boolean enableRRS, final long maxUploadSize) { super(); @@ -66,6 +67,7 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO { this.socketTimeout = socketTimeout; this.created = created; this.enableRRS = enableRRS; + this.maxSingleUploadSizeInBytes = maxUploadSize; } @@ -268,7 +270,6 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO { } - public boolean getEnableRRS() { return enableRRS; } @@ -277,5 +278,28 @@ public final class S3TO implements S3Utils.ClientOptions, DataStoreTO { this.enableRRS = enableRRS; } + public long getMaxSingleUploadSizeInBytes() { + return maxSingleUploadSizeInBytes; + } + public void setMaxSingleUploadSizeInBytes(long maxSingleUploadSizeInBytes) { + this.maxSingleUploadSizeInBytes = maxSingleUploadSizeInBytes; + } + + public boolean getSingleUpload(long objSize){ + if ( maxSingleUploadSizeInBytes < 0 ){ + // always use single part upload + return true; + } else if ( maxSingleUploadSizeInBytes == 0 ){ + // always use multi part upload + return false; + } else { + // check object size to set flag + if (objSize < maxSingleUploadSizeInBytes){ + return true; + } else{ + return false; + } + } + } } diff --git a/api/src/com/cloud/agent/api/to/SwiftTO.java b/api/src/com/cloud/agent/api/to/SwiftTO.java index 7349d7779ac..3ad131ac4d8 100644 --- a/api/src/com/cloud/agent/api/to/SwiftTO.java +++ b/api/src/com/cloud/agent/api/to/SwiftTO.java @@ -29,8 +29,7 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg { public SwiftTO() { } - public SwiftTO(Long id, String url, String account, String userName, String key - ) { + public SwiftTO(Long id, String url, String account, String userName, String key) { this.id = id; this.url = url; 
this.account = account; @@ -46,14 +45,17 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg { return url; } + @Override public String getAccount() { return account; } + @Override public String getUserName() { return userName; } + @Override public String getKey() { return key; } @@ -67,4 +69,9 @@ public class SwiftTO implements DataStoreTO, SwiftUtil.SwiftClientCfg { public String getEndPoint() { return this.url; } + + @Override + public String getUuid() { + return null; + } } diff --git a/api/src/com/cloud/event/EventTypes.java b/api/src/com/cloud/event/EventTypes.java index 076a7c59211..0406c3e852d 100755 --- a/api/src/com/cloud/event/EventTypes.java +++ b/api/src/com/cloud/event/EventTypes.java @@ -76,6 +76,7 @@ public class EventTypes { public static final String EVENT_VM_MIGRATE = "VM.MIGRATE"; public static final String EVENT_VM_MOVE = "VM.MOVE"; public static final String EVENT_VM_RESTORE = "VM.RESTORE"; + public static final String EVENT_VM_EXPUNGE = "VM.EXPUNGE"; // Domain Router public static final String EVENT_ROUTER_CREATE = "ROUTER.CREATE"; @@ -188,6 +189,8 @@ public class EventTypes { public static final String EVENT_VOLUME_DETAIL_UPDATE = "VOLUME.DETAIL.UPDATE"; public static final String EVENT_VOLUME_DETAIL_ADD = "VOLUME.DETAIL.ADD"; public static final String EVENT_VOLUME_DETAIL_REMOVE = "VOLUME.DETAIL.REMOVE"; + public static final String EVENT_VOLUME_UPDATE = "VOLUME.UPDATE"; + // Domains public static final String EVENT_DOMAIN_CREATE = "DOMAIN.CREATE"; @@ -197,6 +200,7 @@ public class EventTypes { // Snapshots public static final String EVENT_SNAPSHOT_CREATE = "SNAPSHOT.CREATE"; public static final String EVENT_SNAPSHOT_DELETE = "SNAPSHOT.DELETE"; + public static final String EVENT_SNAPSHOT_REVERT = "SNAPSHOT.REVERT"; public static final String EVENT_SNAPSHOT_POLICY_CREATE = "SNAPSHOTPOLICY.CREATE"; public static final String EVENT_SNAPSHOT_POLICY_UPDATE = "SNAPSHOTPOLICY.UPDATE"; public static final String 
EVENT_SNAPSHOT_POLICY_DELETE = "SNAPSHOTPOLICY.DELETE"; @@ -456,6 +460,9 @@ public class EventTypes { public static final String EVENT_ACL_GROUP_GRANT = "ACLGROUP.GRANT"; public static final String EVENT_ACL_GROUP_REVOKE = "ACLGROUP.REVOKE"; + // Object store migration + public static final String EVENT_MIGRATE_PREPARE_SECONDARY_STORAGE = "MIGRATE.PREPARE.SS"; + static { // TODO: need a way to force author adding event types to declare the entity details as well, with out braking diff --git a/api/src/com/cloud/exception/ConcurrentOperationException.java b/api/src/com/cloud/exception/ConcurrentOperationException.java index cfe6ba3fa0a..018dba55f2e 100644 --- a/api/src/com/cloud/exception/ConcurrentOperationException.java +++ b/api/src/com/cloud/exception/ConcurrentOperationException.java @@ -17,8 +17,9 @@ package com.cloud.exception; import com.cloud.utils.SerialVersionUID; +import com.cloud.utils.exception.CloudRuntimeException; -public class ConcurrentOperationException extends CloudException { +public class ConcurrentOperationException extends CloudRuntimeException { private static final long serialVersionUID = SerialVersionUID.ConcurrentOperationException; diff --git a/api/src/com/cloud/network/Networks.java b/api/src/com/cloud/network/Networks.java index 7069282a669..0412bf45982 100755 --- a/api/src/com/cloud/network/Networks.java +++ b/api/src/com/cloud/network/Networks.java @@ -108,6 +108,7 @@ public class Networks { }, Mido("mido", String.class), Pvlan("pvlan", String.class), + Vxlan("vxlan", Long.class), UnDecided(null, null); private final String scheme; diff --git a/api/src/com/cloud/network/PhysicalNetwork.java b/api/src/com/cloud/network/PhysicalNetwork.java index f6cb1a6e0b6..55b18e67ba9 100644 --- a/api/src/com/cloud/network/PhysicalNetwork.java +++ b/api/src/com/cloud/network/PhysicalNetwork.java @@ -39,7 +39,8 @@ public interface PhysicalNetwork extends Identity, InternalIdentity { STT, VNS, MIDO, - SSP; + SSP, + VXLAN; } public enum 
BroadcastDomainRange { diff --git a/api/src/com/cloud/network/RemoteAccessVpn.java b/api/src/com/cloud/network/RemoteAccessVpn.java index 058b2f486e6..4f61334db1e 100644 --- a/api/src/com/cloud/network/RemoteAccessVpn.java +++ b/api/src/com/cloud/network/RemoteAccessVpn.java @@ -31,6 +31,7 @@ public interface RemoteAccessVpn extends ControlledEntity, InternalIdentity, Ide String getIpRange(); String getIpsecPresharedKey(); String getLocalIp(); - long getNetworkId(); + Long getNetworkId(); + Long getVpcId(); State getState(); } diff --git a/api/src/com/cloud/network/VirtualRouterProvider.java b/api/src/com/cloud/network/VirtualRouterProvider.java index f67686e6b08..02efb93db5a 100644 --- a/api/src/com/cloud/network/VirtualRouterProvider.java +++ b/api/src/com/cloud/network/VirtualRouterProvider.java @@ -20,14 +20,14 @@ import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; public interface VirtualRouterProvider extends InternalIdentity, Identity { - public enum VirtualRouterProviderType { + public enum Type { VirtualRouter, ElasticLoadBalancerVm, VPCVirtualRouter, InternalLbVm } - public VirtualRouterProviderType getType(); + public Type getType(); public boolean isEnabled(); diff --git a/api/src/com/cloud/network/element/RemoteAccessVPNServiceProvider.java b/api/src/com/cloud/network/element/RemoteAccessVPNServiceProvider.java index 4950ed92cab..b9233755249 100644 --- a/api/src/com/cloud/network/element/RemoteAccessVPNServiceProvider.java +++ b/api/src/com/cloud/network/element/RemoteAccessVPNServiceProvider.java @@ -19,7 +19,6 @@ package com.cloud.network.element; import java.util.List; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.network.Network; import com.cloud.network.RemoteAccessVpn; import com.cloud.network.VpnUser; import com.cloud.utils.component.Adapter; @@ -27,7 +26,7 @@ import com.cloud.utils.component.Adapter; public interface RemoteAccessVPNServiceProvider extends Adapter { String[] 
applyVpnUsers(RemoteAccessVpn vpn, List users) throws ResourceUnavailableException; - boolean startVpn(Network network, RemoteAccessVpn vpn) throws ResourceUnavailableException; + boolean startVpn(RemoteAccessVpn vpn) throws ResourceUnavailableException; - boolean stopVpn(Network network, RemoteAccessVpn vpn) throws ResourceUnavailableException; + boolean stopVpn(RemoteAccessVpn vpn) throws ResourceUnavailableException; } diff --git a/api/src/com/cloud/network/element/VirtualRouterElementService.java b/api/src/com/cloud/network/element/VirtualRouterElementService.java index ea971b89c5d..b0db3d9bce2 100644 --- a/api/src/com/cloud/network/element/VirtualRouterElementService.java +++ b/api/src/com/cloud/network/element/VirtualRouterElementService.java @@ -22,12 +22,12 @@ import org.apache.cloudstack.api.command.admin.router.ConfigureVirtualRouterElem import org.apache.cloudstack.api.command.admin.router.ListVirtualRouterElementsCmd; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.utils.component.PluggableService; public interface VirtualRouterElementService extends PluggableService{ VirtualRouterProvider configure(ConfigureVirtualRouterElementCmd cmd); - VirtualRouterProvider addElement(Long nspId, VirtualRouterProviderType providerType); + VirtualRouterProvider addElement(Long nspId, Type providerType); VirtualRouterProvider getCreatedElement(long id); List searchForVirtualRouterElement(ListVirtualRouterElementsCmd cmd); } diff --git a/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java b/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java index 285e714122a..de7692d38af 100644 --- a/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java +++ b/api/src/com/cloud/network/vpn/RemoteAccessVpnService.java @@ -31,7 +31,7 @@ import com.cloud.utils.Pair; public interface RemoteAccessVpnService { static final String 
RemoteAccessVpnClientIpRangeCK = "remote.access.vpn.client.iprange"; - RemoteAccessVpn createRemoteAccessVpn(long vpnServerAddressId, String ipRange, boolean openFirewall, long networkId) + RemoteAccessVpn createRemoteAccessVpn(long vpnServerAddressId, String ipRange, boolean openFirewall) throws NetworkRuleConflictException; void destroyRemoteAccessVpnForIp(long vpnServerAddressId, Account caller) throws ResourceUnavailableException; RemoteAccessVpn startRemoteAccessVpn(long vpnServerAddressId, boolean openFirewall) throws ResourceUnavailableException; @@ -47,5 +47,4 @@ public interface RemoteAccessVpnService { List listRemoteAccessVpns(long networkId); RemoteAccessVpn getRemoteAccessVpn(long vpnAddrId); - } diff --git a/api/src/com/cloud/offering/NetworkOffering.java b/api/src/com/cloud/offering/NetworkOffering.java index 6c5573e0368..749dae32fc9 100644 --- a/api/src/com/cloud/offering/NetworkOffering.java +++ b/api/src/com/cloud/offering/NetworkOffering.java @@ -130,4 +130,6 @@ public interface NetworkOffering extends InfrastructureEntity, InternalIdentity, boolean getEgressDefaultPolicy(); Integer getConcurrentConnections(); + + boolean isKeepAliveEnabled(); } diff --git a/api/src/com/cloud/server/ResourceMetaDataService.java b/api/src/com/cloud/server/ResourceMetaDataService.java index 556f97453a1..a71cfe7f1ee 100644 --- a/api/src/com/cloud/server/ResourceMetaDataService.java +++ b/api/src/com/cloud/server/ResourceMetaDataService.java @@ -19,19 +19,19 @@ package com.cloud.server; import java.util.List; import java.util.Map; -import com.cloud.server.ResourceTag.TaggedResourceType; +import org.apache.cloudstack.api.ResourceDetail; + +import com.cloud.server.ResourceTag.ResourceObjectType; public interface ResourceMetaDataService { - TaggedResourceType getResourceType (String resourceTypeStr); - /** * @param resourceId TODO * @param resourceType * @param details * @return */ - boolean addResourceMetaData(String resourceId, TaggedResourceType resourceType, Map 
details); + boolean addResourceMetaData(String resourceId, ResourceObjectType resourceType, Map details); /** @@ -41,7 +41,14 @@ public interface ResourceMetaDataService { * @param key * @return */ - public boolean deleteResourceMetaData(String resourceId, TaggedResourceType resourceType, String key); + public boolean deleteResourceMetaData(String resourceId, ResourceObjectType resourceType, String key); - } + ResourceDetail getDetail(long resourceId, ResourceObjectType resourceType, String key); + + + Map getDetailsMap(long resourceId, ResourceObjectType resourceType, Boolean forDisplay); + + List getDetailsList(long resourceId, ResourceObjectType resourceType, Boolean forDisplay); + +} diff --git a/api/src/com/cloud/server/ResourceTag.java b/api/src/com/cloud/server/ResourceTag.java index f1d31e4e0d0..ab74d260dc3 100644 --- a/api/src/com/cloud/server/ResourceTag.java +++ b/api/src/com/cloud/server/ResourceTag.java @@ -22,25 +22,45 @@ import org.apache.cloudstack.api.InternalIdentity; public interface ResourceTag extends ControlledEntity, Identity, InternalIdentity { - public enum TaggedResourceType { - UserVm, - Template, - ISO, - Volume, - Snapshot, - Network, - Nic, - LoadBalancer, - PortForwardingRule, - FirewallRule, - SecurityGroup, - PublicIpAddress, - Project, - Vpc, - NetworkACL, - StaticRoute, - VMSnapshot, - RemoteAccessVpn + //FIXME - extract enum to another interface as its used both by resourceTags and resourceMetaData code + public enum ResourceObjectType { + UserVm (true, true), + Template (true, true), + ISO (true, false), + Volume (true, true), + Snapshot (true, false), + Network (true, true), + Nic (false, true), + LoadBalancer (true, false), + PortForwardingRule (true, false), + FirewallRule (true, true), + SecurityGroup (true, false), + PublicIpAddress (true, false), + Project (true, false), + Vpc (true, false), + NetworkACL (true, false), + StaticRoute (true, false), + VMSnapshot (true, false), + RemoteAccessVpn (true, false), + Zone (false, 
true), + ServiceOffering (false, true), + Storage(false, true); + + ResourceObjectType(boolean resourceTagsSupport, boolean resourceMetadataSupport) { + this.resourceTagsSupport = resourceTagsSupport; + this.metadataSupport = resourceMetadataSupport; + } + + private final boolean resourceTagsSupport; + private final boolean metadataSupport; + + public boolean resourceTagsSupport() { + return this.resourceTagsSupport; + } + + public boolean resourceMetadataSupport() { + return this.metadataSupport; + } } /** @@ -61,7 +81,7 @@ public interface ResourceTag extends ControlledEntity, Identity, InternalIdentit /** * @return */ - TaggedResourceType getResourceType(); + ResourceObjectType getResourceType(); /** * @return diff --git a/api/src/com/cloud/server/TaggedResourceService.java b/api/src/com/cloud/server/TaggedResourceService.java index 46b185480bb..97046ac3950 100644 --- a/api/src/com/cloud/server/TaggedResourceService.java +++ b/api/src/com/cloud/server/TaggedResourceService.java @@ -19,12 +19,10 @@ package com.cloud.server; import java.util.List; import java.util.Map; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; public interface TaggedResourceService { - TaggedResourceType getResourceType (String resourceTypeStr); - /** * @param resourceIds TODO * @param resourceType @@ -32,14 +30,7 @@ public interface TaggedResourceService { * @param customer TODO * @return */ - List createTags(List resourceIds, TaggedResourceType resourceType, Map tags, String customer); - - /** - * @param resourceId - * @param resourceType - * @return - */ - String getUuid(String resourceId, TaggedResourceType resourceType); + List createTags(List resourceIds, ResourceObjectType resourceType, Map tags, String customer); /** @@ -48,10 +39,19 @@ public interface TaggedResourceService { * @param tags * @return */ - boolean deleteTags(List resourceIds, TaggedResourceType resourceType, Map tags); + boolean deleteTags(List 
resourceIds, ResourceObjectType resourceType, Map tags); - List listByResourceTypeAndId(TaggedResourceType type, long resourceId); + List listByResourceTypeAndId(ResourceObjectType type, long resourceId); - public Long getResourceId(String resourceId, TaggedResourceType resourceType); + //FIXME - the methods below should be extracted to its separate manager/service responsible just for retrieving object details + ResourceObjectType getResourceType (String resourceTypeStr); - } + /** + * @param resourceId + * @param resourceType + * @return + */ + String getUuid(String resourceId, ResourceObjectType resourceType); + + public long getResourceId(String resourceId, ResourceObjectType resourceType); +} diff --git a/api/src/com/cloud/storage/StorageService.java b/api/src/com/cloud/storage/StorageService.java index 1ae1d3a7102..cbbc1f33559 100644 --- a/api/src/com/cloud/storage/StorageService.java +++ b/api/src/com/cloud/storage/StorageService.java @@ -22,9 +22,9 @@ import org.apache.cloudstack.api.command.admin.storage.AddImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CancelPrimaryStorageMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.CreateSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.CreateStoragePoolCmd; -import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.DeleteImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.DeletePoolCmd; +import org.apache.cloudstack.api.command.admin.storage.DeleteSecondaryStagingStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import com.cloud.exception.DiscoveryException; @@ -97,4 +97,18 @@ public interface StorageService{ ImageStore discoverImageStore(AddImageStoreCmd cmd) throws IllegalArgumentException, DiscoveryException, InvalidParameterValueException; + /** + * Prepare NFS secondary storage for object store 
migration + * + * @param cmd + * - the command specifying secondaryStorageId + * @return the storage pool + * @throws ResourceUnavailableException + * TODO + * @throws InsufficientCapacityException + * TODO + */ + public ImageStore prepareSecondaryStorageForObjectStoreMigration(Long storeId) throws ResourceUnavailableException, + InsufficientCapacityException; + } diff --git a/api/src/com/cloud/storage/VolumeApiService.java b/api/src/com/cloud/storage/VolumeApiService.java index 0194c817cac..4806ae7c06f 100644 --- a/api/src/com/cloud/storage/VolumeApiService.java +++ b/api/src/com/cloud/storage/VolumeApiService.java @@ -84,7 +84,7 @@ public interface VolumeApiService { Snapshot allocSnapshot(Long volumeId, Long policyId) throws ResourceAllocationException; - Volume updateVolume(UpdateVolumeCmd updateVolumeCmd); + Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean displayVolume); /** * Extracts the volume to a particular location. diff --git a/api/src/com/cloud/storage/snapshot/SnapshotApiService.java b/api/src/com/cloud/storage/snapshot/SnapshotApiService.java index 23e65220ff9..4f135107f07 100644 --- a/api/src/com/cloud/storage/snapshot/SnapshotApiService.java +++ b/api/src/com/cloud/storage/snapshot/SnapshotApiService.java @@ -106,4 +106,6 @@ public interface SnapshotApiService { * @return */ Long getHostIdForSnapshotOperation(Volume vol); + + boolean revertSnapshot(Long snapshotId); } diff --git a/api/src/com/cloud/user/DomainService.java b/api/src/com/cloud/user/DomainService.java index 7c302e377fd..f10728f6fb4 100644 --- a/api/src/com/cloud/user/DomainService.java +++ b/api/src/com/cloud/user/DomainService.java @@ -33,6 +33,9 @@ public interface DomainService { Domain getDomain(String uuid); + Domain getDomainByName(String name, long parentId); + + /** * Return whether a domain is a child domain of a given domain. 
* diff --git a/api/src/com/cloud/vm/UserVmService.java b/api/src/com/cloud/vm/UserVmService.java index 7d459b99a9e..0b142e83b72 100755 --- a/api/src/com/cloud/vm/UserVmService.java +++ b/api/src/com/cloud/vm/UserVmService.java @@ -23,6 +23,7 @@ import javax.naming.InsufficientResourcesException; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; +import org.apache.cloudstack.api.command.admin.vm.ExpungeVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; @@ -463,4 +464,8 @@ public interface UserVmService { UserVm upgradeVirtualMachine(ScaleVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException, ManagementServerException, VirtualMachineMigrationException; + UserVm expungeVm(ExpungeVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException; + + UserVm expungeVm(long vmId) throws ResourceUnavailableException, ConcurrentOperationException; + } diff --git a/api/src/com/cloud/vm/VmDetailConstants.java b/api/src/com/cloud/vm/VmDetailConstants.java index 5ff3ce02fe4..87f4b5dc5de 100644 --- a/api/src/com/cloud/vm/VmDetailConstants.java +++ b/api/src/com/cloud/vm/VmDetailConstants.java @@ -21,4 +21,5 @@ public interface VmDetailConstants { public static final String NIC_ADAPTER = "nicAdapter"; public static final String ROOK_DISK_CONTROLLER = "rootDiskController"; public static final String NESTED_VIRTUALIZATION_FLAG = "nestedVirtualizationFlag"; + public static final String HYPERVISOR_TOOLS_VERSION = "hypervisortoolsversion"; } diff --git a/api/src/org/apache/cloudstack/api/APICommand.java b/api/src/org/apache/cloudstack/api/APICommand.java index 621b3476066..008bd1ed4d8 100644 --- a/api/src/org/apache/cloudstack/api/APICommand.java +++ b/api/src/org/apache/cloudstack/api/APICommand.java @@ -22,6 +22,7 @@ import 
java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; +import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.ResponseObject.ResponseView; @Retention(RetentionPolicy.RUNTIME) @@ -40,4 +41,6 @@ public @interface APICommand { String since() default ""; ResponseView responseView() default ResponseView.Admin; + + RoleType[] authorized() default {}; } diff --git a/api/src/org/apache/cloudstack/api/ApiCommandJobType.java b/api/src/org/apache/cloudstack/api/ApiCommandJobType.java index c48e6494477..6f9ac2dbf33 100644 --- a/api/src/org/apache/cloudstack/api/ApiCommandJobType.java +++ b/api/src/org/apache/cloudstack/api/ApiCommandJobType.java @@ -28,6 +28,7 @@ public enum ApiCommandJobType { SystemVm, Host, StoragePool, + ImageStore, IpAddress, PortableIpAddress, SecurityGroup, diff --git a/api/src/org/apache/cloudstack/api/ApiConstants.java b/api/src/org/apache/cloudstack/api/ApiConstants.java index 78200e58b50..32c2c5e9637 100755 --- a/api/src/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/org/apache/cloudstack/api/ApiConstants.java @@ -34,6 +34,7 @@ public class ApiConstants { public static final String BYTES_READ_RATE = "bytesreadrate"; public static final String BYTES_WRITE_RATE = "byteswriterate"; public static final String CATEGORY = "category"; + public static final String CAN_REVERT = "canrevert"; public static final String CERTIFICATE = "certificate"; public static final String PRIVATE_KEY = "privatekey"; public static final String DOMAIN_SUFFIX = "domainsuffix"; @@ -142,6 +143,7 @@ public class ApiConstants { public static final String MAX_SNAPS = "maxsnaps"; public static final String MEMORY = "memory"; public static final String MODE = "mode"; + public static final String KEEPALIVE_ENABLED = "keepaliveenabled"; public static final String NAME = "name"; public static final String METHOD_NAME = "methodname"; public static final String NETWORK_DOMAIN = "networkdomain"; @@ 
-186,6 +188,7 @@ public class ApiConstants { public static final String REQUIRES_HVM = "requireshvm"; public static final String RESOURCE_TYPE = "resourcetype"; public static final String RESPONSE = "response"; + public static final String REVERTABLE = "revertable"; public static final String QUERY_FILTER = "queryfilter"; public static final String SCHEDULE = "schedule"; public static final String SCOPE = "scope"; @@ -530,6 +533,11 @@ public class ApiConstants { public static final String ENTITY_ID = "entityid"; public static final String ACCESS_TYPE = "accesstype"; + public static final String RESOURCE_DETAILS = "resourcedetails"; + public static final String EXPUNGE = "expunge"; + public static final String FOR_DISPLAY = "fordisplay"; + + public enum HostDetails { all, capacity, events, stats, min; } diff --git a/api/src/org/apache/cloudstack/api/BaseCmd.java b/api/src/org/apache/cloudstack/api/BaseCmd.java index 84095546cec..b1ee0877428 100644 --- a/api/src/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/org/apache/cloudstack/api/BaseCmd.java @@ -21,7 +21,6 @@ import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.Date; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.regex.Pattern; @@ -76,7 +75,6 @@ import com.cloud.user.Account; import com.cloud.user.AccountService; import com.cloud.user.DomainService; import com.cloud.user.ResourceLimitService; -import com.cloud.utils.Pair; import com.cloud.utils.db.EntityManager; import com.cloud.vm.UserVmService; import com.cloud.vm.snapshot.VMSnapshotService; @@ -303,172 +301,6 @@ public abstract class BaseCmd { return lowercaseParams; } - public String buildResponse(ServerApiException apiException, String responseType) { - StringBuffer sb = new StringBuffer(); - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - // JSON response - sb.append("{ \"" + getCommandName() + "\" : { " + "\"@attributes\":{\"cloud-stack-version\":\"" + _mgr.getVersion() 
+ "\"},"); - sb.append("\"errorcode\" : \"" + apiException.getErrorCode() + "\", \"description\" : \"" + apiException.getDescription() + "\" } }"); - } else { - sb.append(""); - sb.append("<" + getCommandName() + ">"); - sb.append("" + apiException.getErrorCode() + ""); - sb.append("" + escapeXml(apiException.getDescription()) + ""); - sb.append(""); - } - return sb.toString(); - } - - public String buildResponse(List> tagList, String responseType) { - StringBuffer prefixSb = new StringBuffer(); - StringBuffer suffixSb = new StringBuffer(); - - // set up the return value with the name of the response - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - prefixSb.append("{ \"" + getCommandName() + "\" : { \"@attributes\":{\"cloud-stack-version\":\"" + _mgr.getVersion() + "\"},"); - } else { - prefixSb.append(""); - prefixSb.append("<" + getCommandName() + " cloud-stack-version=\"" + _mgr.getVersion() + "\">"); - } - - int i = 0; - for (Pair tagData : tagList) { - String tagName = tagData.first(); - Object tagValue = tagData.second(); - if (tagValue instanceof Object[]) { - Object[] subObjects = (Object[]) tagValue; - if (subObjects.length < 1) { - continue; - } - writeObjectArray(responseType, suffixSb, i++, tagName, subObjects); - } else { - writeNameValuePair(suffixSb, tagName, tagValue, responseType, i++); - } - } - - if (suffixSb.length() > 0) { - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { // append comma only if we have some suffix else - // not as per strict Json syntax. 
- prefixSb.append(","); - } - prefixSb.append(suffixSb); - } - // close the response - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - prefixSb.append("} }"); - } else { - prefixSb.append(""); - } - return prefixSb.toString(); - } - - private void writeNameValuePair(StringBuffer sb, String tagName, Object tagValue, String responseType, int propertyCount) { - if (tagValue == null) { - return; - } - - if (tagValue instanceof Object[]) { - Object[] subObjects = (Object[]) tagValue; - if (subObjects.length < 1) { - return; - } - writeObjectArray(responseType, sb, propertyCount, tagName, subObjects); - } else { - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - String seperator = ((propertyCount > 0) ? ", " : ""); - sb.append(seperator + "\"" + tagName + "\" : \"" + escapeJSON(tagValue.toString()) + "\""); - } else { - sb.append("<" + tagName + ">" + escapeXml(tagValue.toString()) + ""); - } - } - } - - @SuppressWarnings("rawtypes") - private void writeObjectArray(String responseType, StringBuffer sb, int propertyCount, String tagName, Object[] subObjects) { - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - String separator = ((propertyCount > 0) ? ", " : ""); - sb.append(separator); - } - int j = 0; - for (Object subObject : subObjects) { - if (subObject instanceof List) { - List subObjList = (List) subObject; - writeSubObject(sb, tagName, subObjList, responseType, j++); - } - } - - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - sb.append("]"); - } - } - - @SuppressWarnings("rawtypes") - private void writeSubObject(StringBuffer sb, String tagName, List tagList, String responseType, int objectCount) { - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - sb.append(((objectCount == 0) ? 
"\"" + tagName + "\" : [ { " : ", { ")); - } else { - sb.append("<" + tagName + ">"); - } - - int i = 0; - for (Object tag : tagList) { - if (tag instanceof Pair) { - Pair nameValuePair = (Pair) tag; - writeNameValuePair(sb, (String) nameValuePair.first(), nameValuePair.second(), responseType, i++); - } - } - - if (RESPONSE_TYPE_JSON.equalsIgnoreCase(responseType)) { - sb.append("}"); - } else { - sb.append(""); - } - } - - /** - * Escape xml response set to false by default. API commands to override this method to allow escaping - */ - public boolean requireXmlEscape() { - return true; - } - - private String escapeXml(String xml) { - if (!requireXmlEscape()) { - return xml; - } - int iLen = xml.length(); - if (iLen == 0) { - return xml; - } - StringBuffer sOUT = new StringBuffer(iLen + 256); - int i = 0; - for (; i < iLen; i++) { - char c = xml.charAt(i); - if (c == '<') { - sOUT.append("<"); - } else if (c == '>') { - sOUT.append(">"); - } else if (c == '&') { - sOUT.append("&"); - } else if (c == '"') { - sOUT.append("""); - } else if (c == '\'') { - sOUT.append("'"); - } else { - sOUT.append(c); - } - } - return sOUT.toString(); - } - - private static String escapeJSON(String str) { - if (str == null) { - return str; - } - - return str.replace("\"", "\\\""); - } - protected long getInstanceIdFromJobSuccessResult(String result) { s_logger.debug("getInstanceIdFromJobSuccessResult not overridden in subclass " + this.getClass().getName()); return 0; diff --git a/api/src/org/apache/cloudstack/api/ResourceDetail.java b/api/src/org/apache/cloudstack/api/ResourceDetail.java new file mode 100644 index 00000000000..4914c7806c1 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/ResourceDetail.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api; + +public interface ResourceDetail extends InternalIdentity{ + + public long getResourceId(); + + public String getName(); + + public String getValue(); + + public boolean isDisplay(); + +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java index bdad904c1dd..7296d5315d8 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/CreateNetworkOfferingCmd.java @@ -96,12 +96,15 @@ public class CreateNetworkOfferingCmd extends BaseCmd { private Boolean isPersistent; @Parameter(name=ApiConstants.DETAILS, type=CommandType.MAP, since="4.2.0", description="Network offering details in key/value pairs." 
+ - " Supported keys are internallbprovider/publiclbprovider with service provider as a value") + " Supported keys are internallbprovider/publiclbprovider with service provider as a value") protected Map details; @Parameter(name=ApiConstants.EGRESS_DEFAULT_POLICY, type=CommandType.BOOLEAN, description="true if default guest network egress policy is allow; false if default egress policy is deny") private Boolean egressDefaultPolicy; + @Parameter(name=ApiConstants.KEEPALIVE_ENABLED, type=CommandType.BOOLEAN, required=false, description="if true keepalive will be turned on in the loadbalancer. At the time of writing this has only an effect on haproxy; the mode http and httpclose options are unset in the haproxy conf file.") + private Boolean keepAliveEnabled; + @Parameter(name=ApiConstants.MAX_CONNECTIONS, type=CommandType.INTEGER, description="maximum number of concurrent connections supported by the network offering") private Integer maxConnections; @@ -175,6 +178,10 @@ public class CreateNetworkOfferingCmd extends BaseCmd { return egressDefaultPolicy; } + public Boolean getKeepAliveEnabled() { + return keepAliveEnabled; + } + public Integer getMaxconnections() { return maxConnections; } diff --git a/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java index c9c4c8ad3b4..f9bdadb4547 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java @@ -57,6 +57,9 @@ public class UpdateNetworkOfferingCmd extends BaseCmd { @Parameter(name=ApiConstants.STATE, type=CommandType.STRING, description="update state for the network offering") private String state; + @Parameter(name=ApiConstants.KEEPALIVE_ENABLED, type=CommandType.BOOLEAN, required=false, description="if true keepalive will be turned on in the loadbalancer. 
At the time of writing this has only an effect on haproxy; the mode http and httpclose options are unset in the haproxy conf file.") + private Boolean keepAliveEnabled; + @Parameter(name=ApiConstants.MAX_CONNECTIONS, type=CommandType.INTEGER, description="maximum number of concurrent connections supported by the network offering") private Integer maxConnections; @@ -91,6 +94,10 @@ public class UpdateNetworkOfferingCmd extends BaseCmd { public Integer getMaxconnections() { return maxConnections; } + + public Boolean getKeepAliveEnabled() { + return keepAliveEnabled; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java b/api/src/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java index 1e421a13d3f..a7b8dcd5439 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java @@ -49,6 +49,9 @@ public class UpdateDiskOfferingCmd extends BaseCmd{ @Parameter(name=ApiConstants.SORT_KEY, type=CommandType.INTEGER, description="sort key of the disk offering, integer") private Integer sortKey; + @Parameter(name=ApiConstants.DISPLAY_OFFERING, type=CommandType.BOOLEAN, description="an optional field, whether to display the offering to the end user or not.") + private Boolean displayOffering; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -69,8 +72,11 @@ public class UpdateDiskOfferingCmd extends BaseCmd{ return sortKey; } + public Boolean getDisplayOffering() { + return displayOffering; + } - ///////////////////////////////////////////////////// +///////////////////////////////////////////////////// 
/////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java b/api/src/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java index 66c8ae5cb74..35da69778f3 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/router/CreateVirtualRouterElementCmd.java @@ -36,7 +36,7 @@ import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.element.VirtualRouterElementService; import com.cloud.user.Account; @@ -70,15 +70,15 @@ public class CreateVirtualRouterElementCmd extends BaseAsyncCreateCmd { return nspId; } - public VirtualRouterProviderType getProviderType() { + public Type getProviderType() { if (providerType != null) { - if (providerType.equalsIgnoreCase(VirtualRouterProviderType.VirtualRouter.toString())) { - return VirtualRouterProviderType.VirtualRouter; - } else if (providerType.equalsIgnoreCase(VirtualRouterProviderType.VPCVirtualRouter.toString())) { - return VirtualRouterProviderType.VPCVirtualRouter; + if (providerType.equalsIgnoreCase(Type.VirtualRouter.toString())) { + return Type.VirtualRouter; + } else if (providerType.equalsIgnoreCase(Type.VPCVirtualRouter.toString())) { + return Type.VPCVirtualRouter; } else throw new InvalidParameterValueException("Invalid providerType specified"); } - return VirtualRouterProviderType.VirtualRouter; + return Type.VirtualRouter; } ///////////////////////////////////////////////////// diff --git 
a/api/src/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java index 26351bb7755..ddf0391a905 100644 --- a/api/src/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/ListStoragePoolsCmd.java @@ -59,7 +59,7 @@ public class ListStoragePoolsCmd extends BaseListCmd { @Parameter(name=ApiConstants.ZONE_ID, type=CommandType.UUID, entityType = ZoneResponse.class, description="the Zone ID for the storage pool") private Long zoneId; - + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType = StoragePoolResponse.class, description="the ID of the storage pool") private Long id; @@ -109,6 +109,7 @@ public class ListStoragePoolsCmd extends BaseListCmd { return s_name; } + @Override public ApiCommandJobType getInstanceType() { return ApiCommandJobType.StoragePool; } diff --git a/api/src/org/apache/cloudstack/api/command/admin/storage/PrepareSecondaryStorageForMigrationCmd.java b/api/src/org/apache/cloudstack/api/command/admin/storage/PrepareSecondaryStorageForMigrationCmd.java new file mode 100644 index 00000000000..d0c995a64f1 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/storage/PrepareSecondaryStorageForMigrationCmd.java @@ -0,0 +1,109 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.storage; + +import org.apache.log4j.Logger; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandJobType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ImageStoreResponse; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.event.EventTypes; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.storage.ImageStore; +import com.cloud.user.Account; + +@APICommand(name = "prepareSecondaryStorageForMigration", description = "Prepare a NFS secondary storage to migrate to use object store like S3", responseObject = ImageStoreResponse.class) +public class PrepareSecondaryStorageForMigrationCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(PrepareSecondaryStorageForMigrationCmd.class.getName()); + private static final String s_name = "preparesecondarystorageformigrationresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, + required = true, description = "Secondary image store 
ID") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public ApiCommandJobType getInstanceType() { + return ApiCommandJobType.ImageStore; + } + + @Override + public Long getInstanceId() { + return getId(); + } + + @Override + public long getEntityOwnerId() { + Account account = CallContext.current().getCallingAccount(); + if (account != null) { + return account.getId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + @Override + public String getEventType() { + return EventTypes.EVENT_MIGRATE_PREPARE_SECONDARY_STORAGE; + } + + @Override + public String getEventDescription() { + return "preparing secondary storage: " + getId() + " for object store migration"; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException{ + ImageStore result = _storageService.prepareSecondaryStorageForObjectStoreMigration(getId()); + if (result != null){ + ImageStoreResponse response = _responseGenerator.createImageStoreResponse(result); + response.setResponseName(getCommandName()); + response.setResponseName("secondarystorage"); + setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to prepare secondary storage for object store migration"); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java b/api/src/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java index 2a60e192ca3..6da4b6c9034 100644 --- 
a/api/src/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/admin/vm/AssignVMCmd.java @@ -39,7 +39,7 @@ import com.cloud.uservm.UserVm; public class AssignVMCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(AssignVMCmd.class.getName()); - private static final String s_name = "moveuservmresponse"; + private static final String s_name = "assignvirtualmachineresponse"; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// diff --git a/api/src/org/apache/cloudstack/api/command/admin/vm/ExpungeVMCmd.java b/api/src/org/apache/cloudstack/api/command/admin/vm/ExpungeVMCmd.java new file mode 100644 index 00000000000..387a0e986b2 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/admin/vm/ExpungeVMCmd.java @@ -0,0 +1,116 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.admin.vm; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandJobType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.event.EventTypes; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.uservm.UserVm; +import com.cloud.utils.exception.CloudRuntimeException; + +@APICommand(name = "expungeVirtualMachine", description="Expunge a virtual machine. 
Once expunged, it cannot be recoverd.", responseObject=SuccessResponse.class) +public class ExpungeVMCmd extends BaseAsyncCmd { + public static final Logger s_logger = Logger.getLogger(ExpungeVMCmd.class.getName()); + + private static final String s_name = "expungevirtualmachineresponse"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=UserVmResponse.class, + required=true, description="The ID of the virtual machine") + private Long id; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + UserVm vm = _responseGenerator.findUserVmById(getId()); + if (vm != null) { + return vm.getAccountId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + + @Override + public String getEventType() { + return EventTypes.EVENT_VM_EXPUNGE; + } + + @Override + public String getEventDescription() { + return "Expunging vm: " + getId(); + } + + public ApiCommandJobType getInstanceType() { + return ApiCommandJobType.VirtualMachine; + } + + public Long getInstanceId() { + return getId(); + } + + @Override + public void execute() throws ResourceUnavailableException, ConcurrentOperationException{ + CallContext.current().setEventDetails("Vm Id: "+getId()); + try { + UserVm result = _userVmService.expungeVm(this); + + if (result != null) { + SuccessResponse response = new 
SuccessResponse(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to expunge vm"); + } + } catch (InvalidParameterValueException ipve) { + throw new ServerApiException(ApiErrorCode.PARAM_ERROR, ipve.getMessage()); + } catch (CloudRuntimeException cre) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, cre.getMessage()); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java b/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java index e0cd7133e11..a44b76828fb 100644 --- a/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/network/UpdateNetworkCmd.java @@ -153,9 +153,7 @@ public class UpdateNetworkCmd extends BaseAsyncCmd { @Override public String getEventDescription() { - - - StringBuffer eventMsg = new StringBuffer("Updating network: " + getId()); + StringBuilder eventMsg = new StringBuilder("Updating network: " + getId()); if (getNetworkOfferingId() != null) { Network network = _networkService.getNetwork(getId()); if (network == null) { diff --git a/api/src/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java b/api/src/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java index ca16cdc7efe..60eb438050f 100644 --- a/api/src/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/offering/ListServiceOfferingsCmd.java @@ -16,6 +16,11 @@ // under the License. 
package org.apache.cloudstack.api.command.user.offering; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListCmd; @@ -24,9 +29,10 @@ import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.UserVmResponse; - import org.apache.log4j.Logger; +import com.cloud.exception.InvalidParameterValueException; + @APICommand(name = "listServiceOfferings", description="Lists all available service offerings.", responseObject=ServiceOfferingResponse.class) public class ListServiceOfferingsCmd extends BaseListCmd { public static final Logger s_logger = Logger.getLogger(ListServiceOfferingsCmd.class.getName()); @@ -98,7 +104,6 @@ public class ListServiceOfferingsCmd extends BaseListCmd { @Override public void execute(){ - ListResponse response = _queryService.searchForServiceOfferings(this); response.setResponseName(getCommandName()); this.setResponseObject(response); diff --git a/api/src/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java b/api/src/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java new file mode 100644 index 00000000000..6e790e1c170 --- /dev/null +++ b/api/src/org/apache/cloudstack/api/command/user/snapshot/RevertSnapshotCmd.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.api.command.user.snapshot; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiCommandJobType; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.SnapshotResponse; +import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.context.CallContext; + +import com.cloud.event.EventTypes; +import com.cloud.storage.Snapshot; +import com.cloud.user.Account; + +@APICommand(name = "revertSnapshot", description = "revert a volume snapshot.", responseObject = SnapshotResponse.class) +public class RevertSnapshotCmd extends BaseAsyncCmd { + private static final String s_name = "revertsnapshotresponse"; + @Parameter(name= ApiConstants.ID, type= BaseCmd.CommandType.UUID, entityType = SnapshotResponse.class, + required=true, description="The ID of the snapshot") + private Long id; + + public Long getId() { + return id; + } + + + @Override + public String getCommandName() { + return s_name; + } + + @Override + public long getEntityOwnerId() { + Snapshot snapshot = _entityMgr.findById(Snapshot.class, getId()); + if (snapshot != null) { + return snapshot.getAccountId(); + } + + return Account.ACCOUNT_ID_SYSTEM; // no account info given, parent this command to SYSTEM so ERROR events are tracked + } + 
+ @Override + public String getEventType() { + return EventTypes.EVENT_SNAPSHOT_REVERT; + } + + @Override + public String getEventDescription() { + return "revert snapshot: " + getId(); + } + + @Override + public ApiCommandJobType getInstanceType() { + return ApiCommandJobType.Snapshot; + } + + @Override + public Long getInstanceId() { + return getId(); + } + + @Override + public void execute(){ + CallContext.current().setEventDetails("Snapshot Id: "+getId()); + boolean result = _snapshotService.revertSnapshot(getId()); + if (result) { + SuccessResponse response = new SuccessResponse(getCommandName()); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to revert snapshot"); + } + } +} diff --git a/api/src/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java b/api/src/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java index 6f1a081da12..e36bd73263b 100644 --- a/api/src/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java @@ -20,16 +20,16 @@ import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.response.CreateSSHKeyPairResponse; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ProjectResponse; -import org.apache.cloudstack.api.response.SSHKeyPairResponse; import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; import com.cloud.user.SSHKeyPair; -@APICommand(name = "createSSHKeyPair", description="Create a new keypair and returns the private key", responseObject=SSHKeyPairResponse.class) +@APICommand(name = "createSSHKeyPair", description="Create a new keypair and returns the private key", 
responseObject=CreateSSHKeyPairResponse.class) public class CreateSSHKeyPairCmd extends BaseCmd { public static final Logger s_logger = Logger.getLogger(CreateSSHKeyPairCmd.class.getName()); private static final String s_name = "createsshkeypairresponse"; @@ -91,7 +91,7 @@ public class CreateSSHKeyPairCmd extends BaseCmd { @Override public void execute() { SSHKeyPair r = _mgr.createSSHKeyPair(this); - SSHKeyPairResponse response = new SSHKeyPairResponse(r.getName(), r.getFingerprint(), r.getPrivateKey()); + CreateSSHKeyPairResponse response = new CreateSSHKeyPairResponse(r.getName(), r.getFingerprint(), r.getPrivateKey()); response.setResponseName(getCommandName()); response.setObjectName("keypair"); this.setResponseObject(response); diff --git a/api/src/org/apache/cloudstack/api/command/user/tag/CreateTagsCmd.java b/api/src/org/apache/cloudstack/api/command/user/tag/CreateTagsCmd.java index a01bac39a4b..84226d7cc99 100644 --- a/api/src/org/apache/cloudstack/api/command/user/tag/CreateTagsCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/tag/CreateTagsCmd.java @@ -34,7 +34,7 @@ import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.server.ResourceTag; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; @APICommand(name = "createTags", description = "Creates resource tag(s)", responseObject = SuccessResponse.class, since = "4.0.0") public class CreateTagsCmd extends BaseAsyncCmd{ public static final Logger s_logger = Logger.getLogger(CreateTagsCmd.class.getName()); @@ -64,7 +64,7 @@ public class CreateTagsCmd extends BaseAsyncCmd{ ///////////////////////////////////////////////////// - public TaggedResourceType getResourceType(){ + public ResourceObjectType getResourceType(){ return _taggedResourceService.getResourceType(resourceType); } diff --git a/api/src/org/apache/cloudstack/api/command/user/tag/DeleteTagsCmd.java 
b/api/src/org/apache/cloudstack/api/command/user/tag/DeleteTagsCmd.java index a6ba0da82b7..5ce2e3795d1 100644 --- a/api/src/org/apache/cloudstack/api/command/user/tag/DeleteTagsCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/tag/DeleteTagsCmd.java @@ -33,7 +33,7 @@ import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.log4j.Logger; import com.cloud.event.EventTypes; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; @APICommand(name = "deleteTags", description = "Deleting resource tag(s)", responseObject = SuccessResponse.class, since = "4.0.0") public class DeleteTagsCmd extends BaseAsyncCmd{ public static final Logger s_logger = Logger.getLogger(DeleteTagsCmd.class.getName()); @@ -59,7 +59,7 @@ public class DeleteTagsCmd extends BaseAsyncCmd{ ///////////////////////////////////////////////////// - public TaggedResourceType getResourceType(){ + public ResourceObjectType getResourceType(){ return _taggedResourceService.getResourceType(resourceType); } diff --git a/api/src/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java b/api/src/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java index 06959c15e75..b3e8d1f83fe 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vm/DestroyVMCmd.java @@ -16,6 +16,8 @@ // under the License. 
package org.apache.cloudstack.api.command.user.vm; +import java.util.List; + import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandJobType; import org.apache.cloudstack.api.ApiConstants; @@ -25,13 +27,11 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.context.CallContext; - import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.user.Account; import com.cloud.uservm.UserVm; @@ -48,7 +48,12 @@ public class DestroyVMCmd extends BaseAsyncCmd { @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=UserVmResponse.class, required=true, description="The ID of the virtual machine") private Long id; - + + + @Parameter(name=ApiConstants.EXPUNGE, type=CommandType.BOOLEAN, + description="If true is passed, the vm is expunged immediately. False by default. 
Parameter can be passed to the call by ROOT/Domain admin only", since="4.2.1") + private Boolean expunge; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -56,6 +61,13 @@ public class DestroyVMCmd extends BaseAsyncCmd { public Long getId() { return id; } + + public boolean getExpunge() { + if (expunge == null) { + return false; + } + return expunge; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// @@ -97,11 +109,14 @@ public class DestroyVMCmd extends BaseAsyncCmd { @Override public void execute() throws ResourceUnavailableException, ConcurrentOperationException{ CallContext.current().setEventDetails("Vm Id: "+getId()); - UserVm result; - result = _userVmService.destroyVm(this); + UserVm result = _userVmService.destroyVm(this); + UserVmResponse response = new UserVmResponse(); if (result != null) { - UserVmResponse response = _responseGenerator.createUserVmResponse("virtualmachine", result).get(0); + List responses = _responseGenerator.createUserVmResponse("virtualmachine", result); + if (responses != null && !responses.isEmpty()) { + response = responses.get(0); + } response.setResponseName("virtualmachine"); this.setResponseObject(response); } else { diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/AddResourceDetailCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/AddResourceDetailCmd.java index a3b92478d1e..1384b58b2d0 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/AddResourceDetailCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/AddResourceDetailCmd.java @@ -71,7 +71,7 @@ public class AddResourceDetailCmd extends BaseAsyncCmd { return detailsMap; } - public ResourceTag.TaggedResourceType getResourceType() { + public ResourceTag.ResourceObjectType getResourceType() { return 
_taggedResourceService.getResourceType(resourceType); } diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/ListResourceDetailsCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/ListResourceDetailsCmd.java index c02d4b4c6ef..1e522b2d53b 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/ListResourceDetailsCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/ListResourceDetailsCmd.java @@ -17,7 +17,8 @@ package org.apache.cloudstack.api.command.user.volume; -import com.cloud.server.ResourceTag; +import java.util.List; + import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListProjectAndAccountResourcesCmd; @@ -25,40 +26,27 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ResourceDetailResponse; import org.apache.cloudstack.api.response.ResourceTagResponse; +import org.apache.cloudstack.context.CallContext; -import java.util.List; +import com.cloud.server.ResourceTag; @APICommand(name = "listResourceDetails", description = "List resource detail(s)", responseObject = ResourceTagResponse.class, since = "4.2") public class ListResourceDetailsCmd extends BaseListProjectAndAccountResourcesCmd{ private static final String s_name = "listresourcedetailsresponse"; - @Parameter(name=ApiConstants.RESOURCE_TYPE, type=CommandType.STRING, description="list by resource type") + @Parameter(name=ApiConstants.RESOURCE_TYPE, type=CommandType.STRING, description="list by resource type", required=true) private String resourceType; - @Parameter(name=ApiConstants.RESOURCE_ID, type=CommandType.STRING, description="list by resource id") + @Parameter(name=ApiConstants.RESOURCE_ID, type=CommandType.STRING, description="list by resource id", required=true) private String resourceId; @Parameter(name=ApiConstants.KEY, type=CommandType.STRING, description="list by 
key") private String key; - - ///////////////////////////////////////////////////// - /////////////////// Accessors /////////////////////// - ///////////////////////////////////////////////////// - - @Override - public void execute() { - - ListResponse response = new ListResponse(); - List resourceDetailResponse = _queryService.listResource(this); - response.setResponses(resourceDetailResponse); - response.setResponseName(getCommandName()); - this.setResponseObject(response); - } - - public ResourceTag.TaggedResourceType getResourceType() { - return _taggedResourceService.getResourceType(resourceType); - } - + + @Parameter(name=ApiConstants.FOR_DISPLAY, type=CommandType.BOOLEAN, description="if set to true, only details marked with display=true, are returned." + + " Always false is the call is made by the regular user", since="4.3") + private Boolean forDisplay; + public String getResourceId() { return resourceId; } @@ -71,5 +59,33 @@ public class ListResourceDetailsCmd extends BaseListProjectAndAccountResourcesCm public String getCommandName() { return s_name; } + + public Boolean forDisplay() { + if (!_accountService.isAdmin(CallContext.current().getCallingAccount().getType())) { + return true; + } + + return forDisplay; + } + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() { + + ListResponse response = new ListResponse(); + List resourceDetailResponse = _queryService.listResourceDetails(this); + response.setResponses(resourceDetailResponse); + response.setResponseName(getCommandName()); + this.setResponseObject(response); + } + + public ResourceTag.ResourceObjectType getResourceType() { + return _taggedResourceService.getResourceType(resourceType); + } + + } diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/RemoveResourceDetailCmd.java 
b/api/src/org/apache/cloudstack/api/command/user/volume/RemoveResourceDetailCmd.java index 8be70f348d0..5f2e131f514 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/RemoveResourceDetailCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/RemoveResourceDetailCmd.java @@ -16,32 +16,21 @@ // under the License. package org.apache.cloudstack.api.command.user.volume; -import com.cloud.server.ResourceTag; - import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandJobType; import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SuccessResponse; -import org.apache.cloudstack.api.response.UserVmResponse; -import org.apache.cloudstack.api.response.VolumeResponse; -import org.apache.cloudstack.context.CallContext; - import org.apache.log4j.Logger; import com.cloud.event.EventTypes; -import com.cloud.storage.Volume; -import com.cloud.user.Account; - -import java.util.*; +import com.cloud.server.ResourceTag; @APICommand(name = "removeResourceDetail", description="Removes detail for the Resource.", responseObject=SuccessResponse.class) public class RemoveResourceDetailCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(RemoveResourceDetailCmd.class.getName()); - private static final String s_name = "RemoveResourceDetailresponse"; + private static final String s_name = "removeresourcedetailresponse"; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -62,7 +51,7 @@ public class RemoveResourceDetailCmd extends BaseAsyncCmd { ///////////////////////////////////////////////////// - public ResourceTag.TaggedResourceType getResourceType(){ + public ResourceTag.ResourceObjectType getResourceType(){ return 
_taggedResourceService.getResourceType(resourceType); } diff --git a/api/src/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java b/api/src/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java index ad7c9920ad4..d4e3a6c1643 100644 --- a/api/src/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/volume/UpdateVolumeCmd.java @@ -23,32 +23,39 @@ import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.context.CallContext; - import org.apache.log4j.Logger; import com.cloud.event.EventTypes; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.storage.Volume; -import com.cloud.user.Account; @APICommand(name = "updateVolume", description="Updates the volume.", responseObject=VolumeResponse.class) public class UpdateVolumeCmd extends BaseAsyncCmd { public static final Logger s_logger = Logger.getLogger(UpdateVolumeCmd.class.getName()); - private static final String s_name = "addVolumeresponse"; + private static final String s_name = "updatevolumeresponse"; ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=VolumeResponse.class, - required=true, description="the ID of the disk volume") + @Parameter(name=ApiConstants.ID, type=CommandType.UUID, entityType=VolumeResponse.class, description="the ID of the disk volume") private Long id; - @Parameter(name=ApiConstants.PATH, type=CommandType.STRING, - required=true, description="the path of the 
volume") + @Parameter(name=ApiConstants.PATH, type=CommandType.STRING, description="The path of the volume") private String path; + + @Parameter(name=ApiConstants.STORAGE_ID, type=CommandType.UUID, entityType=StoragePoolResponse.class, + description="Destination storage pool UUID for the volume", since="4.3") + private Long storageId; + + @Parameter(name=ApiConstants.STATE, type=CommandType.STRING, description="The state of the volume", since="4.3") + private String state; + + @Parameter(name=ApiConstants.DISPLAY_VOLUME, type=CommandType.BOOLEAN, description="an optional field, whether to the display the volume to the end user or not.") + private Boolean displayVolume; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -61,8 +68,20 @@ public class UpdateVolumeCmd extends BaseAsyncCmd { public Long getId() { return id; } + + public Long getStorageId() { + return storageId; + } - ///////////////////////////////////////////////////// + public String getState() { + return state; + } + + public Boolean getDisplayVolume() { + return displayVolume; + } + +///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -83,25 +102,37 @@ public class UpdateVolumeCmd extends BaseAsyncCmd { public long getEntityOwnerId() { Volume volume = _responseGenerator.findVolumeById(getId()); if (volume == null) { - return Account.ACCOUNT_ID_SYSTEM; // bad id given, parent this command to SYSTEM so ERROR events are tracked + throw new InvalidParameterValueException("Invalid volume id was provided"); } return volume.getAccountId(); } @Override public String getEventType() { - return EventTypes.EVENT_VOLUME_ATTACH; + return EventTypes.EVENT_VOLUME_UPDATE; } @Override public String getEventDescription() { - return "adding detail to the volume: " + getId(); + StringBuilder desc = new StringBuilder("Updating volume: "); + 
desc.append(getId()).append(" with"); + if (getPath() != null) { + desc.append(" path " + getPath()); + } + if (getStorageId() != null) { + desc.append(", storage id " + getStorageId()); + } + + if (getState() != null) { + desc.append(", state " + getState()); + } + return desc.toString(); } @Override public void execute(){ CallContext.current().setEventDetails("Volume Id: "+getId()); - Volume result = _volumeService.updateVolume(this); + Volume result = _volumeService.updateVolume(getId(), getPath(), getState(), getStorageId(), getDisplayVolume()); if (result != null) { VolumeResponse response = _responseGenerator.createVolumeResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java index ff681a9d1e6..523101d67fc 100644 --- a/api/src/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java @@ -126,25 +126,10 @@ public class CreateRemoteAccessVpnCmd extends BaseAsyncCreateCmd { return EventTypes.EVENT_REMOTE_ACCESS_VPN_CREATE; } - public long getNetworkId() { - IpAddress ip = _entityMgr.findById(IpAddress.class, getPublicIpId()); - Long ntwkId = null; - - if (ip.getAssociatedWithNetworkId() != null) { - ntwkId = ip.getAssociatedWithNetworkId(); - } - - if (ntwkId == null) { - throw new InvalidParameterValueException("Unable to create remote access vpn for the ipAddress id=" + getPublicIpId() + - " as ip is not associated with any network and no networkId is passed in"); - } - return ntwkId; - } - @Override public void create() { try { - RemoteAccessVpn vpn = _ravService.createRemoteAccessVpn(publicIpId, ipRange, getOpenFirewall(), getNetworkId()); + RemoteAccessVpn vpn = _ravService.createRemoteAccessVpn(publicIpId, ipRange, getOpenFirewall()); if (vpn != null) { 
this.setEntityId(vpn.getServerAddressId()); // find uuid for server ip address diff --git a/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java b/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java index 4cf3b58a0a8..2a98cfbe928 100644 --- a/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java +++ b/api/src/org/apache/cloudstack/api/command/user/zone/ListZonesByCmd.java @@ -16,21 +16,21 @@ // under the License. package org.apache.cloudstack.api.command.user.zone; -import java.util.ArrayList; -import java.util.List; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListCmd; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.BaseCmd.CommandType; import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ListResponse; -import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.log4j.Logger; -import com.cloud.dc.DataCenter; +import com.cloud.exception.InvalidParameterValueException; @APICommand(name = "listZones", description="Lists zones", responseObject=ZoneResponse.class) public class ListZonesByCmd extends BaseListCmd { @@ -62,6 +62,9 @@ public class ListZonesByCmd extends BaseListCmd { @Parameter(name=ApiConstants.SHOW_CAPACITIES, type=CommandType.BOOLEAN, description="flag to display the capacity of the zones") private Boolean showCapacities; + + @Parameter(name = ApiConstants.TAGS, type = CommandType.MAP, description = "List zones by resource tags (key/value pairs)", since="4.3") + private Map tags; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// @@ -90,6 +93,25 @@ public class ListZonesByCmd extends BaseListCmd { 
public Boolean getShowCapacities() { return showCapacities; } + + public Map getTags() { + Map tagsMap = null; + if (tags != null && !tags.isEmpty()) { + tagsMap = new HashMap(); + Collection servicesCollection = tags.values(); + Iterator iter = servicesCollection.iterator(); + while (iter.hasNext()) { + HashMap services = (HashMap) iter.next(); + String key = services.get("key"); + String value = services.get("value"); + if (value == null) { + throw new InvalidParameterValueException("No value is passed in for key " + key); + } + tagsMap.put(key, value); + } + } + return tagsMap; + } ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// diff --git a/api/src/org/apache/cloudstack/api/response/CreateSSHKeyPairResponse.java b/api/src/org/apache/cloudstack/api/response/CreateSSHKeyPairResponse.java new file mode 100644 index 00000000000..e247fb4dcbc --- /dev/null +++ b/api/src/org/apache/cloudstack/api/response/CreateSSHKeyPairResponse.java @@ -0,0 +1,41 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.response; + +import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; + +public class CreateSSHKeyPairResponse extends SSHKeyPairResponse { + + @SerializedName("privatekey") @Param(description="Private key") + private String privateKey; + + public CreateSSHKeyPairResponse() {} + + public CreateSSHKeyPairResponse(String name, String fingerprint, String privateKey) { + super(name, fingerprint); + this.privateKey = privateKey; + } + + public String getPrivateKey() { + return privateKey; + } + + public void setPrivateKey(String privateKey) { + this.privateKey = privateKey; + } +} diff --git a/api/src/org/apache/cloudstack/api/response/ResourceDetailResponse.java b/api/src/org/apache/cloudstack/api/response/ResourceDetailResponse.java index 0e917d71904..989a126a1ae 100644 --- a/api/src/org/apache/cloudstack/api/response/ResourceDetailResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ResourceDetailResponse.java @@ -16,14 +16,8 @@ // under the License. 
package org.apache.cloudstack.api.response; -import java.util.Date; -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.Set; - import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; -import org.apache.cloudstack.api.EntityReference; import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; @@ -47,6 +41,11 @@ public class ResourceDetailResponse extends BaseResponse{ @Param(description = "value of the resource detail") private String value; + + @SerializedName(ApiConstants.FOR_DISPLAY) + @Param(description = "if detail is returned to the regular user", since="4.3") + private boolean forDisplay; + public String getResourceId() { return resourceId; } @@ -78,4 +77,8 @@ public class ResourceDetailResponse extends BaseResponse{ public void setValue(String value) { this.value = value; } + + public void setForDisplay(boolean forDisplay) { + this.forDisplay = forDisplay; + } } diff --git a/api/src/org/apache/cloudstack/api/response/SSHKeyPairResponse.java b/api/src/org/apache/cloudstack/api/response/SSHKeyPairResponse.java index 2791853d4a2..e102bab0394 100644 --- a/api/src/org/apache/cloudstack/api/response/SSHKeyPairResponse.java +++ b/api/src/org/apache/cloudstack/api/response/SSHKeyPairResponse.java @@ -30,19 +30,11 @@ public class SSHKeyPairResponse extends BaseResponse { @SerializedName("fingerprint") @Param(description="Fingerprint of the public key") private String fingerprint; - @SerializedName("privatekey") @Param(description="Private key") - private String privateKey; - public SSHKeyPairResponse() {} public SSHKeyPairResponse(String name, String fingerprint) { - this(name, fingerprint, null); - } - - public SSHKeyPairResponse(String name, String fingerprint, String privateKey) { this.name = name; this.fingerprint = fingerprint; - this.privateKey = privateKey; } public String getName() { @@ -61,12 +53,4 @@ public class SSHKeyPairResponse extends BaseResponse { 
this.fingerprint = fingerprint; } - public String getPrivateKey() { - return privateKey; - } - - public void setPrivateKey(String privateKey) { - this.privateKey = privateKey; - } - } diff --git a/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java b/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java index 5c5b369ec25..e305ee95e70 100644 --- a/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ServiceOfferingResponse.java @@ -17,19 +17,15 @@ package org.apache.cloudstack.api.response; import java.util.Date; - - -import com.google.gson.annotations.SerializedName; - import java.util.Map; - import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.EntityReference; import com.cloud.offering.ServiceOffering; import com.cloud.serializer.Param; +import com.google.gson.annotations.SerializedName; @EntityReference(value = ServiceOffering.class) public class ServiceOfferingResponse extends BaseResponse { @@ -108,6 +104,10 @@ public class ServiceOfferingResponse extends BaseResponse { @SerializedName(ApiConstants.SERVICE_OFFERING_DETAILS) @Param(description = "additional key/value details tied with this service offering", since = "4.2.0") private Map details; + + + public ServiceOfferingResponse(){ + } public String getId() { return id; @@ -287,4 +287,5 @@ public class ServiceOfferingResponse extends BaseResponse { public void setDetails(Map details) { this.details = details; } + } diff --git a/api/src/org/apache/cloudstack/api/response/SnapshotResponse.java b/api/src/org/apache/cloudstack/api/response/SnapshotResponse.java index e9cb109bf31..7c2b4a99770 100644 --- a/api/src/org/apache/cloudstack/api/response/SnapshotResponse.java +++ b/api/src/org/apache/cloudstack/api/response/SnapshotResponse.java @@ -26,18 +26,8 @@ import org.apache.cloudstack.api.EntityReference; import 
com.cloud.serializer.Param; import com.cloud.storage.Snapshot; import com.google.gson.annotations.SerializedName; -import com.cloud.serializer.Param; -import com.cloud.storage.Snapshot; -import com.google.gson.annotations.SerializedName; -import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseResponse; -import org.apache.cloudstack.api.EntityReference; - -import java.util.Date; -import java.util.List; @EntityReference(value=Snapshot.class) -@SuppressWarnings("unused") public class SnapshotResponse extends BaseResponse implements ControlledEntityResponse { @SerializedName(ApiConstants.ID) @Param(description = "ID of the snapshot") @@ -100,6 +90,9 @@ public class SnapshotResponse extends BaseResponse implements ControlledEntityRe @SerializedName(ApiConstants.TAGS) @Param(description="the list of resource tags associated with snapshot", responseObject = ResourceTagResponse.class) private List tags; + @SerializedName(ApiConstants.REVERTABLE) + @Param(description="indicates whether the underlying storage supports reverting the volume to this snapshot") + private boolean revertable; @Override public String getObjectId() { @@ -118,6 +111,7 @@ public class SnapshotResponse extends BaseResponse implements ControlledEntityRe return accountName; } + @Override public void setAccountName(String accountName) { this.accountName = accountName; } @@ -131,6 +125,7 @@ public class SnapshotResponse extends BaseResponse implements ControlledEntityRe this.domainId = domainId; } + @Override public void setDomainName(String domainName) { this.domainName = domainName; } @@ -180,8 +175,16 @@ public class SnapshotResponse extends BaseResponse implements ControlledEntityRe public void setZoneId(String zoneId) { this.zoneId = zoneId; } - + public void setTags(List tags) { this.tags = tags; } + + public boolean isRevertable() { + return revertable; + } + + public void setRevertable(boolean revertable) { + this.revertable = revertable; + } } diff --git 
a/api/src/org/apache/cloudstack/api/response/UserVmResponse.java b/api/src/org/apache/cloudstack/api/response/UserVmResponse.java index d9bb2a976ee..9a7f91c70f6 100644 --- a/api/src/org/apache/cloudstack/api/response/UserVmResponse.java +++ b/api/src/org/apache/cloudstack/api/response/UserVmResponse.java @@ -18,6 +18,7 @@ package org.apache.cloudstack.api.response; import java.util.Date; import java.util.LinkedHashSet; +import java.util.Map; import java.util.Set; import org.apache.cloudstack.affinity.AffinityGroupResponse; @@ -177,6 +178,9 @@ public class UserVmResponse extends BaseResponse implements ControlledEntityResp @SerializedName(ApiConstants.TAGS) @Param(description="the list of resource tags associated with vm", responseObject = ResourceTagResponse.class) private Set tags; + + @SerializedName(ApiConstants.DETAILS) @Param(description="Template details in key/value pairs.", since="4.2.1") + private Map details; @SerializedName(ApiConstants.SSH_KEYPAIR) @Param(description="ssh key-pair") private String keyPairName; @@ -653,5 +657,8 @@ public class UserVmResponse extends BaseResponse implements ControlledEntityResp public void setServiceState(String state) { this.serviceState = state; } - + + public void setDetails(Map details) { + this.details = details; + } } diff --git a/api/src/org/apache/cloudstack/api/response/VolumeResponse.java b/api/src/org/apache/cloudstack/api/response/VolumeResponse.java index 338fcaae5a4..56c007f2d69 100644 --- a/api/src/org/apache/cloudstack/api/response/VolumeResponse.java +++ b/api/src/org/apache/cloudstack/api/response/VolumeResponse.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api.response; import java.util.Date; -import java.util.HashSet; import java.util.LinkedHashSet; import java.util.Set; @@ -178,12 +177,30 @@ public class VolumeResponse extends BaseResponse implements ControlledViewEntity @Param(description="the status of the volume") private String status; - @SerializedName(ApiConstants.TAGS) 
@Param(description="the list of resource tags associated with volume", responseObject = ResourceTagResponse.class) + @SerializedName(ApiConstants.TAGS) + @Param(description="the list of resource tags associated with volume", responseObject = ResourceTagResponse.class) private Set tags; - @SerializedName(ApiConstants.DISPLAY_VOLUME) @Param(description="an optional field whether to the display the volume to the end user or not.") + @SerializedName(ApiConstants.DISPLAY_VOLUME) + @Param(description="an optional field whether to the display the volume to the end user or not.") private Boolean displayVm; + @SerializedName(ApiConstants.PATH) + @Param(description="The path of the volume") + private String path; + + @SerializedName(ApiConstants.STORAGE_ID) + @Param(description = "id of the primary storage hosting the disk volume; returned to admin user only", since="4.3") + private String storagePoolId; + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + public VolumeResponse(){ tags = new LinkedHashSet(); } @@ -388,4 +405,7 @@ public class VolumeResponse extends BaseResponse implements ControlledViewEntity this.displayVm = displayVm; } + public void setStoragePoolId(String storagePoolId) { + this.storagePoolId = storagePoolId; + } } diff --git a/api/src/org/apache/cloudstack/api/response/ZoneResponse.java b/api/src/org/apache/cloudstack/api/response/ZoneResponse.java index 2ebb15a1ecf..2f93e9159ce 100644 --- a/api/src/org/apache/cloudstack/api/response/ZoneResponse.java +++ b/api/src/org/apache/cloudstack/api/response/ZoneResponse.java @@ -16,7 +16,10 @@ // under the License. 
package org.apache.cloudstack.api.response; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Map; +import java.util.Set; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; @@ -98,6 +101,19 @@ public class ZoneResponse extends BaseResponse { @SerializedName(ApiConstants.LOCAL_STORAGE_ENABLED) @Param(description="true if local storage offering enabled, false otherwise") private boolean localStorageEnabled; + + @SerializedName(ApiConstants.TAGS) @Param(description="the list of resource tags associated with zone.", + responseObject = ResourceTagResponse.class, since="4.3") + private Set tags; + + @SerializedName(ApiConstants.RESOURCE_DETAILS) + @Param(description = "Meta data associated with the zone (key/value pairs)", since = "4.3.0") + private Map resourceDetails; + + + public ZoneResponse(){ + tags = new LinkedHashSet(); + } public void setId(String id) { this.id = id; @@ -198,4 +214,12 @@ public class ZoneResponse extends BaseResponse { public void setIp6Dns2(String ip6Dns2) { this.ip6Dns2 = ip6Dns2; } + + public void addTag(ResourceTagResponse tag){ + this.tags.add(tag); + } + + public void setResourceDetails(Map details) { + this.resourceDetails = details; + } } diff --git a/api/src/org/apache/cloudstack/context/CallContext.java b/api/src/org/apache/cloudstack/context/CallContext.java index a62a3da72c4..5439aee7062 100644 --- a/api/src/org/apache/cloudstack/context/CallContext.java +++ b/api/src/org/apache/cloudstack/context/CallContext.java @@ -18,8 +18,10 @@ package org.apache.cloudstack.context; import java.util.HashMap; import java.util.Map; +import java.util.Stack; import java.util.UUID; +import org.apache.cloudstack.managed.threadlocal.ManagedThreadLocal; import org.apache.log4j.Logger; import org.apache.log4j.NDC; @@ -37,18 +39,27 @@ import com.cloud.utils.exception.CloudRuntimeException; */ public class CallContext { private static final Logger s_logger = 
Logger.getLogger(CallContext.class); - private static ThreadLocal s_currentContext = new ThreadLocal(); + private static ManagedThreadLocal s_currentContext = new ManagedThreadLocal(); + private static ManagedThreadLocal> s_currentContextStack = + new ManagedThreadLocal>() { + @Override + protected Stack initialValue() { + return new Stack(); + } + }; private String contextId; private Account account; + private long accountId; private long startEventId = 0; private String eventDescription; private String eventDetails; private String eventType; private User user; + private long userId; private final Map context = new HashMap(); - private static EntityManager s_entityMgr; + static EntityManager s_entityMgr; public static void init(EntityManager entityMgr) { s_entityMgr = entityMgr; @@ -57,9 +68,17 @@ public class CallContext { protected CallContext() { } + protected CallContext(long userId, long accountId, String contextId) { + this.userId = userId; + this.accountId = accountId; + this.contextId = contextId; + } + protected CallContext(User user, Account account, String contextId) { this.user = user; + this.userId = user.getId(); this.account = account; + this.accountId = account.getId(); this.contextId = contextId; } @@ -72,10 +91,13 @@ public class CallContext { } public long getCallingUserId() { - return user.getId(); + return userId; } public User getCallingUser() { + if (user == null) { + user = s_entityMgr.findById(User.class, userId); + } return user; } @@ -84,6 +106,9 @@ public class CallContext { } public Account getCallingAccount() { + if (account == null) { + account = s_entityMgr.findById(Account.class, accountId); + } return account; } @@ -101,6 +126,10 @@ public class CallContext { * @return CallContext */ public static CallContext register(User callingUser, Account callingAccount, String contextId) { + return register(callingUser, callingAccount, null, null, contextId); + } + + protected static CallContext register(User callingUser, Account 
callingAccount, Long userId, Long accountId, String contextId) { /* Unit tests will have multiple times of setup/tear-down call to this, remove assertions to all unit test to run @@ -109,12 +138,20 @@ public class CallContext { throw new CloudRuntimeException("There's a context already so what does this new register context mean? " + s_currentContext.get().toString()); } */ - CallContext callingContext = new CallContext(callingUser, callingAccount, contextId); + CallContext callingContext = null; + if (userId == null || accountId == null) { + callingContext = new CallContext(callingUser, callingAccount, contextId); + } else { + callingContext = new CallContext(userId, accountId, contextId); + } s_currentContext.set(callingContext); NDC.push("ctx-" + UuidUtils.first(contextId)); if (s_logger.isTraceEnabled()) { s_logger.trace("Registered: " + callingContext); } + + s_currentContextStack.get().push(callingContext); + return callingContext; } @@ -126,14 +163,13 @@ public class CallContext { try { CallContext context = s_currentContext.get(); if (context == null) { - return register(User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM); + return register(null, null, User.UID_SYSTEM, Account.ACCOUNT_ID_SYSTEM, UUID.randomUUID().toString()); } assert context.getCallingUserId() == User.UID_SYSTEM : "You are calling a very specific method that registers a one time system context. 
This method is meant for background threads that does processing."; return context; } catch (Exception e) { - s_logger.fatal("Exiting the system because we're unable to register the system call context.", e); - System.exit(1); - throw new CloudRuntimeException("Should never hit this"); + s_logger.error("Failed to register the system call context.", e); + throw new CloudRuntimeException("Failed to register system call context", e); } } @@ -162,10 +198,15 @@ public class CallContext { return register(user, account); } + public static void unregisterAll() { + while ( unregister() != null ) { + // NOOP + } + } + public static CallContext unregister() { CallContext context = s_currentContext.get(); if (context == null) { - s_logger.debug("No context to remove"); return null; } s_currentContext.remove(); @@ -183,6 +224,14 @@ public class CallContext { s_logger.trace("Popping from NDC: " + contextId); } } + + Stack stack = s_currentContextStack.get(); + stack.pop(); + + if ( ! stack.isEmpty() ) { + s_currentContext.set(stack.peek()); + } + return context; } @@ -195,15 +244,15 @@ public class CallContext { } public long getCallingAccountId() { - return account.getId(); + return accountId; } public String getCallingAccountUuid() { - return account.getUuid(); + return getCallingAccount().getUuid(); } public String getCallingUserUuid() { - return user.getUuid(); + return getCallingUser().getUuid(); } public void setEventDetails(String eventDetails) { @@ -240,8 +289,8 @@ public class CallContext { @Override public String toString() { - return new StringBuffer("CCtxt[acct=").append(account.getId()) - .append("; user=").append(user.getId()) + return new StringBuilder("CCtxt[acct=").append(getCallingAccountId()) + .append("; user=").append(getCallingUserId()) .append("; id=").append(contextId) .append("]").toString(); } diff --git a/api/src/org/apache/cloudstack/context/CallContextListener.java b/api/src/org/apache/cloudstack/context/CallContextListener.java new file mode 100644 
index 00000000000..9464f3d596e --- /dev/null +++ b/api/src/org/apache/cloudstack/context/CallContextListener.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.context; + +import javax.annotation.PostConstruct; +import javax.inject.Inject; + +import org.apache.cloudstack.managed.context.ManagedContextListener; + +import com.cloud.utils.db.EntityManager; + +public class CallContextListener implements ManagedContextListener { + + @Inject + EntityManager entityMgr; + + @Override + public Object onEnterContext(boolean reentry) { + if ( ! reentry ) { + CallContext.registerSystemCallContextOnceOnly(); + } + + return null; + } + + @Override + public void onLeaveContext(Object unused, boolean reentry) { + if ( ! 
reentry ) { + CallContext.unregisterAll(); + } + } + + @PostConstruct + public void init() { + CallContext.init(entityMgr); + } +} diff --git a/api/src/org/apache/cloudstack/query/QueryService.java b/api/src/org/apache/cloudstack/query/QueryService.java index 9b88bdb0dc7..bbbb3fe9dd8 100644 --- a/api/src/org/apache/cloudstack/query/QueryService.java +++ b/api/src/org/apache/cloudstack/query/QueryService.java @@ -123,7 +123,7 @@ public interface QueryService { String affinityGroupType, Long vmId, String accountName, Long domainId, boolean isRecursive, boolean listAll, Long startIndex, Long pageSize); - public List listResource(ListResourceDetailsCmd cmd); + public List listResourceDetails(ListResourceDetailsCmd cmd); ListResponse searchForInternalLbVms(ListInternalLBVMsCmd cmd); diff --git a/awsapi/pom.xml b/awsapi/pom.xml index 5839a97563b..b1afd2d696e 100644 --- a/awsapi/pom.xml +++ b/awsapi/pom.xml @@ -16,7 +16,9 @@ specific language governing permissions and limitations under the License. 
--> - + 4.0.0 cloud-awsapi Apache CloudStack AWS API Bridge @@ -35,8 +37,7 @@ org.springframework spring-web - ${org.springframework.version} - + org.apache.axis2 axis2 @@ -72,7 +73,6 @@ log4j log4j - ${cs.log4j.version} org.apache.cloudstack @@ -97,22 +97,19 @@ org.apache.ws.commons.axiom axiom-impl - + com.google.code.gson gson - ${cs.gson.version} commons-codec commons-codec - ${cs.codec.version} javax.servlet servlet-api - ${cs.servlet.version} provided @@ -123,7 +120,6 @@ org.jasypt jasypt - ${cs.jasypt.version} com.caringo.client @@ -137,15 +133,15 @@ mar - bouncycastle - bcprov-jdk14 + bouncycastle + bcprov-jdk14 - org.apache.xalan - xalan + org.apache.xalan + xalan - org.opensaml + org.opensaml opensaml @@ -157,126 +153,127 @@ mar - bouncycastle - bcprov-jdk14 + bouncycastle + bcprov-jdk14 - org.apache.xalan - xalan + org.apache.xalan + xalan - org.opensaml + org.opensaml opensaml - org.apache.rampart - rampart-core - ${cs.rampart.version} - runtime + org.apache.rampart + rampart-core + ${cs.rampart.version} + runtime - org.apache.xalan - xalan + org.apache.xalan + xalan - org.opensaml + org.opensaml opensaml - org.apache.rampart - rampart-policy - ${cs.rampart.version} - runtime + org.apache.rampart + rampart-policy + ${cs.rampart.version} + runtime - org.apache.xalan - xalan + org.apache.xalan + xalan - org.opensaml + org.opensaml opensaml - org.apache.rampart - rampart-trust - ${cs.rampart.version} - runtime + org.apache.rampart + rampart-trust + ${cs.rampart.version} + runtime - org.apache.xalan - xalan + org.apache.xalan + xalan - org.opensaml + org.opensaml opensaml - org.slf4j - slf4j-jdk14 - 1.6.1 - runtime + org.slf4j + slf4j-jdk14 + 1.6.1 + runtime - org.slf4j - slf4j-api - 1.6.1 - runtime + org.slf4j + slf4j-api + 1.6.1 + runtime - org.apache.ws.security - wss4j - 1.6.1 - runtime + org.apache.ws.security + wss4j + 1.6.1 + runtime - joda-time - joda-time - 1.5.2 - runtime + joda-time + joda-time + 1.5.2 + runtime - org.opensaml - xmltooling - 
1.3.1 - runtime + org.opensaml + xmltooling + 1.3.1 + runtime - org.opensaml - openws - 1.4.1 - runtime + org.opensaml + openws + 1.4.1 + runtime - velocity - velocity - 1.5 - runtime + velocity + velocity + 1.5 + runtime - org.opensaml - opensaml - 2.5.1-1 - runtime + org.opensaml + opensaml + 2.5.1-1 + runtime - org.apache.santuario - xmlsec - 1.4.2 - runtime + org.apache.santuario + xmlsec + 1.4.2 + runtime - org.bouncycastle - bcprov-jdk16 - 1.45 - runtime + org.bouncycastle + bcprov-jdk16 + + 1.45 + runtime mysql @@ -302,7 +299,7 @@ org.apache.cloudstack cloud-framework-db ${project.version} - + @@ -313,7 +310,7 @@ - ../utils/conf/ + ../utils/conf/ ${basedir}/resource/AmazonEC2 @@ -331,7 +328,7 @@ - + org.apache.maven.plugins maven-war-plugin 2.3 @@ -358,7 +355,6 @@ maven-antrun-plugin - 1.7 generate-resource @@ -368,52 +364,49 @@ - + - + - + - - + + - - - - org.apache.axis2 - axis2-aar-maven-plugin - 1.6.2 - true - - false - cloud-ec2 - ${project.build.directory}/WEB-INF/services - - - resource/AmazonEC2 - META-INF - - services.xml - - - - - - - - aar - - - - + + + + org.apache.axis2 + axis2-aar-maven-plugin + 1.6.2 + true + + false + cloud-ec2 + ${project.build.directory}/WEB-INF/services + + + resource/AmazonEC2 + META-INF + + services.xml + + + + + + + + aar + + + + @@ -426,19 +419,15 @@ - - org.apache.maven.plugins - - - maven-antrun-plugin - + org.apache.maven.plugins + maven-antrun-plugin [1.7,) run - + @@ -451,7 +440,7 @@ - + @@ -483,14 +472,10 @@ - - - + + + @@ -555,27 +540,27 @@ - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - clean - - exec - - - rm - - -rf - ${basedir}/wsdl/ - ${basedir}/resources/AmazonEC2.wsdl - - - - - + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + clean + + exec + + + rm + + -rf + ${basedir}/wsdl/ + ${basedir}/resources/AmazonEC2.wsdl + + + + + diff --git a/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDaoImpl.java index 
dd354a39ffb..00486cbaceb 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/BucketPolicyDaoImpl.java @@ -26,7 +26,7 @@ import com.cloud.bridge.model.BucketPolicyVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={BucketPolicyDao.class}) @@ -42,7 +42,7 @@ public class BucketPolicyDaoImpl extends GenericDaoBase im public BucketPolicyVO getByName( String bucketName ) { SearchBuilder searchByBucket = createSearchBuilder(); searchByBucket.and("BucketName", searchByBucket.entity().getBucketName(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = searchByBucket.create(); @@ -59,7 +59,7 @@ public class BucketPolicyDaoImpl extends GenericDaoBase im public void deletePolicy( String bucketName ) { SearchBuilder deleteByBucket = createSearchBuilder(); deleteByBucket.and("BucketName", deleteByBucket.entity().getBucketName(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = deleteByBucket.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackAccountDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackAccountDaoImpl.java index 8fbc7c8e3af..75a693e4955 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackAccountDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackAccountDaoImpl.java @@ -25,6 +25,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import 
com.cloud.utils.db.TransactionLegacy; @Component @Local(value={CloudStackAccountDao.class}) @@ -34,7 +35,7 @@ public class CloudStackAccountDaoImpl extends GenericDaoBase SearchByUUID = createSearchBuilder(); - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { txn.start(); SearchByUUID.and("uuid", SearchByUUID.entity().getUuid(), diff --git a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackConfigurationDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackConfigurationDaoImpl.java index bc77ea1d886..644dcdcef37 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackConfigurationDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackConfigurationDaoImpl.java @@ -27,6 +27,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={CloudStackConfigurationDao.class}) @@ -42,7 +43,7 @@ public class CloudStackConfigurationDaoImpl extends GenericDaoBase sc = NameSearch.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackSvcOfferingDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackSvcOfferingDaoImpl.java index 8021eb618e9..cb8d129f528 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackSvcOfferingDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackSvcOfferingDaoImpl.java @@ -29,6 +29,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={CloudStackSvcOfferingDao.class}) @@ -42,7 +43,7 @@ public class CloudStackSvcOfferingDaoImpl extends GenericDaoBase searchByName = createSearchBuilder(); searchByName.and("name", 
searchByName.entity().getName(), SearchCriteria.Op.EQ); searchByName.done(); - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { txn.start(); SearchCriteria sc = searchByName.create(); @@ -61,7 +62,7 @@ public class CloudStackSvcOfferingDaoImpl extends GenericDaoBase searchByID = createSearchBuilder(); searchByID.and("uuid", searchByID.entity().getUuid(), SearchCriteria.Op.EQ); searchByID.done(); - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { txn.start(); SearchCriteria sc = searchByID.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java index f7e1da65dc6..7fe1dabee4d 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/CloudStackUserDaoImpl.java @@ -26,6 +26,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.crypt.DBEncryptionUtil; @Component @@ -43,7 +44,7 @@ public class CloudStackUserDaoImpl extends GenericDaoBase searchByAccessKey = createSearchBuilder(); searchByAccessKey.and("apiKey", searchByAccessKey.entity().getApiKey(), SearchCriteria.Op.EQ); searchByAccessKey.done(); - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { txn.start(); SearchCriteria sc = searchByAccessKey.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/MHostDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/MHostDaoImpl.java index 222325498b9..b52fcaf221b 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/MHostDaoImpl.java +++ 
b/awsapi/src/com/cloud/bridge/persist/dao/MHostDaoImpl.java @@ -25,6 +25,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={MHostDao.class}) @@ -38,7 +39,7 @@ public class MHostDaoImpl extends GenericDaoBase implements MHost @Override public MHostVO getByHostKey(String hostKey) { NameSearch.and("MHostKey", NameSearch.entity().getHostKey(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.open("cloudbridge", TransactionLegacy.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc = NameSearch.create(); @@ -52,7 +53,7 @@ public class MHostDaoImpl extends GenericDaoBase implements MHost @Override public void updateHeartBeat(MHostVO mhost) { - Transaction txn = Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.open("cloudbridge", TransactionLegacy.AWSAPI_DB, true); try { txn.start(); update(mhost.getId(), mhost); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/MHostMountDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/MHostMountDaoImpl.java index 8b99f487911..8a7153a9b85 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/MHostMountDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/MHostMountDaoImpl.java @@ -25,6 +25,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={MHostMountDao.class}) @@ -37,7 +38,7 @@ public class MHostMountDaoImpl extends GenericDaoBase implem public MHostMountVO getHostMount(long mHostId, long sHostId) { SearchByMHostID.and("MHostID", SearchByMHostID.entity().getmHostID(), SearchCriteria.Op.EQ); 
SearchByMHostID.and("SHostID", SearchByMHostID.entity().getsHostID(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = SearchByMHostID.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/MultiPartPartsDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/MultiPartPartsDaoImpl.java index 6f314951697..f1472e675aa 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/MultiPartPartsDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/MultiPartPartsDaoImpl.java @@ -28,6 +28,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={MultiPartPartsDao.class}) @@ -42,7 +43,7 @@ public class MultiPartPartsDaoImpl extends GenericDaoBase sc = ByUploadID.create(); @@ -61,7 +62,7 @@ public class MultiPartPartsDaoImpl extends GenericDaoBase byUploadID = createSearchBuilder(); byUploadID.and("UploadID", byUploadID.entity().getUploadid(), SearchCriteria.Op.EQ); byUploadID.and("partNumber", byUploadID.entity().getPartNumber(), SearchCriteria.Op.GT); - Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc = byUploadID.create(); @@ -82,7 +83,7 @@ public class MultiPartPartsDaoImpl extends GenericDaoBase byUploadID = createSearchBuilder(); byUploadID.and("UploadID", byUploadID.entity().getUploadid(), SearchCriteria.Op.EQ); byUploadID.and("partNumber", byUploadID.entity().getPartNumber(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + 
TransactionLegacy txn = TransactionLegacy.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc = byUploadID.create(); @@ -102,7 +103,7 @@ public class MultiPartPartsDaoImpl extends GenericDaoBase byUploadID = createSearchBuilder(); byUploadID.and("UploadID", byUploadID.entity().getUploadid(), SearchCriteria.Op.EQ); byUploadID.and("partNumber", byUploadID.entity().getPartNumber(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc = byUploadID.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/MultiPartUploadsDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/MultiPartUploadsDaoImpl.java index 0f76e80a952..41133a06e92 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/MultiPartUploadsDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/MultiPartUploadsDaoImpl.java @@ -33,6 +33,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={MultiPartUploadsDao.class}) @@ -42,9 +43,9 @@ public class MultiPartUploadsDaoImpl extends GenericDaoBase multipartExits( int uploadId ) { MultiPartUploadsVO uploadvo = null; - Transaction txn = null; + TransactionLegacy txn = null; try { - txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); uploadvo = findById(new Long(uploadId)); if (null != uploadvo) return new OrderedPair(uploadvo.getAccessKey(), uploadvo.getNameKey()); @@ -58,9 +59,9 @@ public class MultiPartUploadsDaoImpl extends GenericDaoBase sc = byBucket.create(); diff --git 
a/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java b/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java index c1a69dc5e47..4e6ff3d1b25 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/MultipartLoadDao.java @@ -34,6 +34,7 @@ import com.cloud.bridge.service.core.s3.S3MultipartPart; import com.cloud.bridge.service.core.s3.S3MultipartUpload; import com.cloud.bridge.util.OrderedPair; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class MultipartLoadDao { public static final Logger logger = Logger.getLogger(MultipartLoadDao.class); @@ -94,9 +95,9 @@ public class MultipartLoadDao { */ public int initiateUpload( String accessKey, String bucketName, String key, String cannedAccess, S3MetaDataEntry[] meta ) { int uploadId = -1; - Transaction txn = null; + TransactionLegacy txn = null; try { - txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); Date tod = new Date(); MultiPartUploadsVO uploadVO = new MultiPartUploadsVO(accessKey, bucketName, key, cannedAccess, tod); @@ -315,9 +316,9 @@ public class MultipartLoadDao { private void saveMultipartMeta( int uploadId, S3MetaDataEntry[] meta ) { if (null == meta) return; - Transaction txn = null; + TransactionLegacy txn = null; try { - txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); for( int i=0; i < meta.length; i++ ) { S3MetaDataEntry entry = meta[i]; diff --git a/awsapi/src/com/cloud/bridge/persist/dao/MultipartMetaDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/MultipartMetaDaoImpl.java index 7ab93599d22..fec0a2c1280 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/MultipartMetaDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/MultipartMetaDaoImpl.java @@ -27,6 +27,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; 
import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={MultipartMetaDao.class}) @@ -37,7 +38,7 @@ public class MultipartMetaDaoImpl extends GenericDaoBase SearchBuilder searchByUID = createSearchBuilder(); searchByUID.and("UploadID", searchByUID.entity().getUploadID(), SearchCriteria.Op.EQ); searchByUID.done(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = searchByUID.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/OfferingDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/OfferingDaoImpl.java index ea7d264f80c..963f1084134 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/OfferingDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/OfferingDaoImpl.java @@ -29,6 +29,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={OfferingDao.class}) @@ -39,7 +40,7 @@ public class OfferingDaoImpl extends GenericDaoBase impl @Override public int getOfferingCount() { - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); return listAll().size(); @@ -56,7 +57,7 @@ public class OfferingDaoImpl extends GenericDaoBase impl SearchBuilder searchByAmazon = createSearchBuilder(); searchByAmazon.and("AmazonEC2Offering", searchByAmazon.entity().getAmazonOffering() , SearchCriteria.Op.EQ); searchByAmazon.done(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = searchByAmazon.create(); @@ -74,7 +75,7 @@ public class 
OfferingDaoImpl extends GenericDaoBase impl SearchBuilder searchByAmazon = createSearchBuilder(); searchByAmazon.and("CloudStackOffering", searchByAmazon.entity().getAmazonOffering() , SearchCriteria.Op.EQ); searchByAmazon.done(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = searchByAmazon.create(); @@ -93,7 +94,7 @@ public class OfferingDaoImpl extends GenericDaoBase impl searchByAmazon.and("CloudStackOffering", searchByAmazon.entity().getAmazonOffering() , SearchCriteria.Op.EQ); searchByAmazon.and("AmazonEC2Offering", searchByAmazon.entity().getCloudstackOffering() , SearchCriteria.Op.EQ); searchByAmazon.done(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); OfferingBundleVO offering = null; try { txn.start(); @@ -122,7 +123,7 @@ public class OfferingDaoImpl extends GenericDaoBase impl SearchBuilder searchByAmazon = createSearchBuilder(); searchByAmazon.and("AmazonEC2Offering", searchByAmazon.entity().getAmazonOffering() , SearchCriteria.Op.EQ); searchByAmazon.done(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = searchByAmazon.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/SAclDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/SAclDaoImpl.java index d88660e05c9..d4b4c90fedc 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/SAclDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/SAclDaoImpl.java @@ -32,6 +32,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={SAclDao.class}) @@ -46,7 +47,7 @@ 
public class SAclDaoImpl extends GenericDaoBase implements SAclDao SearchByTarget.and("TargetID", SearchByTarget.entity().getTargetId(), SearchCriteria.Op.EQ); SearchByTarget.done(); Filter filter = new Filter(SAclVO.class, "grantOrder", Boolean.TRUE, null, null); - Transaction txn = Transaction.open( Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open( TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = SearchByTarget.create(); @@ -66,7 +67,7 @@ public class SAclDaoImpl extends GenericDaoBase implements SAclDao SearchByAcl.and("TargetID", SearchByAcl.entity().getTargetId(), SearchCriteria.Op.EQ); SearchByAcl.and("GranteeCanonicalID", SearchByAcl.entity().getGranteeCanonicalId(), SearchCriteria.Op.EQ); Filter filter = new Filter(SAclVO.class, "grantOrder", Boolean.TRUE, null, null); - Transaction txn = Transaction.open( Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open( TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = SearchByAcl.create(); @@ -85,7 +86,7 @@ public class SAclDaoImpl extends GenericDaoBase implements SAclDao SearchByTarget.and("Target", SearchByTarget.entity().getTarget(), SearchCriteria.Op.EQ); SearchByTarget.and("TargetID", SearchByTarget.entity().getTargetId(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = SearchByTarget.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/SBucketDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/SBucketDaoImpl.java index 817c682a946..552281d8b85 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/SBucketDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/SBucketDaoImpl.java @@ -29,6 +29,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; 
+import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={SBucketDao.class}) @@ -42,7 +43,7 @@ public class SBucketDaoImpl extends GenericDaoBase implements S SearchBuilder SearchByName = createSearchBuilder(); SearchByName.and("Name", SearchByName.entity().getName(), SearchCriteria.Op.EQ); //Transaction txn = Transaction.open(Transaction.AWSAPI_DB); - Transaction txn = Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.open("cloudbridge", TransactionLegacy.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc = SearchByName.create(); @@ -59,7 +60,7 @@ public class SBucketDaoImpl extends GenericDaoBase implements S SearchBuilder ByCanonicalID = createSearchBuilder(); ByCanonicalID.and("OwnerCanonicalID", ByCanonicalID.entity().getOwnerCanonicalId(), SearchCriteria.Op.EQ); Filter filter = new Filter(SBucketVO.class, "createTime", Boolean.TRUE, null, null); - Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc = ByCanonicalID.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/SHostDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/SHostDaoImpl.java index 9b6b5359759..5d2e9b901b3 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/SHostDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/SHostDaoImpl.java @@ -25,6 +25,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={SHostDao.class}) @@ -36,7 +37,7 @@ public class SHostDaoImpl extends GenericDaoBase implements SHost SearchBuilder HostSearch = createSearchBuilder(); HostSearch.and("Host", HostSearch.entity().getHost(), 
SearchCriteria.Op.EQ); HostSearch.done(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = HostSearch.create(); @@ -55,7 +56,7 @@ public class SHostDaoImpl extends GenericDaoBase implements SHost LocalStorageHostSearch.and("MHostID", LocalStorageHostSearch.entity().getMhostid(), SearchCriteria.Op.EQ); LocalStorageHostSearch.and("ExportRoot", LocalStorageHostSearch.entity().getExportRoot(), SearchCriteria.Op.EQ); LocalStorageHostSearch.done(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); SearchCriteria sc = LocalStorageHostSearch.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/SMetaDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/SMetaDaoImpl.java index 8fdc9493d82..95355b92689 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/SMetaDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/SMetaDaoImpl.java @@ -28,6 +28,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={SMetaDao.class}) @@ -41,7 +42,7 @@ public class SMetaDaoImpl extends GenericDaoBase implements SMeta SearchByTarget.and("Target", SearchByTarget.entity().getTarget(), SearchCriteria.Op.EQ); SearchByTarget.and("TargetID", SearchByTarget.entity().getTargetId(), SearchCriteria.Op.EQ); SearchByTarget.done(); - Transaction txn = Transaction.open( Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open( TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = SearchByTarget.create(); @@ -71,7 +72,7 @@ public class SMetaDaoImpl extends GenericDaoBase implements SMeta SearchBuilder SearchByTarget = createSearchBuilder(); SearchByTarget.and("Target", 
SearchByTarget.entity().getTarget(), SearchCriteria.Op.EQ); SearchByTarget.and("TargetID", SearchByTarget.entity().getTargetId(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = SearchByTarget.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/SObjectDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/SObjectDaoImpl.java index 6d23757b8b5..e6370feca1b 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/SObjectDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/SObjectDaoImpl.java @@ -33,6 +33,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={SObjectDao.class}) @@ -47,7 +48,7 @@ public class SObjectDaoImpl extends GenericDaoBase implements S SearchBuilder SearchByName = createSearchBuilder(); SearchByName.and("SBucketID", SearchByName.entity().getBucketID() , SearchCriteria.Op.EQ); SearchByName.and("NameKey", SearchByName.entity().getNameKey() , SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = SearchByName.create(); @@ -76,7 +77,7 @@ public class SObjectDaoImpl extends GenericDaoBase implements S SearchByBucket.and("SBucketID", SearchByBucket.entity().getBucketID(), SearchCriteria.Op.EQ); SearchByBucket.and("DeletionMark", SearchByBucket.entity().getDeletionMark(), SearchCriteria.Op.NULL); - Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc 
= SearchByBucket.create(); @@ -100,7 +101,7 @@ public class SObjectDaoImpl extends GenericDaoBase implements S List objects = new ArrayList(); getAllBuckets.and("SBucketID", getAllBuckets.entity().getBucketID(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.currentTxn(); // Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); try { txn.start(); SearchCriteria sc = getAllBuckets.create(); diff --git a/awsapi/src/com/cloud/bridge/persist/dao/SObjectItemDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/SObjectItemDaoImpl.java index 57140c49072..294b32d4d4f 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/SObjectItemDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/SObjectItemDaoImpl.java @@ -27,6 +27,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={SObjectItemDao.class}) @@ -39,7 +40,7 @@ public class SObjectItemDaoImpl extends GenericDaoBase impl @Override public SObjectItemVO getByObjectIdNullVersion(long id) { - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); SearchBuilder SearchByID = createSearchBuilder(); SearchByID.and("ID", SearchByID.entity().getId(), SearchCriteria.Op.EQ); @@ -56,7 +57,7 @@ public class SObjectItemDaoImpl extends GenericDaoBase impl @Override public List getItems(long sobjectID) { - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); SearchBuilder SearchBySobjectID = createSearchBuilder(); SearchBySobjectID.and("SObjectID", SearchBySobjectID.entity().getId(), SearchCriteria.Op.EQ); diff --git 
a/awsapi/src/com/cloud/bridge/persist/dao/UserCredentialsDaoImpl.java b/awsapi/src/com/cloud/bridge/persist/dao/UserCredentialsDaoImpl.java index c45886f794c..b60a717a3ee 100644 --- a/awsapi/src/com/cloud/bridge/persist/dao/UserCredentialsDaoImpl.java +++ b/awsapi/src/com/cloud/bridge/persist/dao/UserCredentialsDaoImpl.java @@ -29,6 +29,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={UserCredentialsDao.class}) @@ -41,7 +42,7 @@ public class UserCredentialsDaoImpl extends GenericDaoBase SearchByAccessKey = createSearchBuilder(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchByAccessKey.and("AccessKey", SearchByAccessKey.entity() @@ -60,7 +61,7 @@ public class UserCredentialsDaoImpl extends GenericDaoBase SearchByCertID = createSearchBuilder(); SearchByCertID.and("CertUniqueId", SearchByCertID.entity().getCertUniqueId(), SearchCriteria.Op.EQ); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); SearchCriteria sc = SearchByCertID.create(); diff --git a/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java b/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java index 50ac26f2901..1ef04a4aebd 100644 --- a/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java +++ b/awsapi/src/com/cloud/bridge/service/EC2RestServlet.java @@ -161,6 +161,7 @@ import com.cloud.bridge.util.ConfigurationHelper; import com.cloud.bridge.util.EC2RestAuth; import com.cloud.stack.models.CloudStackAccount; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component("EC2RestServlet") public class EC2RestServlet extends HttpServlet { @@ -377,7 +378,7 
@@ public class EC2RestServlet extends HttpServlet { private void setUserKeys( HttpServletRequest request, HttpServletResponse response ) { String[] accessKey = null; String[] secretKey = null; - Transaction txn = null; + TransactionLegacy txn = null; try { // -> all these parameters are required accessKey = request.getParameterValues( "accesskey" ); @@ -398,7 +399,7 @@ public class EC2RestServlet extends HttpServlet { return; } try { - txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn.start(); // -> use the keys to see if the account actually exists ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], secretKey[0] ); @@ -434,7 +435,7 @@ public class EC2RestServlet extends HttpServlet { */ private void setCertificate( HttpServletRequest request, HttpServletResponse response ) throws Exception { - Transaction txn = null; + TransactionLegacy txn = null; try { // [A] Pull the cert and cloud AccessKey from the request String[] certificate = request.getParameterValues( "cert" ); @@ -470,7 +471,7 @@ public class EC2RestServlet extends HttpServlet { // [C] Associate the cert's uniqueId with the Cloud API keys String uniqueId = AuthenticationUtils.X509CertUniqueId( userCert ); logger.debug( "SetCertificate, uniqueId: " + uniqueId ); - txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn.start(); UserCredentialsVO user = ucDao.getByAccessKey(accessKey[0]); user.setCertUniqueId(uniqueId); @@ -505,7 +506,7 @@ public class EC2RestServlet extends HttpServlet { */ private void deleteCertificate( HttpServletRequest request, HttpServletResponse response ) throws Exception { - Transaction txn = null; + TransactionLegacy txn = null; try { String [] accessKey = request.getParameterValues( "AWSAccessKeyId" ); if ( null == accessKey || 0 == accessKey.length ) { @@ -527,7 +528,7 @@ public class EC2RestServlet extends HttpServlet { /* 
UserCredentialsDao credentialDao = new UserCredentialsDao(); credentialDao.setCertificateId( accessKey[0], null ); - */ txn = Transaction.open(Transaction.AWSAPI_DB); + */ txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); UserCredentialsVO user = ucDao.getByAccessKey(accessKey[0]); user.setCertUniqueId(null); ucDao.update(user.getId(), user); diff --git a/awsapi/src/com/cloud/bridge/service/S3RestServlet.java b/awsapi/src/com/cloud/bridge/service/S3RestServlet.java index 7e69fd65087..192e1a28e51 100644 --- a/awsapi/src/com/cloud/bridge/service/S3RestServlet.java +++ b/awsapi/src/com/cloud/bridge/service/S3RestServlet.java @@ -67,6 +67,7 @@ import com.cloud.bridge.util.RestAuth; import com.cloud.bridge.util.S3SoapAuth; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class S3RestServlet extends HttpServlet { private static final long serialVersionUID = -6168996266762804877L; public static final String ENABLE_S3_API="enable.s3.api"; @@ -139,7 +140,7 @@ public class S3RestServlet extends HttpServlet { */ private void processRequest( HttpServletRequest request, HttpServletResponse response, String method ) { - Transaction txn = Transaction.open("cloudbridge", Transaction.AWSAPI_DB, true); + TransactionLegacy txn = TransactionLegacy.open("cloudbridge", TransactionLegacy.AWSAPI_DB, true); try { logRequest(request); @@ -274,7 +275,7 @@ public class S3RestServlet extends HttpServlet { // -> use the keys to see if the account actually exists //ServiceProvider.getInstance().getEC2Engine().validateAccount( accessKey[0], secretKey[0] ); //UserCredentialsDaoImpl credentialDao = new UserCredentialsDao(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn.start(); UserCredentialsVO user = new UserCredentialsVO(accessKey[0], secretKey[0]); user = ucDao.persist(user); diff --git 
a/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java b/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java index c98de34a698..4d7c41a75b3 100644 --- a/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java +++ b/awsapi/src/com/cloud/bridge/service/controller/s3/S3BucketAction.java @@ -94,6 +94,7 @@ import com.cloud.bridge.util.XSerializer; import com.cloud.bridge.util.XSerializerXmlAdapter; import com.cloud.bridge.util.XmlHelper; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class S3BucketAction implements ServletAction { @@ -371,7 +372,7 @@ public class S3BucketAction implements ServletAction { response.setStatus(403); return; } - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); // [B] Place the policy into the database over writting an existing policy try { // -> first make sure that the policy is valid by parsing it diff --git a/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java b/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java index a0892cc979b..0854741699f 100644 --- a/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java +++ b/awsapi/src/com/cloud/bridge/service/controller/s3/ServiceProvider.java @@ -35,6 +35,7 @@ import javax.annotation.PostConstruct; import javax.inject.Inject; import org.apache.axis2.AxisFault; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.log4j.Logger; import org.apache.log4j.xml.DOMConfigurator; import org.springframework.stereotype.Component; @@ -61,6 +62,7 @@ import com.cloud.bridge.util.OrderedPair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component public class ServiceProvider extends ManagerBase { @@ -89,7 +91,7 @@ public class 
ServiceProvider extends ManagerBase { protected ServiceProvider() throws IOException { // register service implementation object - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn.close(); } @@ -182,7 +184,7 @@ public class ServiceProvider extends ManagerBase { public UserInfo getUserInfo(String accessKey) { UserInfo info = new UserInfo(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); try { txn.start(); UserCredentialsVO cloudKeys = ucDao.getByAccessKey( accessKey ); @@ -252,7 +254,7 @@ public class ServiceProvider extends ManagerBase { multipartDir = properties.getProperty("storage.multipartDir"); - Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn1 = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); timer.schedule(getHeartbeatTask(), HEARTBEAT_INTERVAL, HEARTBEAT_INTERVAL); txn1.close(); @@ -280,10 +282,9 @@ public class ServiceProvider extends ManagerBase { } private TimerTask getHeartbeatTask() { - return new TimerTask() { - + return new ManagedContextTimerTask() { @Override - public void run() { + protected void runInContext() { try { mhost.setLastHeartbeatTime(DateHelper.currentGMTTime()); mhostDao.updateHeartBeat(mhost); diff --git a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java index 7beb012d4b7..05e87d788db 100644 --- a/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java +++ b/awsapi/src/com/cloud/bridge/service/core/s3/S3Engine.java @@ -86,6 +86,7 @@ import com.cloud.bridge.util.StringHelper; import com.cloud.bridge.util.Triple; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; /** * The CRUD control actions to be invoked from S3BucketAction or S3ObjectAction. 
@@ -195,7 +196,7 @@ public class S3Engine { String cannedAccessPolicy = request.getCannedAccess(); String bucketName = request.getBucketName(); response.setBucketName( bucketName ); - Transaction txn= null; + TransactionLegacy txn= null; verifyBucketName( bucketName, false ); S3PolicyContext context = new S3PolicyContext( PolicyActions.CreateBucket, bucketName ); @@ -205,7 +206,7 @@ public class S3Engine { OrderedPair shost_storagelocation_pair = null; boolean success = false; try { - txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); if (bucketDao.getByName(request.getBucketName()) != null) throw new ObjectAlreadyExistsException("Bucket already exists"); @@ -257,10 +258,10 @@ public class S3Engine { String bucketName = request.getBucketName(); SBucketVO sbucket = bucketDao.getByName(bucketName); - Transaction txn = null; + TransactionLegacy txn = null; if ( sbucket != null ) { - txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn.start(); S3PolicyContext context = new S3PolicyContext( PolicyActions.DeleteBucket, bucketName ); switch( verifyPolicy( context )) @@ -699,7 +700,7 @@ public class S3Engine { if (null != version) httpResp.addHeader("x-amz-version-id", version); httpResp.flushBuffer(); - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); // [C] Re-assemble the object from its uploaded file parts try { // explicit transaction control to avoid holding transaction during @@ -752,11 +753,11 @@ public class S3Engine { S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); InputStream is = null; - Transaction txn = null; + TransactionLegacy txn = null; try { // explicit transaction control to avoid holding transaction during file-copy process - 
txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn.start(); is = request.getDataInputStream(); String md5Checksum = bucketAdapter.saveObject(is, host_storagelocation_pair.getSecond(), bucket.getName(), itemFileName); @@ -813,11 +814,11 @@ public class S3Engine { S3BucketAdapter bucketAdapter = getStorageHostBucketAdapter(host_storagelocation_pair.getFirst()); String itemFileName = object_objectitem_pair.getSecond().getStoredPath(); InputStream is = null; - Transaction txn = null; + TransactionLegacy txn = null; try { // explicit transaction control to avoid holding transaction during file-copy process - txn = Transaction.open(Transaction.AWSAPI_DB); + txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn.start(); is = request.getInputStream(); @@ -1505,7 +1506,7 @@ public class S3Engine { context.setEvalParam( ConditionKeys.Acl, cannedAccessPolicy); verifyAccess( context, "SBucket", bucket.getId(), SAcl.PERMISSION_WRITE ); // TODO - check this validates plain POSTs - Transaction txn = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn.start(); // [B] If versioning is off them we over write a null object item @@ -1554,7 +1555,7 @@ public class S3Engine { } else { - Transaction txn1 = Transaction.open(Transaction.AWSAPI_DB); + TransactionLegacy txn1 = TransactionLegacy.open(TransactionLegacy.AWSAPI_DB); txn1.start(); // -> there is no object nor an object item object = new SObjectVO(); diff --git a/client/WEB-INF/classes/resources/messages.properties b/client/WEB-INF/classes/resources/messages.properties index bc1e43692a3..12d2a11a294 100644 --- a/client/WEB-INF/classes/resources/messages.properties +++ b/client/WEB-INF/classes/resources/messages.properties @@ -14,6 +14,12 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
+label.hypervisors=Hypervisors +label.home=Home +label.sockets=Sockets +label.root.disk.size=Root disk size +label.s3.nfs.server=S3 NFS Server +label.s3.nfs.path=S3 NFS Path label.delete.events=Delete events label.delete.alerts=Delete alerts label.archive.alerts=Archive alerts @@ -39,7 +45,7 @@ message.acquire.ip.nic=Please confirm that you would like to acquire a new secon message.select.affinity.groups=Please select any affinity groups you want this VM to belong to: message.no.affinity.groups=You do not have any affinity groups. Please continue to the next step. label.action.delete.nic=Remove NIC -message.action.delete.nic=Please confirm that want to remove this NIC, which will also remove the associated network from the VM. +message.action.delete.nic=Please confirm that you want to remove this NIC, which will also remove the associated network from the VM. changed.item.properties=Changed item properties confirm.enable.s3=Please fill in the following information to enable support for S3-backed Secondary Storage confirm.enable.swift=Please fill in the following information to enable support for Swift @@ -200,6 +206,8 @@ label.action.enable.user.processing=Enabling User.... label.action.enable.user=Enable User label.action.enable.zone.processing=Enabling Zone.... label.action.enable.zone=Enable Zone +label.action.expunge.instance=Expunge Instance +label.action.expunge.instance.processing=Expunging Instance.... label.action.force.reconnect.processing=Reconnecting.... label.action.force.reconnect=Force Reconnect label.action.generate.keys.processing=Generate Keys.... @@ -249,6 +257,8 @@ label.action.stop.systemvm.processing=Stopping System VM.... label.action.stop.systemvm=Stop System VM label.action.take.snapshot.processing=Taking Snapshot.... label.action.take.snapshot=Take Snapshot +label.action.revert.snapshot.processing=Reverting to Snapshot... +label.action.revert.snapshot=Revert to Snapshot label.action.unmanage.cluster.processing=Unmanaging Cluster....
label.action.unmanage.cluster=Unmanage Cluster label.action.update.OS.preference.processing=Updating OS Preference.... @@ -315,6 +325,7 @@ label.add.template=Add Template label.add.to.group=Add to group label.add.user=Add User label.add.vlan=Add VLAN +label.add.vxlan=Add VXLAN label.add.VM.to.tier=Add VM to tier label.add.vm=Add VM label.add.vms.to.lb=Add VM(s) to load balancer rule @@ -412,6 +423,11 @@ label.cluster.type=Cluster Type label.cluster=Cluster label.clusters=Clusters label.clvm=CLVM +label.rbd=RBD +label.rbd.monitor=Ceph monitor +label.rbd.pool=Ceph pool +label.rbd.id=Cephx user +label.rbd.secret=Cephx secret label.code=Code label.community=Community label.compute.and.storage=Compute and Storage @@ -540,6 +556,7 @@ label.end.IP=End IP label.end.port=End Port label.end.reserved.system.IP=End Reserved system IP label.end.vlan=End Vlan +label.end.vxlan=End Vxlan label.endpoint.or.operation=Endpoint or Operation label.endpoint=Endpoint label.enter.token=Enter token @@ -551,6 +568,7 @@ label.ESP.lifetime=ESP Lifetime (second) label.ESP.policy=ESP policy label.esx.host=ESX/ESXi Host label.example=Example +label.expunge=Expunge label.external.link=External link label.f5=F5 label.failed=Failed @@ -799,6 +817,7 @@ label.network.domain.text=Network domain label.network.domain=Network Domain label.network.id=Network ID label.network.label.display.for.blank.value=Use default gateway +label.network.limits=Network limits label.network.name=Network Name label.network.offering.display.text=Network Offering Display Text label.network.offering.id=Network Offering ID @@ -1026,12 +1045,14 @@ label.source.nat=Source NAT label.source=Source label.specify.IP.ranges=Specify IP ranges label.specify.vlan=Specify VLAN +label.specify.vxlan=Specify VXLAN label.SR.name = SR Name-Label label.srx=SRX label.start.IP=Start IP label.start.port=Start Port label.start.reserved.system.IP=Start Reserved system IP label.start.vlan=Start Vlan +label.start.vxlan=Start Vxlan label.state=State 
label.static.nat.enabled=Static NAT Enabled label.static.nat.to=Static NAT to @@ -1158,6 +1179,9 @@ label.virtual.routers=Virtual Routers label.vlan.id=VLAN ID label.vlan.range=VLAN Range label.vlan=VLAN +label.vxlan.id=VXLAN ID +label.vxlan.range=VXLAN Range +label.vxlan=VXLAN label.vm.add=Add Instance label.vm.destroy=Destroy label.vm.display.name=VM display name @@ -1263,6 +1287,7 @@ message.action.enable.nexusVswitch=Please confirm that you want to enable this n message.action.enable.physical.network=Please confirm that you want to enable this physical network. message.action.enable.pod=Please confirm that you want to enable this pod. message.action.enable.zone=Please confirm that you want to enable this zone. +message.action.expunge.instance=Please confirm that you want to expunge this instance. message.action.force.reconnect=Your host has been successfully forced to reconnect. This process can take up to several minutes. message.action.host.enable.maintenance.mode=Enabling maintenance mode will cause a live migration of all running instances on this host to any available host. message.action.instance.reset.password=Please confirm that you want to change the ROOT password for this virtual machine. @@ -1283,6 +1308,7 @@ message.action.stop.instance=Please confirm that you want to stop this instance. message.action.stop.router=All services provided by this virtual router will be interrupted. Please confirm that you want to stop this router. message.action.stop.systemvm=Please confirm that you want to stop this system VM. message.action.take.snapshot=Please confirm that you want to take a snapshot of this volume. +message.action.revert.snapshot=Please confirm that you want to revert the owning volume to this snapshot. message.action.unmanage.cluster=Please confirm that you want to unmanage the cluster. message.action.vmsnapshot.delete=Please confirm that you want to delete this VM snapshot. 
message.action.vmsnapshot.revert=Revert VM snapshot diff --git a/client/WEB-INF/classes/resources/messages_de_DE.properties b/client/WEB-INF/classes/resources/messages_de_DE.properties index 3c0c8deaabd..2f164609d00 100644 --- a/client/WEB-INF/classes/resources/messages_de_DE.properties +++ b/client/WEB-INF/classes/resources/messages_de_DE.properties @@ -224,6 +224,7 @@ label.add.system.service.offering=System-Service-Angebot hinzuf\u00fcgen label.add.template=Vorlage hinzuf\u00fcgen label.add.user=Benutzer hinzuf\u00fcgen label.add.vlan=VLAN hinzuf\u00fcgen +label.add.vxlan=VXLAN hinzuf\u00fcgen label.add.volume=Volume hinzuf\u00fcgen label.add.zone=Zone hinzuf\u00fcgen label.admin.accounts=Administrator-Konten @@ -621,6 +622,9 @@ label.virtual.network=Virtuelles Netzwerk label.vlan.id=VLAN ID label.vlan.range=VLAN Reichweite label.vlan=VLAN +label.vxlan.id=VXLAN ID +label.vxlan.range=VXLAN Reichweite +label.vxlan=VXLAN label.vm.add=Instanz hinzuf\u00fcgen label.vm.destroy=Zerst\u00f6ren label.VMFS.datastore=VMFS Datenspeicher diff --git a/client/WEB-INF/classes/resources/messages_es.properties b/client/WEB-INF/classes/resources/messages_es.properties index 86eb596689c..3620047a275 100644 --- a/client/WEB-INF/classes/resources/messages_es.properties +++ b/client/WEB-INF/classes/resources/messages_es.properties @@ -238,6 +238,7 @@ label.add.template=A\u00c3\u00b1adir plantilla label.add.to.group=Agregar al grupo label.add.user=Agregar usuario label.add.vlan=A\u00c3\u00b1adir VLAN +label.add.vxlan=A\u00c3\u00b1adir VXLAN label.add.volume=A\u00c3\u00b1adir volumen label.add.zone=A\u00c3\u00b1adir Zona label.admin.accounts=Administrador de Cuentas @@ -606,6 +607,7 @@ label.snapshot.s=Instant\u00c3\u00a1nea (s) label.snapshots=instant\u00c3\u00a1neas label.source.nat=NAT Fuente label.specify.vlan=Especifique VLAN +label.specify.vxlan=Especifique VXLAN label.SR.name = SR Nombre de etiqueta label.start.port=Iniciar Puerto label.state=Estado @@ -685,6 +687,9 @@ 
label.virtual.network=Red Virtual label.vlan.id=ID de VLAN label.vlan.range=VLAN Gama label.vlan=VLAN +label.vxlan.id=ID de VXLAN +label.vxlan.range=VXLAN Gama +label.vxlan=VXLAN label.vm.add=A\u00c3\u00b1adir Instancia label.vm.destroy=Destroy label.VMFS.datastore=VMFS de datos tienda diff --git a/client/WEB-INF/classes/resources/messages_fr_FR.properties b/client/WEB-INF/classes/resources/messages_fr_FR.properties index 284fde89386..db624221ddf 100644 --- a/client/WEB-INF/classes/resources/messages_fr_FR.properties +++ b/client/WEB-INF/classes/resources/messages_fr_FR.properties @@ -300,6 +300,7 @@ label.add.template=Ajouter un mod\u00e8le label.add.to.group=Ajouter au groupe label.add.user=Ajouter un utilisateur label.add.vlan=Ajouter un VLAN +label.add.vxlan=Ajouter un VXLAN label.add.vm=Ajouter VM label.add.vms=Ajouter VMs label.add.vms.to.lb=Ajouter une/des VM(s) \u00e0 la r\u00e8gle de r\u00e9partition de charge @@ -512,6 +513,7 @@ label.endpoint=Terminaison label.end.port=Port de fin label.end.reserved.system.IP=Adresse IP de fin r\u00e9serv\u00e9e Syst\u00e8me label.end.vlan=VLAN de fin +label.end.vxlan=VXLAN de fin label.enter.token=Entrez le jeton unique label.error.code=Code d\\'erreur label.error=Erreur @@ -995,12 +997,14 @@ label.source.nat=NAT Source label.source=Origine label.specify.IP.ranges=Sp\u00e9cifier des plages IP label.specify.vlan=Pr\u00e9ciser le VLAN +label.specify.vxlan=Pr\u00e9ciser le VXLAN label.SR.name = Nom du point de montage label.srx=SRX label.start.IP=Plage de d\u00e9but IP label.start.port=Port de d\u00e9but label.start.reserved.system.IP=Adresse IP de d\u00e9but r\u00e9serv\u00e9e Syst\u00e8me label.start.vlan=VLAN de d\u00e9part +label.start.vxlan=VXLAN de d\u00e9part label.state=\u00c9tat label.static.nat.enabled=NAT statique activ\u00e9 label.static.nat=NAT Statique @@ -1127,6 +1131,9 @@ label.virtual.routers=Routeurs virtuels label.vlan.id=ID du VLAN label.vlan.range=Plage du VLAN label.vlan=VLAN +label.vxlan.id=VXLAN ID 
+label.vxlan.range=Plage du VXLAN +label.vxlan=VXLAN label.vm.add=Ajouter une instance label.vm.destroy=D\u00e9truire label.vm.display.name=Nom commun VM diff --git a/client/WEB-INF/classes/resources/messages_ja.properties b/client/WEB-INF/classes/resources/messages_ja.properties index 56fa55a3e4c..d01efe88ff1 100644 --- a/client/WEB-INF/classes/resources/messages_ja.properties +++ b/client/WEB-INF/classes/resources/messages_ja.properties @@ -1,4 +1,4 @@ -# Licensed to the Apache Software Foundation (ASF) under one +# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file @@ -313,6 +313,7 @@ label.add.template=\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8\u306e\u8ffd\u52a0 label.add.to.group=\u8ffd\u52a0\u5148\u30b0\u30eb\u30fc\u30d7 label.add.user=\u30e6\u30fc\u30b6\u30fc\u306e\u8ffd\u52a0 label.add.vlan=VLAN \u306e\u8ffd\u52a0 +label.add.vxlan=VXLAN \u306e\u8ffd\u52a0 label.add.VM.to.tier=\u968e\u5c64\u3078\u306e VM \u306e\u8ffd\u52a0 label.add.vm=VM \u306e\u8ffd\u52a0 label.add.vms.to.lb=\u8ca0\u8377\u5206\u6563\u898f\u5247\u3078\u306e VM \u306e\u8ffd\u52a0 @@ -532,6 +533,7 @@ label.end.IP=\u7d42\u4e86 IP \u30a2\u30c9\u30ec\u30b9 label.end.port=\u7d42\u4e86\u30dd\u30fc\u30c8 label.end.reserved.system.IP=\u4e88\u7d04\u6e08\u307f\u7d42\u4e86\u30b7\u30b9\u30c6\u30e0 IP \u30a2\u30c9\u30ec\u30b9 label.end.vlan=\u7d42\u4e86 VLAN +label.end.vxlan=\u7d42\u4e86 VXLAN label.endpoint.or.operation=\u30a8\u30f3\u30c9\u30dd\u30a4\u30f3\u30c8\u307e\u305f\u306f\u64cd\u4f5c label.endpoint=\u30a8\u30f3\u30c9\u30dd\u30a4\u30f3\u30c8 label.enter.token=\u30c8\u30fc\u30af\u30f3\u306e\u5165\u529b @@ -1007,12 +1009,14 @@ label.source.nat=\u9001\u4fe1\u5143 NAT label.source=\u9001\u4fe1\u5143 label.specify.IP.ranges=IP \u30a2\u30c9\u30ec\u30b9\u306e\u7bc4\u56f2\u306e\u6307\u5b9a label.specify.vlan=VLAN
\u3092\u6307\u5b9a\u3059\u308b +label.specify.vxlan=VXLAN \u3092\u6307\u5b9a\u3059\u308b label.SR.name = SR \u540d\u30e9\u30d9\u30eb label.srx=SRX label.start.IP=\u958b\u59cb IP \u30a2\u30c9\u30ec\u30b9 label.start.port=\u958b\u59cb\u30dd\u30fc\u30c8 label.start.reserved.system.IP=\u4e88\u7d04\u6e08\u307f\u958b\u59cb\u30b7\u30b9\u30c6\u30e0 IP \u30a2\u30c9\u30ec\u30b9 label.start.vlan=\u958b\u59cb VLAN +label.start.vxlan=\u958b\u59cb VXLAN label.state=\u72b6\u614b label.static.nat.enabled=\u9759\u7684 NAT \u6709\u52b9 label.static.nat.to=\u9759\u7684 NAT \u306e\u8a2d\u5b9a\u5148: @@ -1139,6 +1143,9 @@ label.virtual.routers=\u4eee\u60f3\u30eb\u30fc\u30bf\u30fc label.vlan.id=VLAN ID label.vlan.range=VLAN \u306e\u7bc4\u56f2 label.vlan=VLAN +label.vxlan.id=VXLAN ID +label.vxlan.range=VXLAN \u306e\u7bc4\u56f2 +label.vxlan=VXLAN label.vm.add=\u30a4\u30f3\u30b9\u30bf\u30f3\u30b9\u306e\u8ffd\u52a0 label.vm.destroy=\u7834\u68c4 label.vm.display.name=VM \u8868\u793a\u540d diff --git a/client/WEB-INF/classes/resources/messages_ko_KR.properties b/client/WEB-INF/classes/resources/messages_ko_KR.properties index 7f3d5ebae75..b755072d613 100644 --- a/client/WEB-INF/classes/resources/messages_ko_KR.properties +++ b/client/WEB-INF/classes/resources/messages_ko_KR.properties @@ -289,6 +289,7 @@ label.add.to.group=\uadf8\ub8f9\uc5d0 \ucd94\uac00 label.add=\ucd94\uac00 label.add.user=\uc0ac\uc6a9\uc790 \ucd94\uac00 label.add.vlan=VLAN \ucd94\uac00 +label.add.vxlan=VXLAN \ucd94\uac00 label.add.vms.to.lb=\ub124\ud2b8\uc6cc\ud06c \ub85c\ub4dc \uacf5\uc720 \uaddc\uce59\uc5d0 VM \ucd94\uac00 label.add.vms=VM \ucd94\uac00 label.add.VM.to.tier=\uacc4\uce35\uc5d0 VM \ucd94\uac00 @@ -479,6 +480,7 @@ label.endpoint.or.operation=\uc5d4\ub4dc \ud3ec\uc778\ud2b8 \ub610\ub294 \uc791\ label.end.port=\uc885\ub8cc \ud3ec\ud1a0 label.end.reserved.system.IP=\uc608\uc57d\ub41c \uc885\ub8cc \uc2dc\uc2a4\ud15c IP \uc8fc\uc18c label.end.vlan=\uc885\ub8cc VLAN +label.end.vxlan=\uc885\ub8cc VXLAN 
label.enter.token=\ud1a0\ud070 \uc785\ub825 label.error.code=\uc624\ub958 \ucf54\ub4dc label.error=\uc624\ub958 @@ -925,12 +927,14 @@ label.source.nat=\uc804\uc1a1\uc6d0 NAT label.source=\uc2dc\uc791 \uc704\uce58 label.specify.IP.ranges=IP \uc8fc\uc18c \ubc94\uc704 \uc9c0\uc815 label.specify.vlan=VLAN \uc9c0\uc815 +label.specify.vxlan=VXLAN \uc9c0\uc815 label.SR.name = SR \uba85 \ub77c\ubca8 label.srx=SRX label.start.IP=\uc2dc\uc791 IP \uc8fc\uc18c label.start.port=\uc2dc\uc791 \ud3ec\ud1a0 label.start.reserved.system.IP=\uc608\uc57d\ub41c \uc2dc\uc791 \uc2dc\uc2a4\ud15c IP \uc8fc\uc18c label.start.vlan=\uc2dc\uc791 VLAN +label.start.vxlan=\uc2dc\uc791 VXLAN label.state=\uc0c1\ud0dc label.static.nat.enabled=\uc815\uc801 NAT \uc720\ud6a8 label.static.nat.to=\uc815\uc801 NAT \uc124\uc815 \uc704\uce58\: @@ -1055,6 +1059,9 @@ label.virtual.router=\uac00\uc0c1 \ub77c\uc6b0\ud130 label.vlan.id=VLAN ID label.vlan.range=VLAN \ubc94\uc704 label.vlan=\uac00\uc0c1 \ub124\ud2b8\uc6cc\ud06c(VLAN) +label.vxlan.id=VXLAN ID +label.vxlan.range=VXLAN \ubc94\uc704 +label.vxlan=VXLAN label.vm.add=\uc778\uc2a4\ud134\uc2a4 \ucd94\uac00 label.vm.destroy=\ud30c\uae30 label.vm.display.name=VM \ud45c\uc2dc\uba85 diff --git a/client/WEB-INF/classes/resources/messages_pt_BR.properties b/client/WEB-INF/classes/resources/messages_pt_BR.properties index 9f7a663657f..86bb83177a8 100644 --- a/client/WEB-INF/classes/resources/messages_pt_BR.properties +++ b/client/WEB-INF/classes/resources/messages_pt_BR.properties @@ -288,6 +288,7 @@ label.add.template=Adicionar Template label.add.to.group=Adicionar ao grupo label.add.user=Adicionar Usu\u00e1rio label.add.vlan=Adicionar VLAN +label.add.vxlan=Adicionar VXLAN label.add.vm=Adicionar VM label.add.vms=Adicionar VMs label.add.vms.to.lb=Add VM(s) na regra de balanceamento de carga @@ -480,6 +481,7 @@ label.endpoint=Ponto de acesso label.end.port=Porta Final label.end.reserved.system.IP=Fim dos IPs reservados para o sistema label.end.vlan=Vlan do fim 
+label.end.vxlan=Vxlan do fim label.enter.token=Digite o token label.error.code=C\u00f3digo de Erro label.error=Erro @@ -931,12 +933,14 @@ label.source.nat=Source NAT label.source=Origem label.specify.IP.ranges=Especifique range de IP label.specify.vlan=Especificar VLAN +label.specify.vxlan=Especificar VXLAN label.SR.name = SR Name-Label label.srx=SRX label.start.IP=IP do in\u00edcio label.start.port=Porta de In\u00edcio label.start.reserved.system.IP=In\u00edcio dos IPs reservados para o sistema label.start.vlan=Vlan do in\u00edcio +label.start.vxlan=Vxlan do in\u00edcio label.state=Estado label.static.nat.enabled=NAT est\u00e1tico Habilitado label.static.nat=NAT Est\u00e1tico @@ -1059,6 +1063,9 @@ label.virtual.routers=Roteadores Virtuais label.vlan.id=VLAN ID label.vlan.range=Intervalo de VLAN label.vlan=VLAN +label.vxlan.id=VXLAN ID +label.vxlan.range=Intervalo de VXLAN +label.vxlan=VXLAN label.vm.add=Adicionar Cloud Server label.vm.destroy=Apagar label.vm.display.name=Nome de exibi\u00e7\u00e3o da VM diff --git a/client/WEB-INF/classes/resources/messages_ru_RU.properties b/client/WEB-INF/classes/resources/messages_ru_RU.properties index 37a36a9b022..62c791f61b9 100644 --- a/client/WEB-INF/classes/resources/messages_ru_RU.properties +++ b/client/WEB-INF/classes/resources/messages_ru_RU.properties @@ -283,6 +283,7 @@ label.add.to.group=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0432 \u043 label.add=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c label.add.user=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044f label.add.vlan=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c VLAN +label.add.vxlan=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c VXLAN label.add.vms.to.lb=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0412\u041c \u0432 \u043f\u0440\u0430\u0432\u0438\u043b\u043e \u0431\u0430\u043b\u0430\u043d\u0441\u0438\u0440\u043e\u0432\u043a\u0438 
\u043d\u0430\u0433\u0440\u0443\u0437\u043a\u0438 label.add.vms=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0412\u041c label.add.vm=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u0412\u041c @@ -454,6 +455,7 @@ label.endpoint.or.operation=\u041a\u043e\u043d\u0435\u0447\u043d\u0430\u044f \u0 label.end.port=\u041a\u043e\u043d\u0435\u0447\u043d\u044b\u0439 \u043f\u043e\u0440\u0442 label.end.reserved.system.IP=\u041a\u043e\u043d\u0435\u0447\u043d\u044b\u0439 \u0437\u0430\u0440\u0435\u0437\u0435\u0440\u0432\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0439 \u0441\u0438\u0441\u0442\u0435\u043c\u043d\u044b\u0439 IP-\u0430\u0434\u0440\u0435\u0441 label.end.vlan=\u041a\u043e\u043d\u0435\u0447\u043d\u044b\u0439 VLAN +label.end.vxlan=\u041a\u043e\u043d\u0435\u0447\u043d\u044b\u0439 VXLAN label.enter.token=\u0412\u0432\u0435\u0434\u0438\u0442\u0435 \u0442\u0430\u043b\u043e\u043d label.error.code=\u041a\u043e\u0434 \u043e\u0448\u0438\u0431\u043a\u0438 label.error=\u041e\u0448\u0438\u0431\u043a\u0430 @@ -874,12 +876,14 @@ label.source.nat=Source NAT label.source=\u0418\u0441\u0442\u043e\u0447\u043d\u0438\u043a label.specify.IP.ranges=\u0423\u043a\u0430\u0436\u0438\u0442\u0435 \u0434\u0438\u0430\u043f\u0430\u0437\u043e\u043d IP-\u0430\u0434\u0440\u0435\u0441\u043e\u0432 label.specify.vlan=\u0423\u043a\u0430\u0436\u0438\u0442\u0435 VLAN +label.specify.vxlan=\u0423\u043a\u0430\u0436\u0438\u0442\u0435 VXLAN label.SR.name = SR Name-Label label.srx=SRX label.start.IP=\u041d\u0430\u0447\u0430\u043b\u044c\u043d\u044b\u0439 IP label.start.port=\u041d\u0430\u0447\u0430\u043b\u044c\u043d\u044b\u0439 \u043f\u043e\u0440\u0442 label.start.reserved.system.IP=\u041d\u0430\u0447\u0430\u043b\u044c\u043d\u044b\u0439 \u0437\u0430\u0440\u0435\u0437\u0435\u0440\u0432\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0439 \u0441\u0438\u0441\u0442\u0435\u043c\u043d\u044b\u0439 IP-\u0430\u0434\u0440\u0435\u0441 label.start.vlan=\u041d\u0430\u0447\u0430\u043b\u044c\u043d\u044b\u0439 VLAN 
+label.start.vxlan=\u041d\u0430\u0447\u0430\u043b\u044c\u043d\u044b\u0439 VXLAN label.state=\u0421\u043e\u0441\u0442\u043e\u044f\u043d\u0438\u0435 label.static.nat.enabled=\u0421\u0442\u0430\u0442\u0438\u0447\u0435\u0441\u043a\u0438\u0439 NAT \u0432\u043a\u043b\u044e\u0447\u0435\u043d label.static.nat.to=\u0421\u0442\u0430\u0442\u0438\u0447\u043d\u044b\u0439 NAT \u043a @@ -1001,6 +1005,9 @@ label.virtual.router=\u0412\u0438\u0440\u0442\u0443\u0430\u043b\u044c\u043d\u044 label.vlan.id=ID VLAN label.vlan.range=\u0414\u0438\u0430\u043f\u0430\u0437\u043e\u043d VLAN label.vlan=VLAN +label.vxlan.id=VXLAN ID +label.vxlan.range=\u0414\u0438\u0430\u043f\u0430\u0437\u043e\u043d VXLAN +label.vxlan=VXLAN label.vm.add=\u0414\u043e\u0431\u0430\u0432\u0438\u0442\u044c \u043c\u0430\u0448\u0438\u043d\u044b label.vm.destroy=\u0423\u043d\u0438\u0447\u0442\u043e\u0436\u0438\u0442\u044c label.vm.display.name=\u041e\u0442\u043e\u0431\u0440\u0430\u0436\u0430\u0435\u043c\u043e\u0435 \u0438\u043c\u044f \u0412\u041c diff --git a/client/WEB-INF/classes/resources/messages_zh_CN.properties b/client/WEB-INF/classes/resources/messages_zh_CN.properties index 6ab251faaed..acb67bb51f7 100644 --- a/client/WEB-INF/classes/resources/messages_zh_CN.properties +++ b/client/WEB-INF/classes/resources/messages_zh_CN.properties @@ -314,6 +314,7 @@ label.add.template=\u6dfb\u52a0\u6a21\u677f label.add.to.group=\u6dfb\u52a0\u5230\u7ec4 label.add.user=\u6dfb\u52a0\u7528\u6237 label.add.vlan=\u6dfb\u52a0 VLAN +label.add.vxlan=\u6dfb\u52a0 VXLAN label.add.VM.to.tier=\u5411\u5c42\u4e2d\u6dfb\u52a0 VM label.add.vm=\u6dfb\u52a0 VM label.add.vms.to.lb=\u5411\u8d1f\u8f7d\u5e73\u8861\u5668\u89c4\u5219\u4e2d\u6dfb\u52a0 VM @@ -539,6 +540,7 @@ label.end.IP=\u7ed3\u675f IP label.end.port=\u7ed3\u675f\u7aef\u53e3 label.end.reserved.system.IP=\u7ed3\u675f\u9884\u7559\u7cfb\u7edf IP label.end.vlan=\u7ed3\u675f VLAN +label.end.vxlan=\u7ed3\u675f VXLAN label.endpoint.or.operation=\u7aef\u70b9\u6216\u64cd\u4f5c
label.endpoint=\u7aef\u70b9 label.enter.token=\u8f93\u5165\u4ee4\u724c @@ -1025,12 +1027,14 @@ label.source.nat=\u6e90 NAT label.source=\u6e90\u7b97\u6cd5 label.specify.IP.ranges=\u6307\u5b9a IP \u8303\u56f4 label.specify.vlan=\u6307\u5b9a VLAN +label.specify.vxlan=\u6307\u5b9a VXLAN label.SR.name = SR \u540d\u79f0\u6807\u7b7e label.srx=SRX label.start.IP=\u8d77\u59cb IP label.start.port=\u8d77\u59cb\u7aef\u53e3 label.start.reserved.system.IP=\u8d77\u59cb\u9884\u7559\u7cfb\u7edf IP label.start.vlan=\u8d77\u59cb VLAN +label.start.vxlan=\u8d77\u59cb VXLAN label.state=\u72b6\u6001 label.static.nat.enabled=\u5df2\u542f\u7528\u9759\u6001 NAT label.static.nat.to=\u9759\u6001 NAT \u76ee\u6807 @@ -1157,6 +1161,9 @@ label.virtual.routers=\u865a\u62df\u8def\u7531\u5668 label.vlan.id=VLAN ID label.vlan.range=VLAN \u8303\u56f4 label.vlan=VLAN +label.vxlan.id=VXLAN ID +label.vxlan.range=VXLAN Range +label.vxlan=VXLAN label.vm.add=\u6dfb\u52a0\u5b9e\u4f8b label.vm.destroy=\u9500\u6bc1 label.vm.display.name=VM \u663e\u793a\u540d\u79f0 diff --git a/client/WEB-INF/web.xml b/client/WEB-INF/web.xml index e5c05d3fd20..1af38e14535 100644 --- a/client/WEB-INF/web.xml +++ b/client/WEB-INF/web.xml @@ -29,11 +29,11 @@ - org.springframework.web.context.ContextLoaderListener + org.apache.cloudstack.spring.module.web.CloudStackContextLoaderListener - + contextConfigLocation - classpath:applicationContext.xml, classpath:componentContext.xml + classpath:META-INF/cloudstack/webApplicationContext.xml diff --git a/client/pom.xml b/client/pom.xml index 99a3c3e1e92..8cbdaffe94f 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -7,7 +7,8 @@ the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
--> - 4.0.0 cloud-client-ui @@ -19,6 +20,16 @@ 4.3.0-SNAPSHOT + + org.apache.cloudstack + cloud-framework-spring-module + ${project.version} + + + org.apache.cloudstack + cloud-framework-spring-lifecycle + ${project.version} + org.apache.cloudstack cloud-plugin-storage-volume-solidfire @@ -109,6 +120,11 @@ cloud-plugin-network-internallb ${project.version} + + org.apache.cloudstack + cloud-plugin-network-vxlan + ${project.version} + org.apache.cloudstack cloud-plugin-hypervisor-xen @@ -280,11 +296,6 @@ cloud-plugin-host-anti-affinity ${project.version} - - org.apache.cloudstack - cloud-console-proxy - ${project.version} - @@ -353,27 +364,7 @@ maven-antrun-plugin - 1.7 - - - copy-systemvm - process-resources - - run - - - - - - - - - - - - generate-resource generate-resources @@ -382,95 +373,93 @@ - - + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -483,77 +472,42 @@ - process-nonoss + process-noredist process-resources run - + test + replace="cloud-stack-components-specification=components-nonoss.xml" byline="true" + /> - process-simulator-context + process-noredist-spring-context process-resources run - - test - - - - - - process-nonoss-spring-context - process-resources - - run - - - - + + - - + + - - process-quickcloud-spring-context - process-resources - - run - - - - quickcloud - - - - @@ -602,19 +556,15 @@ - - org.apache.maven.plugins - - - maven-antrun-plugin - + org.apache.maven.plugins + maven-antrun-plugin [1.7,) run - + @@ -625,6 +575,51 @@ + + systemvm + + + systemvm + + + + + org.apache.cloudstack + cloud-systemvm + ${project.version} + pom + + + + + + maven-antrun-plugin + 1.7 + + + + copy-systemvm + process-resources + + run + + + + + + + + + + + + + + + + + simulator @@ -644,7 +639,7 @@ netapp - nonoss + noredist @@ -659,7 +654,7 @@ f5 - nonoss + noredist @@ -674,7 +669,7 @@ netscaler - nonoss + noredist @@ -689,7 +684,7 @@ srx - nonoss + noredist @@ -704,7 +699,7 @@ vmware - nonoss + noredist 
@@ -725,5 +720,20 @@ + + quickcloud + + + quickcloud + + + + + org.apache.cloudstack + cloud-quickcloud + ${project.version} + + + diff --git a/client/resources/META-INF/cloudstack/webApplicationContext.xml b/client/resources/META-INF/cloudstack/webApplicationContext.xml new file mode 100644 index 00000000000..fea2709747b --- /dev/null +++ b/client/resources/META-INF/cloudstack/webApplicationContext.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/client/tomcatconf/commands.properties.in b/client/tomcatconf/commands.properties.in index 9bb0ea25a55..428042a5137 100644 --- a/client/tomcatconf/commands.properties.in +++ b/client/tomcatconf/commands.properties.in @@ -71,6 +71,7 @@ assignVirtualMachine=7 migrateVirtualMachine=1 migrateVirtualMachineWithVolume=1 recoverVirtualMachine=7 +expungeVirtualMachine=1 #### snapshot commands createSnapshot=15 @@ -79,7 +80,7 @@ deleteSnapshot=15 createSnapshotPolicy=15 deleteSnapshotPolicies=15 listSnapshotPolicies=15 - +revertSnapshot=15 #### template commands createTemplate=15 @@ -255,6 +256,7 @@ deleteImageStore=1 createSecondaryStagingStore=1 listSecondaryStagingStores=1 deleteSecondaryStagingStore=1 +prepareSecondaryStorageForMigration=1 #### host commands addHost=3 @@ -471,7 +473,7 @@ listTags=15 #### Meta Data commands addResourceDetail=1 removeResourceDetail=1 -listResourceDetails=1 +listResourceDetails=15 ### Site-to-site VPN commands createVpnCustomerGateway=15 @@ -493,7 +495,7 @@ listVirtualRouterElements=7 #### usage commands generateUsageRecords=1 -listUsageRecords=1 +listUsageRecords=7 listUsageTypes=1 #### traffic monitor commands @@ -678,6 +680,7 @@ addLdapConfiguration=3 deleteLdapConfiguration=3 listLdapUsers=3 ldapCreateAccount=3 +importLdapUsers=3 ### Acl commands createAclRole=7 @@ -695,3 +698,5 @@ removeAccountFromAclGroup=7 grantPermissionToAclGroup=7 revokePermissionFromAclGroup=7 + + diff --git a/client/tomcatconf/log4j-cloud.xml.in b/client/tomcatconf/log4j-cloud.xml.in index d439b771f4f..08021f2077b 100755 
--- a/client/tomcatconf/log4j-cloud.xml.in +++ b/client/tomcatconf/log4j-cloud.xml.in @@ -152,6 +152,14 @@ under the License. + + + + + + + + diff --git a/client/tomcatconf/tomcat6-nonssl.conf.in b/client/tomcatconf/tomcat6-nonssl.conf.in index 4a9a70f619e..5ce724c73b7 100644 --- a/client/tomcatconf/tomcat6-nonssl.conf.in +++ b/client/tomcatconf/tomcat6-nonssl.conf.in @@ -41,7 +41,7 @@ CATALINA_TMPDIR="@MSENVIRON@/temp" # Use JAVA_OPTS to set java.library.path for libtcnative.so #JAVA_OPTS="-Djava.library.path=/usr/lib64" -JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=512M -XX:MaxPermSize=800m" +JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:PermSize=512M -XX:MaxPermSize=800m" # What user should run tomcat TOMCAT_USER="@MSUSER@" diff --git a/client/tomcatconf/tomcat6-ssl.conf.in b/client/tomcatconf/tomcat6-ssl.conf.in index 0d2650871b6..c967a98be98 100644 --- a/client/tomcatconf/tomcat6-ssl.conf.in +++ b/client/tomcatconf/tomcat6-ssl.conf.in @@ -40,7 +40,7 @@ CATALINA_TMPDIR="@MSENVIRON@/temp" # Use JAVA_OPTS to set java.library.path for libtcnative.so #JAVA_OPTS="-Djava.library.path=/usr/lib64" -JAVA_OPTS="-Djava.awt.headless=true -Djavax.net.ssl.trustStore=/etc/cloudstack/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com -Dcom.sun.management.jmxremote.port=45219 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:MaxPermSize=800m -XX:PermSize=512M" +JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Djavax.net.ssl.trustStore=/etc/cloudstack/management/cloudmanagementserver.keystore 
-Djavax.net.ssl.trustStorePassword=vmops.com -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=@MSLOGDIR@ -XX:MaxPermSize=800m -XX:PermSize=512M" # What user should run tomcat TOMCAT_USER="@MSUSER@" diff --git a/docs/qig/publican.cfg b/core/resources/META-INF/cloudstack/allocator/module.properties similarity index 75% rename from docs/qig/publican.cfg rename to core/resources/META-INF/cloudstack/allocator/module.properties index 52d434c3775..7866be06f30 100644 --- a/docs/qig/publican.cfg +++ b/core/resources/META-INF/cloudstack/allocator/module.properties @@ -1,13 +1,12 @@ -# Config::Simple 4.59 -# Fri May 25 12:50:59 2012 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# +# distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an @@ -15,8 +14,5 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
- -xml_lang: "en-US" -type: Book -brand: cloudstack -docname: qig +name=allocator +parent=core diff --git a/core/resources/META-INF/cloudstack/allocator/spring-core-allocator-context.xml b/core/resources/META-INF/cloudstack/allocator/spring-core-allocator-context.xml new file mode 100644 index 00000000000..65ebc704400 --- /dev/null +++ b/core/resources/META-INF/cloudstack/allocator/spring-core-allocator-context.xml @@ -0,0 +1,32 @@ + + + + + + \ No newline at end of file diff --git a/core/resources/META-INF/cloudstack/allocator/spring-core-lifecycle-allocator-context-inheritable.xml b/core/resources/META-INF/cloudstack/allocator/spring-core-lifecycle-allocator-context-inheritable.xml new file mode 100644 index 00000000000..ad00de8be2c --- /dev/null +++ b/core/resources/META-INF/cloudstack/allocator/spring-core-lifecycle-allocator-context-inheritable.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core/resources/META-INF/cloudstack/api/module.properties b/core/resources/META-INF/cloudstack/api/module.properties new file mode 100644 index 00000000000..cc66a099a6c --- /dev/null +++ b/core/resources/META-INF/cloudstack/api/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +name=api +parent=core diff --git a/core/resources/META-INF/cloudstack/api/spring-core-lifecycle-api-context-inheritable.xml b/core/resources/META-INF/cloudstack/api/spring-core-lifecycle-api-context-inheritable.xml new file mode 100644 index 00000000000..b0ed228c0da --- /dev/null +++ b/core/resources/META-INF/cloudstack/api/spring-core-lifecycle-api-context-inheritable.xml @@ -0,0 +1,53 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/backend/module.properties b/core/resources/META-INF/cloudstack/backend/module.properties new file mode 100644 index 00000000000..ab18ad18837 --- /dev/null +++ b/core/resources/META-INF/cloudstack/backend/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=backend +parent=core diff --git a/core/resources/META-INF/cloudstack/bootstrap/module.properties b/core/resources/META-INF/cloudstack/bootstrap/module.properties new file mode 100644 index 00000000000..716bd002d47 --- /dev/null +++ b/core/resources/META-INF/cloudstack/bootstrap/module.properties @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=bootstrap diff --git a/core/resources/META-INF/cloudstack/bootstrap/spring-bootstrap-context-inheritable.xml b/core/resources/META-INF/cloudstack/bootstrap/spring-bootstrap-context-inheritable.xml new file mode 100644 index 00000000000..adee3ed28e0 --- /dev/null +++ b/core/resources/META-INF/cloudstack/bootstrap/spring-bootstrap-context-inheritable.xml @@ -0,0 +1,39 @@ + + + + + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/bootstrap/spring-bootstrap-context.xml b/core/resources/META-INF/cloudstack/bootstrap/spring-bootstrap-context.xml new file mode 100644 index 00000000000..40fcc71c14e --- /dev/null +++ b/core/resources/META-INF/cloudstack/bootstrap/spring-bootstrap-context.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/core/resources/META-INF/cloudstack/compute/module.properties b/core/resources/META-INF/cloudstack/compute/module.properties new file mode 100644 index 00000000000..0a12aae7c19 --- /dev/null +++ b/core/resources/META-INF/cloudstack/compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=compute +parent=backend diff --git a/core/resources/META-INF/cloudstack/compute/spring-core-lifecycle-compute-context-inheritable.xml b/core/resources/META-INF/cloudstack/compute/spring-core-lifecycle-compute-context-inheritable.xml new file mode 100644 index 00000000000..b57f52fc2ef --- /dev/null +++ b/core/resources/META-INF/cloudstack/compute/spring-core-lifecycle-compute-context-inheritable.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/core/module.properties b/core/resources/META-INF/cloudstack/core/module.properties new file mode 100644 index 00000000000..fd5ecb7bf15 --- /dev/null +++ b/core/resources/META-INF/cloudstack/core/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=core +parent=system diff --git a/core/resources/META-INF/cloudstack/core/spring-core-context.xml b/core/resources/META-INF/cloudstack/core/spring-core-context.xml new file mode 100644 index 00000000000..6cd00a40103 --- /dev/null +++ b/core/resources/META-INF/cloudstack/core/spring-core-context.xml @@ -0,0 +1,36 @@ + + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/core/spring-core-lifecycle-core-context-inheritable.xml b/core/resources/META-INF/cloudstack/core/spring-core-lifecycle-core-context-inheritable.xml new file mode 100644 index 00000000000..06b9f5e0748 --- /dev/null +++ b/core/resources/META-INF/cloudstack/core/spring-core-lifecycle-core-context-inheritable.xml @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml b/core/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml new file mode 100644 index 00000000000..c2467b1a850 --- /dev/null +++ b/core/resources/META-INF/cloudstack/core/spring-core-registry-core-context.xml @@ -0,0 +1,273 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/discoverer/module.properties b/core/resources/META-INF/cloudstack/discoverer/module.properties new file mode 100644 index 00000000000..e511fb5e37d --- /dev/null +++ b/core/resources/META-INF/cloudstack/discoverer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=discoverer +parent=core diff --git a/core/resources/META-INF/cloudstack/discoverer/spring-core-lifecycle-discoverer-context-inheritable.xml b/core/resources/META-INF/cloudstack/discoverer/spring-core-lifecycle-discoverer-context-inheritable.xml new file mode 100644 index 00000000000..2c83a104b32 --- /dev/null +++ b/core/resources/META-INF/cloudstack/discoverer/spring-core-lifecycle-discoverer-context-inheritable.xml @@ -0,0 +1,35 @@ + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/network/module.properties b/core/resources/META-INF/cloudstack/network/module.properties new file mode 100644 index 00000000000..1a15fb01131 --- /dev/null +++ b/core/resources/META-INF/cloudstack/network/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=network +parent=backend diff --git a/core/resources/META-INF/cloudstack/network/spring-core-lifecycle-network-context-inheritable.xml b/core/resources/META-INF/cloudstack/network/spring-core-lifecycle-network-context-inheritable.xml new file mode 100644 index 00000000000..3388ca41284 --- /dev/null +++ b/core/resources/META-INF/cloudstack/network/spring-core-lifecycle-network-context-inheritable.xml @@ -0,0 +1,94 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/core/resources/META-INF/cloudstack/planner/module.properties b/core/resources/META-INF/cloudstack/planner/module.properties new file mode 100644 index 00000000000..96359fbe6e3 --- /dev/null +++ b/core/resources/META-INF/cloudstack/planner/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +name=planner +parent=allocator \ No newline at end of file diff --git a/core/resources/META-INF/cloudstack/planner/spring-core-lifecycle-planner-context-inheritable.xml b/core/resources/META-INF/cloudstack/planner/spring-core-lifecycle-planner-context-inheritable.xml new file mode 100644 index 00000000000..715f86d9c28 --- /dev/null +++ b/core/resources/META-INF/cloudstack/planner/spring-core-lifecycle-planner-context-inheritable.xml @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/storage/module.properties b/core/resources/META-INF/cloudstack/storage/module.properties new file mode 100644 index 00000000000..564e85e116e --- /dev/null +++ b/core/resources/META-INF/cloudstack/storage/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=storage +parent=backend diff --git a/core/resources/META-INF/cloudstack/storage/spring-lifecycle-storage-context-inheritable.xml b/core/resources/META-INF/cloudstack/storage/spring-lifecycle-storage-context-inheritable.xml new file mode 100644 index 00000000000..ad78cad8edc --- /dev/null +++ b/core/resources/META-INF/cloudstack/storage/spring-lifecycle-storage-context-inheritable.xml @@ -0,0 +1,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/system/module.properties b/core/resources/META-INF/cloudstack/system/module.properties new file mode 100644 index 00000000000..0b07ebeb478 --- /dev/null +++ b/core/resources/META-INF/cloudstack/system/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=system +parent=bootstrap diff --git a/core/resources/META-INF/cloudstack/system/spring-core-system-context-inheritable.xml b/core/resources/META-INF/cloudstack/system/spring-core-system-context-inheritable.xml new file mode 100644 index 00000000000..80c5da744bb --- /dev/null +++ b/core/resources/META-INF/cloudstack/system/spring-core-system-context-inheritable.xml @@ -0,0 +1,54 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/core/resources/META-INF/cloudstack/system/spring-core-system-context.xml b/core/resources/META-INF/cloudstack/system/spring-core-system-context.xml new file mode 100644 index 00000000000..c2d540ca102 --- /dev/null +++ b/core/resources/META-INF/cloudstack/system/spring-core-system-context.xml @@ -0,0 +1,50 @@ + + + + + + + + + + + + + + + + + + + diff --git a/core/src/com/cloud/agent/api/AttachVolumeCommand.java b/core/src/com/cloud/agent/api/AttachVolumeCommand.java index 49b2a706b4b..e9276198dbf 100644 --- a/core/src/com/cloud/agent/api/AttachVolumeCommand.java +++ b/core/src/com/cloud/agent/api/AttachVolumeCommand.java @@ -25,6 +25,7 @@ public class AttachVolumeCommand extends Command { private StoragePoolType pooltype; private String volumePath; private String volumeName; + private Long volumeSize; private Long deviceId; private String chainInfo; private String poolUuid; @@ -45,13 +46,14 @@ public class AttachVolumeCommand extends Command { public AttachVolumeCommand(boolean attach, boolean managed, String vmName, StoragePoolType pooltype, String volumePath, String volumeName, - Long deviceId, String chainInfo) { + Long volumeSize, Long deviceId, String chainInfo) { this.attach = attach; this._managed = managed; this.vmName = vmName; this.pooltype = pooltype; this.volumePath = volumePath; this.volumeName = volumeName; + this.volumeSize = volumeSize; this.deviceId = deviceId; this.chainInfo = chainInfo; } @@ -85,6 +87,10 @@ public class AttachVolumeCommand extends Command { return volumeName; } + public Long getVolumeSize() { + 
return volumeSize; + } + public Long getDeviceId() { return deviceId; } diff --git a/core/src/com/cloud/agent/api/ClusterSyncAnswer.java b/core/src/com/cloud/agent/api/ClusterSyncAnswer.java index 99fee2a9dd1..e5ea1f15aca 100644 --- a/core/src/com/cloud/agent/api/ClusterSyncAnswer.java +++ b/core/src/com/cloud/agent/api/ClusterSyncAnswer.java @@ -18,12 +18,12 @@ package com.cloud.agent.api; import java.util.HashMap; -import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; import com.cloud.vm.VirtualMachine.State; public class ClusterSyncAnswer extends Answer { private long _clusterId; - private HashMap> _newStates; + private HashMap> _newStates; private boolean _isExecuted=false; // this is here because a cron command answer is being sent twice @@ -38,7 +38,7 @@ public class ClusterSyncAnswer extends Answer { } - public ClusterSyncAnswer(long clusterId, HashMap> newStates){ + public ClusterSyncAnswer(long clusterId, HashMap> newStates){ _clusterId = clusterId; _newStates = newStates; result = true; @@ -48,7 +48,7 @@ public class ClusterSyncAnswer extends Answer { return _clusterId; } - public HashMap> getNewStates() { + public HashMap> getNewStates() { return _newStates; } diff --git a/core/src/com/cloud/agent/api/CreateVMSnapshotAnswer.java b/core/src/com/cloud/agent/api/CreateVMSnapshotAnswer.java index f9fb1642b3f..8b8e69e9c38 100644 --- a/core/src/com/cloud/agent/api/CreateVMSnapshotAnswer.java +++ b/core/src/com/cloud/agent/api/CreateVMSnapshotAnswer.java @@ -17,21 +17,21 @@ package com.cloud.agent.api; -import java.util.List; +import org.apache.cloudstack.storage.to.VolumeObjectTO; -import com.cloud.agent.api.to.VolumeTO; +import java.util.List; public class CreateVMSnapshotAnswer extends Answer { - private List volumeTOs; + private List volumeTOs; private VMSnapshotTO vmSnapshotTo; - public List getVolumeTOs() { + public List getVolumeTOs() { return volumeTOs; } - public void setVolumeTOs(List volumeTOs) { + public void setVolumeTOs(List volumeTOs) { 
this.volumeTOs = volumeTOs; } @@ -53,7 +53,7 @@ public class CreateVMSnapshotAnswer extends Answer { } public CreateVMSnapshotAnswer(CreateVMSnapshotCommand cmd, - VMSnapshotTO vmSnapshotTo, List volumeTOs) { + VMSnapshotTO vmSnapshotTo, List volumeTOs) { super(cmd, true, ""); this.vmSnapshotTo = vmSnapshotTo; this.volumeTOs = volumeTOs; diff --git a/core/src/com/cloud/agent/api/CreateVMSnapshotCommand.java b/core/src/com/cloud/agent/api/CreateVMSnapshotCommand.java index 478987d993b..bfbc21d1c2b 100644 --- a/core/src/com/cloud/agent/api/CreateVMSnapshotCommand.java +++ b/core/src/com/cloud/agent/api/CreateVMSnapshotCommand.java @@ -18,12 +18,14 @@ package com.cloud.agent.api; import java.util.List; +import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.VolumeTO; import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.storage.to.VolumeObjectTO; public class CreateVMSnapshotCommand extends VMSnapshotBaseCommand { - public CreateVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List volumeTOs, String guestOSType, VirtualMachine.State vmState) { + public CreateVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List volumeTOs, String guestOSType, VirtualMachine.State vmState) { super(vmName, snapshot, volumeTOs, guestOSType); this.vmState = vmState; } diff --git a/core/src/com/cloud/agent/api/DeleteVMSnapshotAnswer.java b/core/src/com/cloud/agent/api/DeleteVMSnapshotAnswer.java index 8f4ecad3d80..d6ae95cb89d 100644 --- a/core/src/com/cloud/agent/api/DeleteVMSnapshotAnswer.java +++ b/core/src/com/cloud/agent/api/DeleteVMSnapshotAnswer.java @@ -16,12 +16,12 @@ // under the License. 
package com.cloud.agent.api; +import org.apache.cloudstack.storage.to.VolumeObjectTO; + import java.util.List; -import com.cloud.agent.api.to.VolumeTO; - public class DeleteVMSnapshotAnswer extends Answer { - private List volumeTOs; + private List volumeTOs; public DeleteVMSnapshotAnswer() { } @@ -32,16 +32,16 @@ public class DeleteVMSnapshotAnswer extends Answer { } public DeleteVMSnapshotAnswer(DeleteVMSnapshotCommand cmd, - List volumeTOs) { + List volumeTOs) { super(cmd, true, ""); this.volumeTOs = volumeTOs; } - public List getVolumeTOs() { + public List getVolumeTOs() { return volumeTOs; } - public void setVolumeTOs(List volumeTOs) { + public void setVolumeTOs(List volumeTOs) { this.volumeTOs = volumeTOs; } diff --git a/core/src/com/cloud/agent/api/DeleteVMSnapshotCommand.java b/core/src/com/cloud/agent/api/DeleteVMSnapshotCommand.java index c213448bf9c..1c64a2b6e97 100644 --- a/core/src/com/cloud/agent/api/DeleteVMSnapshotCommand.java +++ b/core/src/com/cloud/agent/api/DeleteVMSnapshotCommand.java @@ -19,10 +19,11 @@ package com.cloud.agent.api; import java.util.List; import com.cloud.agent.api.to.VolumeTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; public class DeleteVMSnapshotCommand extends VMSnapshotBaseCommand { - public DeleteVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List volumeTOs, String guestOSType) { + public DeleteVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List volumeTOs, String guestOSType) { super( vmName, snapshot, volumeTOs, guestOSType); } } diff --git a/core/src/com/cloud/agent/api/MigrateCommand.java b/core/src/com/cloud/agent/api/MigrateCommand.java index 5042b8c1971..0d8f70cf047 100644 --- a/core/src/com/cloud/agent/api/MigrateCommand.java +++ b/core/src/com/cloud/agent/api/MigrateCommand.java @@ -16,26 +16,33 @@ // under the License. 
package com.cloud.agent.api; +import com.cloud.agent.api.to.VirtualMachineTO; + public class MigrateCommand extends Command { String vmName; String destIp; String hostGuid; boolean isWindows; - + VirtualMachineTO vmTO; protected MigrateCommand() { } - public MigrateCommand(String vmName, String destIp, boolean isWindows) { + public MigrateCommand(String vmName, String destIp, boolean isWindows, VirtualMachineTO vmTO) { this.vmName = vmName; this.destIp = destIp; this.isWindows = isWindows; + this.vmTO = vmTO; } public boolean isWindows() { return isWindows; } + public VirtualMachineTO getVirtualMachine() { + return vmTO; + } + public String getDestinationIp() { return destIp; } diff --git a/core/src/com/cloud/agent/api/MigrateWithStorageAnswer.java b/core/src/com/cloud/agent/api/MigrateWithStorageAnswer.java index d87a5f184c8..6468884f464 100644 --- a/core/src/com/cloud/agent/api/MigrateWithStorageAnswer.java +++ b/core/src/com/cloud/agent/api/MigrateWithStorageAnswer.java @@ -20,9 +20,6 @@ import java.util.List; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import com.cloud.agent.api.to.DiskTO; -import com.cloud.agent.api.to.VolumeTO; - public class MigrateWithStorageAnswer extends Answer { List volumeTos; diff --git a/core/src/com/cloud/agent/api/MigrateWithStorageCompleteAnswer.java b/core/src/com/cloud/agent/api/MigrateWithStorageCompleteAnswer.java index fd8f22f3579..ec8bd0f4b65 100644 --- a/core/src/com/cloud/agent/api/MigrateWithStorageCompleteAnswer.java +++ b/core/src/com/cloud/agent/api/MigrateWithStorageCompleteAnswer.java @@ -20,8 +20,6 @@ import java.util.List; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import com.cloud.agent.api.to.VolumeTO; - public class MigrateWithStorageCompleteAnswer extends Answer { List volumeTos; diff --git a/core/src/com/cloud/agent/api/RebootCommand.java b/core/src/com/cloud/agent/api/RebootCommand.java index 49712b6fce5..299e61b76af 100755 --- a/core/src/com/cloud/agent/api/RebootCommand.java +++ 
b/core/src/com/cloud/agent/api/RebootCommand.java @@ -16,7 +16,6 @@ // under the License. package com.cloud.agent.api; -import com.cloud.hypervisor.Hypervisor; import com.cloud.vm.VirtualMachine; public class RebootCommand extends Command { diff --git a/core/src/com/cloud/agent/api/RevertToVMSnapshotAnswer.java b/core/src/com/cloud/agent/api/RevertToVMSnapshotAnswer.java index 848ffc0ebf8..6170864c08e 100644 --- a/core/src/com/cloud/agent/api/RevertToVMSnapshotAnswer.java +++ b/core/src/com/cloud/agent/api/RevertToVMSnapshotAnswer.java @@ -17,14 +17,14 @@ package com.cloud.agent.api; -import java.util.List; - -import com.cloud.agent.api.to.VolumeTO; import com.cloud.vm.VirtualMachine; +import org.apache.cloudstack.storage.to.VolumeObjectTO; + +import java.util.List; public class RevertToVMSnapshotAnswer extends Answer { - private List volumeTOs; + private List volumeTOs; private VirtualMachine.State vmState; public RevertToVMSnapshotAnswer(RevertToVMSnapshotCommand cmd, boolean result, @@ -37,7 +37,7 @@ public class RevertToVMSnapshotAnswer extends Answer { } public RevertToVMSnapshotAnswer(RevertToVMSnapshotCommand cmd, - List volumeTOs, + List volumeTOs, VirtualMachine.State vmState) { super(cmd, true, ""); this.volumeTOs = volumeTOs; @@ -48,11 +48,11 @@ public class RevertToVMSnapshotAnswer extends Answer { return vmState; } - public List getVolumeTOs() { + public List getVolumeTOs() { return volumeTOs; } - public void setVolumeTOs(List volumeTOs) { + public void setVolumeTOs(List volumeTOs) { this.volumeTOs = volumeTOs; } diff --git a/core/src/com/cloud/agent/api/RevertToVMSnapshotCommand.java b/core/src/com/cloud/agent/api/RevertToVMSnapshotCommand.java index 429a186e0dc..1e5fd6c9a68 100644 --- a/core/src/com/cloud/agent/api/RevertToVMSnapshotCommand.java +++ b/core/src/com/cloud/agent/api/RevertToVMSnapshotCommand.java @@ -19,10 +19,11 @@ package com.cloud.agent.api; import java.util.List; import com.cloud.agent.api.to.VolumeTO; +import 
org.apache.cloudstack.storage.to.VolumeObjectTO; public class RevertToVMSnapshotCommand extends VMSnapshotBaseCommand { - public RevertToVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List volumeTOs, String guestOSType) { + public RevertToVMSnapshotCommand(String vmName, VMSnapshotTO snapshot, List volumeTOs, String guestOSType) { super(vmName, snapshot, volumeTOs, guestOSType); } diff --git a/core/src/com/cloud/agent/api/StartAnswer.java b/core/src/com/cloud/agent/api/StartAnswer.java index 922d060cfae..f3e75dfb75d 100644 --- a/core/src/com/cloud/agent/api/StartAnswer.java +++ b/core/src/com/cloud/agent/api/StartAnswer.java @@ -16,11 +16,14 @@ // under the License. package com.cloud.agent.api; +import java.util.Map; + import com.cloud.agent.api.to.VirtualMachineTO; public class StartAnswer extends Answer { VirtualMachineTO vm; String host_guid; + Map _iqnToPath; protected StartAnswer() { } @@ -54,4 +57,12 @@ public class StartAnswer extends Answer { public String getHost_guid() { return host_guid; } + + public void setIqnToPath(Map iqnToPath) { + _iqnToPath = iqnToPath; + } + + public Map getIqnToPath() { + return _iqnToPath; + } } diff --git a/core/src/com/cloud/agent/api/StartupRoutingCommand.java b/core/src/com/cloud/agent/api/StartupRoutingCommand.java index 5961ab0017e..d52666b7d9d 100755 --- a/core/src/com/cloud/agent/api/StartupRoutingCommand.java +++ b/core/src/com/cloud/agent/api/StartupRoutingCommand.java @@ -22,7 +22,7 @@ import java.util.Map; import com.cloud.host.Host; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Networks.RouterPrivateIpStrategy; -import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; import com.cloud.vm.VirtualMachine.State; public class StartupRoutingCommand extends StartupCommand { @@ -48,7 +48,7 @@ public class StartupRoutingCommand extends StartupCommand { long dom0MinMemory; boolean poolSync; Map vms; - HashMap> _clusterVMStates; + HashMap> _clusterVMStates; String caps; String 
pool; HypervisorType hypervisorType; @@ -129,7 +129,7 @@ getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStr } } - public void setClusterVMStateChanges(HashMap> allStates){ + public void setClusterVMStateChanges(HashMap> allStates){ _clusterVMStates = allStates; } @@ -157,7 +157,7 @@ getHostDetails().put(RouterPrivateIpStrategy.class.getCanonicalName(), privIpStr return vms; } - public HashMap> getClusterVMStateChanges() { + public HashMap> getClusterVMStateChanges() { return _clusterVMStates; } diff --git a/core/src/com/cloud/agent/api/StopAnswer.java b/core/src/com/cloud/agent/api/StopAnswer.java index 0af23853da5..614835e2a37 100755 --- a/core/src/com/cloud/agent/api/StopAnswer.java +++ b/core/src/com/cloud/agent/api/StopAnswer.java @@ -17,37 +17,34 @@ package com.cloud.agent.api; public class StopAnswer extends RebootAnswer { - Integer vncPort; + + private String hypervisortoolsversion; Integer timeOffset; protected StopAnswer() { } - public StopAnswer(StopCommand cmd, String details, Integer vncPort, Integer timeOffset, boolean success) { + public StopAnswer(StopCommand cmd, String details, String hypervisortoolsversion, Integer timeOffset, boolean success) { super(cmd, details, success); - this.vncPort = vncPort; + this.hypervisortoolsversion = hypervisortoolsversion; this.timeOffset = timeOffset; } - public StopAnswer(StopCommand cmd, String details, Integer vncPort, boolean success) { + public StopAnswer(StopCommand cmd, String details, boolean success) { super(cmd, details, success); - this.vncPort = vncPort; + this.hypervisortoolsversion = null; this.timeOffset = null; } - public StopAnswer(StopCommand cmd, String details, boolean success) { - super(cmd, details, success); - vncPort = null; - timeOffset = null; - } public StopAnswer(StopCommand cmd, Exception e) { super(cmd, e); + this.hypervisortoolsversion = null; + this.timeOffset = null; } - @Override - public Integer getVncPort() { - return vncPort; + public String 
getHypervisorToolsVersion() { + return hypervisortoolsversion; } public Integer getTimeOffset() { diff --git a/core/src/com/cloud/agent/api/VMSnapshotBaseCommand.java b/core/src/com/cloud/agent/api/VMSnapshotBaseCommand.java index 2120f2f73b1..b2c524194ea 100644 --- a/core/src/com/cloud/agent/api/VMSnapshotBaseCommand.java +++ b/core/src/com/cloud/agent/api/VMSnapshotBaseCommand.java @@ -19,27 +19,29 @@ package com.cloud.agent.api; import java.util.List; +import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.VolumeTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; public class VMSnapshotBaseCommand extends Command{ - protected List volumeTOs; + protected List volumeTOs; protected VMSnapshotTO target; protected String vmName; protected String guestOSType; - public VMSnapshotBaseCommand(String vmName, VMSnapshotTO snapshot, List volumeTOs, String guestOSType) { + public VMSnapshotBaseCommand(String vmName, VMSnapshotTO snapshot, List volumeTOs, String guestOSType) { this.vmName = vmName; this.target = snapshot; this.volumeTOs = volumeTOs; this.guestOSType = guestOSType; } - public List getVolumeTOs() { + public List getVolumeTOs() { return volumeTOs; } - public void setVolumeTOs(List volumeTOs) { + public void setVolumeTOs(List volumeTOs) { this.volumeTOs = volumeTOs; } diff --git a/core/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java b/core/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java index dfca4ab5908..ee8033a7e28 100644 --- a/core/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java +++ b/core/src/com/cloud/agent/api/routing/HealthCheckLBConfigAnswer.java @@ -20,7 +20,6 @@ import java.util.List; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.LoadBalancerTO; -import com.cloud.agent.api.to.NicTO; /** * LoadBalancerConfigCommand sends the load balancer configuration diff --git a/core/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java 
b/core/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java index f705f6c9707..7206d2f5e35 100644 --- a/core/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java +++ b/core/src/com/cloud/agent/api/routing/HealthCheckLBConfigCommand.java @@ -17,7 +17,6 @@ package com.cloud.agent.api.routing; import com.cloud.agent.api.to.LoadBalancerTO; -import com.cloud.agent.api.to.NicTO; /** * LoadBalancerConfigCommand sends the load balancer configuration diff --git a/core/src/com/cloud/agent/api/routing/LoadBalancerConfigCommand.java b/core/src/com/cloud/agent/api/routing/LoadBalancerConfigCommand.java index ee29290b720..3a51e8ad6be 100644 --- a/core/src/com/cloud/agent/api/routing/LoadBalancerConfigCommand.java +++ b/core/src/com/cloud/agent/api/routing/LoadBalancerConfigCommand.java @@ -33,6 +33,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand { public String lbStatsAuth = "admin1:AdMiN123"; public String lbStatsUri = "/admin?stats"; public String maxconn =""; + public boolean keepAliveEnabled = false; NicTO nic; Long vpcId; @@ -44,7 +45,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand { this.vpcId = vpcId; } - public LoadBalancerConfigCommand(LoadBalancerTO[] loadBalancers,String PublicIp,String GuestIp,String PrivateIp, NicTO nic, Long vpcId, String maxconn) { + public LoadBalancerConfigCommand(LoadBalancerTO[] loadBalancers,String PublicIp,String GuestIp,String PrivateIp, NicTO nic, Long vpcId, String maxconn, boolean keepAliveEnabled) { this.loadBalancers = loadBalancers; this.lbStatsPublicIP = PublicIp; this.lbStatsPrivateIP = PrivateIp; @@ -52,6 +53,7 @@ public class LoadBalancerConfigCommand extends NetworkElementCommand { this.nic = nic; this.vpcId = vpcId; this.maxconn=maxconn; + this.keepAliveEnabled = keepAliveEnabled; } public NicTO getNic() { diff --git a/core/src/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java b/core/src/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java 
index 68d7caf016f..37278ee64d3 100644 --- a/core/src/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java +++ b/core/src/com/cloud/agent/api/routing/RemoteAccessVpnCfgCommand.java @@ -20,10 +20,13 @@ package com.cloud.agent.api.routing; public class RemoteAccessVpnCfgCommand extends NetworkElementCommand { boolean create; + private boolean vpcEnabled; String vpnServerIp; String ipRange; String presharedKey; String localIp; + private String localCidr; + private String publicInterface; protected RemoteAccessVpnCfgCommand() { this.create = false; @@ -39,12 +42,18 @@ public class RemoteAccessVpnCfgCommand extends NetworkElementCommand { } - public RemoteAccessVpnCfgCommand(boolean create, String vpnServerAddress, String localIp, String ipRange, String ipsecPresharedKey) { + public RemoteAccessVpnCfgCommand(boolean create, String vpnServerAddress, String localIp, String ipRange, String ipsecPresharedKey, boolean vpcEnabled) { this.vpnServerIp = vpnServerAddress; this.ipRange = ipRange; this.presharedKey = ipsecPresharedKey; this.localIp = localIp; this.create = create; + this.vpcEnabled = vpcEnabled; + if (vpcEnabled) { + this.setPublicInterface("eth1"); + } else { + this.setPublicInterface("eth2"); + } } public String getVpnServerIp() { @@ -75,4 +84,28 @@ public class RemoteAccessVpnCfgCommand extends NetworkElementCommand { return localIp; } + public boolean isVpcEnabled() { + return vpcEnabled; + } + + public void setVpcEnabled(boolean vpcEnabled) { + this.vpcEnabled = vpcEnabled; + } + + public String getLocalCidr() { + return localCidr; + } + + public void setLocalCidr(String localCidr) { + this.localCidr = localCidr; + } + + public String getPublicInterface() { + return publicInterface; + } + + public void setPublicInterface(String publicInterface) { + this.publicInterface = publicInterface; + } + } diff --git a/core/src/com/cloud/agent/api/routing/SetNetworkACLCommand.java b/core/src/com/cloud/agent/api/routing/SetNetworkACLCommand.java index 
236e8ea907a..ba4b4b427bf 100644 --- a/core/src/com/cloud/agent/api/routing/SetNetworkACLCommand.java +++ b/core/src/com/cloud/agent/api/routing/SetNetworkACLCommand.java @@ -20,9 +20,7 @@ package com.cloud.agent.api.routing; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; -import java.util.HashSet; import java.util.List; -import java.util.Set; import com.cloud.agent.api.to.NetworkACLTO; import com.cloud.agent.api.to.NicTO; diff --git a/core/src/com/cloud/agent/api/storage/UploadCommand.java b/core/src/com/cloud/agent/api/storage/UploadCommand.java index 9b893e2abd5..98eebe423c3 100644 --- a/core/src/com/cloud/agent/api/storage/UploadCommand.java +++ b/core/src/com/cloud/agent/api/storage/UploadCommand.java @@ -41,30 +41,30 @@ public class UploadCommand extends AbstractUploadCommand implements InternalIden this.template = new TemplateTO(template); this.url = url; this.installPath = installPath; - this.checksum = template.getChecksum(); - this.id = template.getId(); - this.templateSizeInBytes = sizeInBytes; + checksum = template.getChecksum(); + id = template.getId(); + templateSizeInBytes = sizeInBytes; } public UploadCommand(String url, long id, long sizeInBytes, String installPath, Type type){ - this.template = null; + template = null; this.url = url; this.installPath = installPath; this.id = id; this.type = type; - this.templateSizeInBytes = sizeInBytes; + templateSizeInBytes = sizeInBytes; } protected UploadCommand() { } public UploadCommand(UploadCommand that) { - this.template = that.template; - this.url = that.url; - this.installPath = that.installPath; - this.checksum = that.getChecksum(); - this.id = that.id; + template = that.template; + url = that.url; + installPath = that.installPath; + checksum = that.getChecksum(); + id = that.id; } public String getDescription() { @@ -114,7 +114,8 @@ public class UploadCommand extends AbstractUploadCommand implements InternalIden this.templateSizeInBytes = templateSizeInBytes; } - 
public long getId() { + @Override + public long getId() { return id; } diff --git a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java index 9e6216f5036..874146c6258 100755 --- a/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java +++ b/core/src/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResource.java @@ -103,12 +103,10 @@ import com.cloud.utils.ssh.SshHelper; @Local(value = {VirtualRoutingResource.class}) public class VirtualRoutingResource implements Manager { private static final Logger s_logger = Logger.getLogger(VirtualRoutingResource.class); - private String _savepasswordPath; // This script saves a random password to the DomR file system private String _publicIpAddress; private String _firewallPath; private String _loadbPath; private String _dhcpEntryPath; - private String _vmDataPath; private String _publicEthIf; private String _privateEthIf; private String _bumpUpPriorityPath; @@ -215,6 +213,8 @@ public class VirtualRoutingResource implements Manager { args += " -s "; args += cmd.getVpnServerIp(); } + args += " -C " + cmd.getLocalCidr(); + args += " -i " + cmd.getPublicInterface(); String result = routerProxy("vpn_l2tp.sh", cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP), args); if (result != null) { return new Answer(cmd, false, "Configure VPN failed"); @@ -549,13 +549,14 @@ public class VirtualRoutingResource implements Manager { final String vmIpAddress = cmd.getVmIpAddress(); final String local = vmName; - // Run save_password_to_domr.sh - final String result = savePassword(routerPrivateIPAddress, vmIpAddress, password, local); + String args = "-v " + vmIpAddress; + args += " -p " + password; + + String result = routerProxy("savepassword.sh", routerPrivateIPAddress, args); if (result != null) { return new Answer(cmd, false, "Unable to save password to DomR."); - } else { - return new Answer(cmd); } + return 
new Answer(cmd); } protected Answer execute(final DhcpEntryCommand cmd) { @@ -814,16 +815,6 @@ public class VirtualRoutingResource implements Manager { return new ConsoleProxyLoadAnswer(cmd, proxyVmId, proxyVmName, success, result); } - public String savePassword(final String privateIpAddress, final String vmIpAddress, final String password, final String localPath) { - final Script command = new Script(_savepasswordPath, _startTimeout, s_logger); - command.add("-r", privateIpAddress); - command.add("-v", vmIpAddress); - command.add("-p", password); - command.add(localPath); - - return command.execute(); - } - public String assignGuestNetwork(final String dev, final String routerIP, final String routerGIP, final String gateway, final String cidr, final String netmask, final String dns, final String domainName) { @@ -1129,11 +1120,6 @@ public class VirtualRoutingResource implements Manager { throw new ConfigurationException("Unable to find the call_loadbalancer.sh"); } - _savepasswordPath = findScript("save_password_to_domr.sh"); - if (_savepasswordPath == null) { - throw new ConfigurationException("Unable to find save_password_to_domr.sh"); - } - _dhcpEntryPath = findScript("dhcp_entry.sh"); if (_dhcpEntryPath == null) { throw new ConfigurationException("Unable to find dhcp_entry.sh"); @@ -1216,6 +1202,41 @@ public class VirtualRoutingResource implements Manager { return "Unable to connect"; } + public boolean connect(final String ipAddress, int retry, int sleep) { + for (int i = 0; i <= retry; i++) { + SocketChannel sch = null; + try { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Trying to connect to " + ipAddress); + } + sch = SocketChannel.open(); + sch.configureBlocking(true); + + final InetSocketAddress addr = new InetSocketAddress(ipAddress, _port); + sch.connect(addr); + return true; + } catch (final IOException e) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Could not connect to " + ipAddress); + } + } finally { + if (sch != null) { + try { 
+ sch.close(); + } catch (final IOException e) {} + } + } + try { + Thread.sleep(sleep); + } catch (final InterruptedException e) { + } + } + + s_logger.debug("Unable to logon to " + ipAddress); + + return false; + } + @Override public String getName() { return _name; diff --git a/core/src/com/cloud/agent/transport/Request.java b/core/src/com/cloud/agent/transport/Request.java index b0fa4cc2960..cbeb112fea7 100755 --- a/core/src/com/cloud/agent/transport/Request.java +++ b/core/src/com/cloud/agent/transport/Request.java @@ -31,14 +31,6 @@ import java.util.zip.GZIPOutputStream; import org.apache.log4j.Level; import org.apache.log4j.Logger; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.SecStorageFirewallCfgCommand.PortConfig; -import com.cloud.exception.UnsupportedVersionException; -import com.cloud.serializer.GsonHelper; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.Pair; -import com.cloud.utils.exception.CloudRuntimeException; import com.google.gson.Gson; import com.google.gson.JsonArray; import com.google.gson.JsonDeserializationContext; @@ -50,6 +42,15 @@ import com.google.gson.JsonSerializationContext; import com.google.gson.JsonSerializer; import com.google.gson.stream.JsonReader; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.SecStorageFirewallCfgCommand.PortConfig; +import com.cloud.exception.UnsupportedVersionException; +import com.cloud.serializer.GsonHelper; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; + /** * Request is a simple wrapper around command and answer to add sequencing, * versioning, and flags. 
Note that the version here represents the changes @@ -107,7 +108,8 @@ public class Request { protected long _agentId; protected Command[] _cmds; protected String _content; - + protected String _agentName; + protected Request() { } @@ -141,6 +143,11 @@ public class Request { setFromServer(fromServer); } + public Request(long agentId, String agentName, long mgmtId, Command[] cmds, boolean stopOnError, boolean fromServer) { + this(agentId, mgmtId, cmds, stopOnError, fromServer); + setAgentName(agentName); + } + public void setSequence(long seq) { _seq = seq; } @@ -158,14 +165,14 @@ public class Request { } protected Request(final Request that, final Command[] cmds) { - this._ver = that._ver; - this._seq = that._seq; + _ver = that._ver; + _seq = that._seq; setInSequence(that.executeInSequence()); setStopOnError(that.stopOnError()); - this._cmds = cmds; - this._mgmtId = that._mgmtId; - this._via = that._via; - this._agentId = that._agentId; + _cmds = cmds; + _mgmtId = that._mgmtId; + _via = that._via; + _agentId = that._agentId; setFromServer(!that.isFromServer()); } @@ -173,6 +180,10 @@ public class Request { _flags |= (stopOnError ? FLAG_STOP_ON_ERROR : 0); } + private final void setAgentName(String agentName) { + _agentName = agentName; + } + private final void setInSequence(boolean inSequence) { _flags |= (inSequence ? 
FLAG_IN_SEQUENCE : 0); } @@ -287,7 +298,7 @@ public class Request { retBuff.flip(); return retBuff; } - + public static ByteBuffer doCompress(ByteBuffer buffer, int length) { ByteArrayOutputStream byteOut = new ByteArrayOutputStream(length); byte[] array; @@ -307,11 +318,11 @@ public class Request { } return ByteBuffer.wrap(byteOut.toByteArray()); } - + public ByteBuffer[] toBytes() { final ByteBuffer[] buffers = new ByteBuffer[2]; ByteBuffer tmp; - + if (_content == null) { _content = s_gson.toJson(_cmds, _cmds.getClass()); } @@ -372,7 +383,7 @@ public class Request { } } } - + @Override public String toString() { return log("", true, Level.DEBUG); @@ -421,7 +432,11 @@ public class Request { buf.append(msg); buf.append(" { ").append(getType()); - buf.append(", MgmtId: ").append(_mgmtId).append(", via: ").append(_via); + if (_agentName != null) { + buf.append(", MgmtId: ").append(_mgmtId).append(", via: ").append(_via).append("(" + _agentName + ")"); + } else { + buf.append(", MgmtId: ").append(_mgmtId).append(", via: ").append(_via); + } buf.append(", Ver: ").append(_ver.toString()); buf.append(", Flags: ").append(Integer.toBinaryString(getFlags())).append(", "); buf.append(content); @@ -447,7 +462,7 @@ public class Request { if (version.ordinal() != Version.v1.ordinal() && version.ordinal() != Version.v3.ordinal()) { throw new UnsupportedVersionException("This version is no longer supported: " + version.toString(), UnsupportedVersionException.IncompatibleVersion); } - final byte reserved = buff.get(); // tossed away for now. 
+ buff.get(); final short flags = buff.getShort(); final boolean isRequest = (flags & FLAG_REQUEST) > 0; @@ -456,7 +471,7 @@ public class Request { final int size = buff.getInt(); final long mgmtId = buff.getLong(); final long agentId = buff.getLong(); - + long via; if (version.ordinal() == Version.v1.ordinal()) { via = buff.getLong(); @@ -467,7 +482,7 @@ public class Request { if ((flags & FLAG_COMPRESSED) != 0) { buff = doDecompress(buff, size); } - + byte[] command = null; int offset = 0; if (buff.hasArray()) { @@ -519,7 +534,7 @@ public class Request { public static long getViaAgentId(final byte[] bytes) { return NumbersUtil.bytesToLong(bytes, 32); } - + public static boolean fromServer(final byte[] bytes) { return (bytes[3] & FLAG_FROM_SERVER) > 0; } diff --git a/core/src/com/cloud/exception/UsageServerException.java b/core/src/com/cloud/exception/UsageServerException.java index 68f83777b77..924934f0496 100644 --- a/core/src/com/cloud/exception/UsageServerException.java +++ b/core/src/com/cloud/exception/UsageServerException.java @@ -18,15 +18,20 @@ package com.cloud.exception; public class UsageServerException extends CloudException { - public UsageServerException() { - - } + /** + * + */ + private static final long serialVersionUID = -8398313106067116466L; + + public UsageServerException() { + + } + + public UsageServerException(String message) { + super(message); + } - public UsageServerException(String message) { - super(message); - } - } diff --git a/core/src/com/cloud/network/HAProxyConfigurator.java b/core/src/com/cloud/network/HAProxyConfigurator.java index 230912595cf..ae49a2e236f 100644 --- a/core/src/com/cloud/network/HAProxyConfigurator.java +++ b/core/src/com/cloud/network/HAProxyConfigurator.java @@ -44,6 +44,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator { private static String[] globalSection = { "global", "\tlog 127.0.0.1:3914 local0 warning", "\tmaxconn 4096", + "\tmaxpipes 1024", "\tchroot /var/lib/haproxy", 
"\tuser haproxy", "\tgroup haproxy", @@ -122,7 +123,9 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator { sb = new StringBuilder(); // FIXME sb.append("\t").append("balance ").append(algorithm); result.add(sb.toString()); - if (publicPort.equals(NetUtils.HTTP_PORT)) { + if (publicPort.equals(NetUtils.HTTP_PORT) + // && global option httpclose set (or maybe not in this spot???) + ) { sb = new StringBuilder(); sb.append("\t").append("mode http"); result.add(sb.toString()); @@ -434,7 +437,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator { return sb.toString(); } - private List getRulesForPool(LoadBalancerTO lbTO) { + private List getRulesForPool(LoadBalancerTO lbTO, boolean keepAliveEnabled) { StringBuilder sb = new StringBuilder(); String poolName = sb.append(lbTO.getSrcIp().replace(".", "_")) .append('-').append(lbTO.getSrcPort()).toString(); @@ -498,7 +501,9 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator { if ((stickinessSubRule != null) && !destsAvailable) { s_logger.warn("Haproxy stickiness policy for lb rule: " + lbTO.getSrcIp() + ":" + lbTO.getSrcPort() +": Not Applied, cause: backends are unavailable"); } - if ((publicPort.equals(NetUtils.HTTP_PORT)) || (httpbasedStickiness) ) { + if ((publicPort.equals(NetUtils.HTTP_PORT) + && !keepAliveEnabled + ) || (httpbasedStickiness) ) { sb = new StringBuilder(); sb.append("\t").append("mode http"); result.add(sb.toString()); @@ -516,23 +521,58 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator { StringBuilder rule = new StringBuilder("\nlisten ").append(ruleName) .append(" ").append(statsIp).append(":") .append(lbCmd.lbStatsPort); + // TODO DH: write test for this in both cases + if(!lbCmd.keepAliveEnabled) { + s_logger.info("Haproxy mode http enabled"); + rule.append("\n\tmode http\n\toption httpclose"); + } rule.append( - "\n\tmode http\n\toption httpclose\n\tstats enable\n\tstats uri ") + "\n\tstats enable\n\tstats uri ") 
.append(lbCmd.lbStatsUri) .append("\n\tstats realm Haproxy\\ Statistics\n\tstats auth ") .append(lbCmd.lbStatsAuth); rule.append("\n"); - return rule.toString(); + String result = rule.toString(); + if(s_logger.isDebugEnabled()) { + s_logger.debug("Haproxystats rule: " + result); + } + return result; } @Override public String[] generateConfiguration(LoadBalancerConfigCommand lbCmd) { List result = new ArrayList(); List gSection = Arrays.asList(globalSection); +// note that this is overwritten on the String in the static ArrayList gSection.set(2,"\tmaxconn " + lbCmd.maxconn); + // TODO DH: write test for this function + String pipesLine = "\tmaxpipes " + Long.toString(Long.parseLong(lbCmd.maxconn)/4); + gSection.set(3,pipesLine); + if(s_logger.isDebugEnabled()) { + for(String s : gSection) { + s_logger.debug("global section: " + s); + } + } result.addAll(gSection); + // TODO decide under what circumstances these options are needed +// result.add("\tnokqueue"); +// result.add("\tnopoll"); + result.add(blankLine); - result.addAll(Arrays.asList(defaultsSection)); + List dSection = Arrays.asList(defaultsSection); + if(lbCmd.keepAliveEnabled) { + dSection.set(6, "\t#no option set here :<"); + dSection.set(7, "\tno option forceclose"); + } else { + dSection.set(6, "\toption forwardfor"); + dSection.set(7, "\toption forceclose"); + } + if(s_logger.isDebugEnabled()) { + for(String s : dSection) { + s_logger.debug("default section: " + s); + } + } + result.addAll(dSection); if (!lbCmd.lbStatsVisibility.equals("disabled")) { /* new rule : listen admin_page guestip/link-local:8081 */ if (lbCmd.lbStatsVisibility.equals("global")) { @@ -571,7 +611,7 @@ public class HAProxyConfigurator implements LoadBalancerConfigurator { if ( lbTO.isRevoked() ) { continue; } - List poolRules = getRulesForPool(lbTO); + List poolRules = getRulesForPool(lbTO, lbCmd.keepAliveEnabled); result.addAll(poolRules); has_listener = true; } diff --git a/core/src/com/cloud/storage/JavaStorageLayer.java 
b/core/src/com/cloud/storage/JavaStorageLayer.java index bfaa767eaed..e2e28ee5c36 100644 --- a/core/src/com/cloud/storage/JavaStorageLayer.java +++ b/core/src/com/cloud/storage/JavaStorageLayer.java @@ -17,7 +17,6 @@ package com.cloud.storage; import java.io.File; -import java.io.FileFilter; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -41,7 +40,7 @@ public class JavaStorageLayer implements StorageLayer { public JavaStorageLayer(boolean makeWorldWriteable) { this(); - this._makeWorldWriteable = makeWorldWriteable; + _makeWorldWriteable = makeWorldWriteable; } @Override @@ -171,7 +170,7 @@ public class JavaStorageLayer implements StorageLayer { File dir = new File(dirName); if (dir.exists()) { String uniqDirName = dir.getAbsolutePath() + File.separator + UUID.randomUUID().toString(); - if (this.mkdir(uniqDirName)) { + if (mkdir(uniqDirName)) { return new File(uniqDirName); } } @@ -219,6 +218,7 @@ public class JavaStorageLayer implements StorageLayer { return dirPaths; } + @Override public boolean setWorldReadableAndWriteable(File file) { return (file.setReadable(true, false) && file.setWritable(true, false)); } diff --git a/core/src/com/cloud/storage/resource/StoragePoolResource.java b/core/src/com/cloud/storage/resource/StoragePoolResource.java index 8dff97db9c0..f6d7896b34c 100644 --- a/core/src/com/cloud/storage/resource/StoragePoolResource.java +++ b/core/src/com/cloud/storage/resource/StoragePoolResource.java @@ -21,8 +21,6 @@ import com.cloud.agent.api.storage.CopyVolumeAnswer; import com.cloud.agent.api.storage.CopyVolumeCommand; import com.cloud.agent.api.storage.CreateAnswer; import com.cloud.agent.api.storage.CreateCommand; -import com.cloud.agent.api.storage.CreateVolumeOVAAnswer; -import com.cloud.agent.api.storage.CreateVolumeOVACommand; import com.cloud.agent.api.storage.DestroyCommand; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; 
diff --git a/core/src/com/cloud/storage/resource/StorageProcessor.java b/core/src/com/cloud/storage/resource/StorageProcessor.java index 5fa9f8a86e3..29f4a677375 100644 --- a/core/src/com/cloud/storage/resource/StorageProcessor.java +++ b/core/src/com/cloud/storage/resource/StorageProcessor.java @@ -23,8 +23,10 @@ import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; import com.cloud.agent.api.Answer; public interface StorageProcessor { public Answer copyTemplateToPrimaryStorage(CopyCommand cmd); @@ -43,4 +45,6 @@ public interface StorageProcessor { public Answer deleteVolume(DeleteCommand cmd); public Answer createVolumeFromSnapshot(CopyCommand cmd); public Answer deleteSnapshot(DeleteCommand cmd); + Answer introduceObject(IntroduceObjectCmd cmd); + Answer forgetObject(ForgetObjectCmd cmd); } diff --git a/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java b/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java index ab9aa2a3ee6..b43722a6418 100644 --- a/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java +++ b/core/src/com/cloud/storage/resource/StorageSubsystemCommandHandlerBase.java @@ -24,6 +24,7 @@ import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; import 
org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.log4j.Logger; @@ -33,7 +34,6 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; -import com.cloud.agent.api.to.NfsTO; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Volume; @@ -55,6 +55,8 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma return execute((AttachCommand)command); } else if (command instanceof DettachCommand) { return execute((DettachCommand)command); + } else if (command instanceof IntroduceObjectCmd) { + return processor.introduceObject((IntroduceObjectCmd)command); } return new Answer((Command)command, false, "not implemented yet"); } @@ -65,7 +67,7 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma DataStoreTO srcDataStore = srcData.getDataStore(); DataStoreTO destDataStore = destData.getDataStore(); - if ((srcData.getObjectType() == DataObjectType.TEMPLATE) && (srcDataStore instanceof NfsTO) && (destData.getDataStore().getRole() == DataStoreRole.Primary)) { + if (srcData.getObjectType() == DataObjectType.TEMPLATE && srcData.getDataStore().getRole() == DataStoreRole.Image && destData.getDataStore().getRole() == DataStoreRole.Primary) { //copy template to primary storage return processor.copyTemplateToPrimaryStorage(cmd); } else if (srcData.getObjectType() == DataObjectType.TEMPLATE && srcDataStore.getRole() == DataStoreRole.Primary && destDataStore.getRole() == DataStoreRole.Primary) { @@ -80,18 +82,19 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma } else if (destData.getObjectType() == DataObjectType.TEMPLATE) { return processor.createTemplateFromVolume(cmd); } - } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && srcData.getDataStore().getRole() == DataStoreRole.Primary) { + } else if (srcData.getObjectType() == 
DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.SNAPSHOT && + srcData.getDataStore().getRole() == DataStoreRole.Primary) { return processor.backupSnapshot(cmd); } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.VOLUME) { - return processor.createVolumeFromSnapshot(cmd); + return processor.createVolumeFromSnapshot(cmd); } else if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.TEMPLATE) { return processor.createTemplateFromSnapshot(cmd); } return new Answer(cmd, false, "not implemented yet"); } - - + + protected Answer execute(CreateObjectCommand cmd) { DataTO data = cmd.getData(); try { @@ -106,21 +109,21 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma return new CreateObjectAnswer(e.toString()); } } - + protected Answer execute(DeleteCommand cmd) { DataTO data = cmd.getData(); Answer answer = null; if (data.getObjectType() == DataObjectType.VOLUME) { answer = processor.deleteVolume(cmd); } else if (data.getObjectType() == DataObjectType.SNAPSHOT) { - answer = processor.deleteSnapshot(cmd); + answer = processor.deleteSnapshot(cmd); } else { answer = new Answer(cmd, false, "unsupported type"); } return answer; } - + protected Answer execute(AttachCommand cmd) { DiskTO disk = cmd.getDisk(); if (disk.getType() == Volume.Type.ISO) { @@ -129,7 +132,7 @@ public class StorageSubsystemCommandHandlerBase implements StorageSubsystemComma return processor.attachVolume(cmd); } } - + protected Answer execute(DettachCommand cmd) { DiskTO disk = cmd.getDisk(); if (disk.getType() == Volume.Type.ISO) { diff --git a/core/src/com/cloud/storage/template/FtpTemplateUploader.java b/core/src/com/cloud/storage/template/FtpTemplateUploader.java index 61b1984634a..c3c9f1e74ad 100755 --- a/core/src/com/cloud/storage/template/FtpTemplateUploader.java +++ b/core/src/com/cloud/storage/template/FtpTemplateUploader.java @@ -30,203 
+30,202 @@ import org.apache.log4j.Logger; public class FtpTemplateUploader implements TemplateUploader { - - public static final Logger s_logger = Logger.getLogger(FtpTemplateUploader.class.getName()); - public TemplateUploader.Status status = TemplateUploader.Status.NOT_STARTED; - public String errorString = ""; - public long totalBytes = 0; - public long entitySizeinBytes; - private String sourcePath; - private String ftpUrl; - private UploadCompleteCallback completionCallback; - private boolean resume; + + public static final Logger s_logger = Logger.getLogger(FtpTemplateUploader.class.getName()); + public TemplateUploader.Status status = TemplateUploader.Status.NOT_STARTED; + public String errorString = ""; + public long totalBytes = 0; + public long entitySizeinBytes; + private String sourcePath; + private String ftpUrl; + private UploadCompleteCallback completionCallback; private BufferedInputStream inputStream = null; private BufferedOutputStream outputStream = null; - private static final int CHUNK_SIZE = 1024*1024; //1M - - public FtpTemplateUploader(String sourcePath, String url, UploadCompleteCallback callback, long entitySizeinBytes){ - - this.sourcePath = sourcePath; - this.ftpUrl = url; - this.completionCallback = callback; - this.entitySizeinBytes = entitySizeinBytes; - - } - - public long upload(UploadCompleteCallback callback ) - { - - switch (status) { - case ABORTED: - case UNRECOVERABLE_ERROR: - case UPLOAD_FINISHED: - return 0; - default: - - } - - Date start = new Date(); - - StringBuffer sb = new StringBuffer(ftpUrl); - // check for authentication else assume its anonymous access. 
- /* if (user != null && password != null) + private static final int CHUNK_SIZE = 1024*1024; //1M + + public FtpTemplateUploader(String sourcePath, String url, UploadCompleteCallback callback, long entitySizeinBytes){ + + this.sourcePath = sourcePath; + ftpUrl = url; + completionCallback = callback; + this.entitySizeinBytes = entitySizeinBytes; + + } + + @Override + public long upload(UploadCompleteCallback callback ) + { + + switch (status) { + case ABORTED: + case UNRECOVERABLE_ERROR: + case UPLOAD_FINISHED: + return 0; + default: + + } + + new Date(); + + StringBuffer sb = new StringBuffer(ftpUrl); + // check for authentication else assume its anonymous access. + /* if (user != null && password != null) { sb.append( user ); sb.append( ':' ); sb.append( password ); sb.append( '@' ); - }*/ - /* - * type ==> a=ASCII mode, i=image (binary) mode, d= file directory - * listing - */ - sb.append( ";type=i" ); + }*/ + /* + * type ==> a=ASCII mode, i=image (binary) mode, d= file directory + * listing + */ + sb.append( ";type=i" ); - try - { - URL url = new URL( sb.toString() ); - URLConnection urlc = url.openConnection(); - File sourceFile = new File(sourcePath); - entitySizeinBytes = sourceFile.length(); + try + { + URL url = new URL( sb.toString() ); + URLConnection urlc = url.openConnection(); + File sourceFile = new File(sourcePath); + entitySizeinBytes = sourceFile.length(); - outputStream = new BufferedOutputStream( urlc.getOutputStream() ); - inputStream = new BufferedInputStream( new FileInputStream(sourceFile) ); + outputStream = new BufferedOutputStream( urlc.getOutputStream() ); + inputStream = new BufferedInputStream( new FileInputStream(sourceFile) ); - status = TemplateUploader.Status.IN_PROGRESS; + status = TemplateUploader.Status.IN_PROGRESS; - int bytes = 0; - byte[] block = new byte[CHUNK_SIZE]; - boolean done=false; - while (!done && status != Status.ABORTED ) { - if ( (bytes = inputStream.read(block, 0, CHUNK_SIZE)) > -1) { - 
outputStream.write(block,0, bytes); - totalBytes += bytes; - } else { - done = true; - } - } - status = TemplateUploader.Status.UPLOAD_FINISHED; - return totalBytes; - } catch (MalformedURLException e) { - status = TemplateUploader.Status.UNRECOVERABLE_ERROR; - errorString = e.getMessage(); - s_logger.error(errorString); - } catch (IOException e) { - status = TemplateUploader.Status.UNRECOVERABLE_ERROR; - errorString = e.getMessage(); - s_logger.error(errorString); - } - finally - { - try - { - if (inputStream != null){ - inputStream.close(); - } - if (outputStream != null){ - outputStream.close(); - } - }catch (IOException ioe){ - s_logger.error(" Caught exception while closing the resources" ); - } - if (callback != null) { - callback.uploadComplete(status); - } - } + int bytes = 0; + byte[] block = new byte[CHUNK_SIZE]; + boolean done=false; + while (!done && status != Status.ABORTED ) { + if ( (bytes = inputStream.read(block, 0, CHUNK_SIZE)) > -1) { + outputStream.write(block,0, bytes); + totalBytes += bytes; + } else { + done = true; + } + } + status = TemplateUploader.Status.UPLOAD_FINISHED; + return totalBytes; + } catch (MalformedURLException e) { + status = TemplateUploader.Status.UNRECOVERABLE_ERROR; + errorString = e.getMessage(); + s_logger.error(errorString); + } catch (IOException e) { + status = TemplateUploader.Status.UNRECOVERABLE_ERROR; + errorString = e.getMessage(); + s_logger.error(errorString); + } + finally + { + try + { + if (inputStream != null){ + inputStream.close(); + } + if (outputStream != null){ + outputStream.close(); + } + }catch (IOException ioe){ + s_logger.error(" Caught exception while closing the resources" ); + } + if (callback != null) { + callback.uploadComplete(status); + } + } - return 0; - } + return 0; + } - @Override - public void run() { - try { - upload(completionCallback); - } catch (Throwable t) { - s_logger.warn("Caught exception during upload "+ t.getMessage(), t); - errorString = "Failed to install: " + 
t.getMessage(); - status = TemplateUploader.Status.UNRECOVERABLE_ERROR; - } - - } + @Override + public void run() { + try { + upload(completionCallback); + } catch (Throwable t) { + s_logger.warn("Caught exception during upload "+ t.getMessage(), t); + errorString = "Failed to install: " + t.getMessage(); + status = TemplateUploader.Status.UNRECOVERABLE_ERROR; + } - @Override - public Status getStatus() { - return status; - } + } - @Override - public String getUploadError() { - return errorString; - } + @Override + public Status getStatus() { + return status; + } - @Override - public String getUploadLocalPath() { - return sourcePath; - } + @Override + public String getUploadError() { + return errorString; + } - @Override - public int getUploadPercent() { - if (entitySizeinBytes == 0) { - return 0; - } - return (int)(100.0*totalBytes/entitySizeinBytes); - } + @Override + public String getUploadLocalPath() { + return sourcePath; + } - @Override - public long getUploadTime() { - // TODO - return 0; - } + @Override + public int getUploadPercent() { + if (entitySizeinBytes == 0) { + return 0; + } + return (int)(100.0*totalBytes/entitySizeinBytes); + } - @Override - public long getUploadedBytes() { - return totalBytes; - } + @Override + public long getUploadTime() { + // TODO + return 0; + } - @Override - public void setResume(boolean resume) { - this.resume = resume; - - } + @Override + public long getUploadedBytes() { + return totalBytes; + } - @Override - public void setStatus(Status status) { - this.status = status; - } + @Override + public void setResume(boolean resume) { - @Override - public void setUploadError(String string) { - errorString = string; - } + } - @Override - public boolean stopUpload() { - switch (getStatus()) { - case IN_PROGRESS: - try { - if(outputStream != null) { - outputStream.close(); - } - if (inputStream != null){ - inputStream.close(); - } - } catch (IOException e) { - s_logger.error(" Caught exception while closing the resources" ); - } - 
status = TemplateUploader.Status.ABORTED; - return true; - case UNKNOWN: - case NOT_STARTED: - case RECOVERABLE_ERROR: - case UNRECOVERABLE_ERROR: - case ABORTED: - status = TemplateUploader.Status.ABORTED; - case UPLOAD_FINISHED: - return true; + @Override + public void setStatus(Status status) { + this.status = status; + } - default: - return true; - } - } + @Override + public void setUploadError(String string) { + errorString = string; + } + + @Override + public boolean stopUpload() { + switch (getStatus()) { + case IN_PROGRESS: + try { + if(outputStream != null) { + outputStream.close(); + } + if (inputStream != null){ + inputStream.close(); + } + } catch (IOException e) { + s_logger.error(" Caught exception while closing the resources" ); + } + status = TemplateUploader.Status.ABORTED; + return true; + case UNKNOWN: + case NOT_STARTED: + case RECOVERABLE_ERROR: + case UNRECOVERABLE_ERROR: + case ABORTED: + status = TemplateUploader.Status.ABORTED; + case UPLOAD_FINISHED: + return true; + + default: + return true; + } + } } diff --git a/core/src/com/cloud/storage/template/HttpTemplateDownloader.java b/core/src/com/cloud/storage/template/HttpTemplateDownloader.java index d87dd68bb81..f0f19629841 100644 --- a/core/src/com/cloud/storage/template/HttpTemplateDownloader.java +++ b/core/src/com/cloud/storage/template/HttpTemplateDownloader.java @@ -22,14 +22,9 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.RandomAccessFile; -import java.net.Inet6Address; -import java.net.InetAddress; -import java.net.URI; import java.net.URISyntaxException; -import java.net.UnknownHostException; import java.util.Date; -import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.commons.httpclient.ChunkedInputStream; import org.apache.commons.httpclient.Credentials; import org.apache.commons.httpclient.Header; @@ -45,10 +40,11 @@ import org.apache.commons.httpclient.auth.AuthScope; import 
org.apache.commons.httpclient.methods.GetMethod; import org.apache.commons.httpclient.params.HttpMethodParams; import org.apache.log4j.Logger; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import com.cloud.agent.api.storage.Proxy; import com.cloud.storage.StorageLayer; -import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; @@ -56,145 +52,146 @@ import com.cloud.utils.UriUtils; * Download a template file using HTTP * */ -public class HttpTemplateDownloader implements TemplateDownloader { - public static final Logger s_logger = Logger.getLogger(HttpTemplateDownloader.class.getName()); +public class HttpTemplateDownloader extends ManagedContextRunnable implements TemplateDownloader { + public static final Logger s_logger = Logger.getLogger(HttpTemplateDownloader.class.getName()); private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager(); - private static final int CHUNK_SIZE = 1024*1024; //1M - private String downloadUrl; - private String toFile; - public TemplateDownloader.Status status= TemplateDownloader.Status.NOT_STARTED; - public String errorString = " "; - private long remoteSize = 0; - public long downloadTime = 0; - public long totalBytes; - private final HttpClient client; - private GetMethod request; - private boolean resume = false; - private DownloadCompleteCallback completionCallback; - StorageLayer _storage; - boolean inited = true; + private static final int CHUNK_SIZE = 1024*1024; //1M + private String downloadUrl; + private String toFile; + public TemplateDownloader.Status status= TemplateDownloader.Status.NOT_STARTED; + public String errorString = " "; + private long remoteSize = 0; + public long downloadTime = 0; + public long totalBytes; + private final HttpClient client; + private GetMethod request; + private boolean resume = 
false; + private DownloadCompleteCallback completionCallback; + StorageLayer _storage; + boolean inited = true; - private String toDir; - private long MAX_TEMPLATE_SIZE_IN_BYTES; - private ResourceType resourceType = ResourceType.TEMPLATE; - private final HttpMethodRetryHandler myretryhandler; + private String toDir; + private long MAX_TEMPLATE_SIZE_IN_BYTES; + private ResourceType resourceType = ResourceType.TEMPLATE; + private final HttpMethodRetryHandler myretryhandler; - public HttpTemplateDownloader (StorageLayer storageLayer, String downloadUrl, String toDir, DownloadCompleteCallback callback, long maxTemplateSizeInBytes, String user, String password, Proxy proxy, ResourceType resourceType) { - this._storage = storageLayer; - this.downloadUrl = downloadUrl; - this.setToDir(toDir); - this.status = TemplateDownloader.Status.NOT_STARTED; - this.resourceType = resourceType; - this.MAX_TEMPLATE_SIZE_IN_BYTES = maxTemplateSizeInBytes; + public HttpTemplateDownloader (StorageLayer storageLayer, String downloadUrl, String toDir, DownloadCompleteCallback callback, long maxTemplateSizeInBytes, String user, String password, Proxy proxy, ResourceType resourceType) { + _storage = storageLayer; + this.downloadUrl = downloadUrl; + setToDir(toDir); + status = TemplateDownloader.Status.NOT_STARTED; + this.resourceType = resourceType; + MAX_TEMPLATE_SIZE_IN_BYTES = maxTemplateSizeInBytes; - this.totalBytes = 0; - this.client = new HttpClient(s_httpClientManager); + totalBytes = 0; + client = new HttpClient(s_httpClientManager); - myretryhandler = new HttpMethodRetryHandler() { - public boolean retryMethod( - final HttpMethod method, - final IOException exception, - int executionCount) { - if (executionCount >= 2) { - // Do not retry if over max retry count - return false; - } - if (exception instanceof NoHttpResponseException) { - // Retry if the server dropped connection on us - return true; - } - if (!method.isRequestSent()) { - // Retry if the request has not been sent 
fully or - // if it's OK to retry methods that have been sent - return true; - } - // otherwise do not retry - return false; - } - }; + myretryhandler = new HttpMethodRetryHandler() { + @Override + public boolean retryMethod( + final HttpMethod method, + final IOException exception, + int executionCount) { + if (executionCount >= 2) { + // Do not retry if over max retry count + return false; + } + if (exception instanceof NoHttpResponseException) { + // Retry if the server dropped connection on us + return true; + } + if (!method.isRequestSent()) { + // Retry if the request has not been sent fully or + // if it's OK to retry methods that have been sent + return true; + } + // otherwise do not retry + return false; + } + }; - try { - this.request = new GetMethod(downloadUrl); - this.request.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, myretryhandler); - this.completionCallback = callback; - //this.request.setFollowRedirects(false); + try { + request = new GetMethod(downloadUrl); + request.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, myretryhandler); + completionCallback = callback; + //this.request.setFollowRedirects(false); - File f = File.createTempFile("dnld", "tmp_", new File(toDir)); + File f = File.createTempFile("dnld", "tmp_", new File(toDir)); - if (_storage != null) { - _storage.setWorldReadableAndWriteable(f); - } + if (_storage != null) { + _storage.setWorldReadableAndWriteable(f); + } - toFile = f.getAbsolutePath(); - Pair hostAndPort = UriUtils.validateUrl(downloadUrl); + toFile = f.getAbsolutePath(); + Pair hostAndPort = UriUtils.validateUrl(downloadUrl); - if (proxy != null) { - client.getHostConfiguration().setProxy(proxy.getHost(), proxy.getPort()); - if (proxy.getUserName() != null) { - Credentials proxyCreds = new UsernamePasswordCredentials(proxy.getUserName(), proxy.getPassword()); - client.getState().setProxyCredentials(AuthScope.ANY, proxyCreds); - } - } - if ((user != null) && (password != null)) { - 
client.getParams().setAuthenticationPreemptive(true); - Credentials defaultcreds = new UsernamePasswordCredentials(user, password); - client.getState().setCredentials(new AuthScope(hostAndPort.first(), hostAndPort.second(), AuthScope.ANY_REALM), defaultcreds); - s_logger.info("Added username=" + user + ", password=" + password + "for host " + hostAndPort.first() + ":" + hostAndPort.second()); - } else { - s_logger.info("No credentials configured for host=" + hostAndPort.first() + ":" + hostAndPort.second()); - } - } catch (IllegalArgumentException iae) { - errorString = iae.getMessage(); - status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - inited = false; - } catch (Exception ex){ - errorString = "Unable to start download -- check url? "; - status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - s_logger.warn("Exception in constructor -- " + ex.toString()); - } catch (Throwable th) { - s_logger.warn("throwable caught ", th); - } - } + if (proxy != null) { + client.getHostConfiguration().setProxy(proxy.getHost(), proxy.getPort()); + if (proxy.getUserName() != null) { + Credentials proxyCreds = new UsernamePasswordCredentials(proxy.getUserName(), proxy.getPassword()); + client.getState().setProxyCredentials(AuthScope.ANY, proxyCreds); + } + } + if ((user != null) && (password != null)) { + client.getParams().setAuthenticationPreemptive(true); + Credentials defaultcreds = new UsernamePasswordCredentials(user, password); + client.getState().setCredentials(new AuthScope(hostAndPort.first(), hostAndPort.second(), AuthScope.ANY_REALM), defaultcreds); + s_logger.info("Added username=" + user + ", password=" + password + "for host " + hostAndPort.first() + ":" + hostAndPort.second()); + } else { + s_logger.info("No credentials configured for host=" + hostAndPort.first() + ":" + hostAndPort.second()); + } + } catch (IllegalArgumentException iae) { + errorString = iae.getMessage(); + status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; + inited = false; + } catch 
(Exception ex){ + errorString = "Unable to start download -- check url? "; + status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; + s_logger.warn("Exception in constructor -- " + ex.toString()); + } catch (Throwable th) { + s_logger.warn("throwable caught ", th); + } + } - @Override - public long download(boolean resume, DownloadCompleteCallback callback) { - switch (status) { - case ABORTED: - case UNRECOVERABLE_ERROR: - case DOWNLOAD_FINISHED: - return 0; - default: + @Override + public long download(boolean resume, DownloadCompleteCallback callback) { + switch (status) { + case ABORTED: + case UNRECOVERABLE_ERROR: + case DOWNLOAD_FINISHED: + return 0; + default: - } + } int bytes=0; - File file = new File(toFile); - try { + File file = new File(toFile); + try { - long localFileSize = 0; - if (file.exists() && resume) { - localFileSize = file.length(); - s_logger.info("Resuming download to file (current size)=" + localFileSize); - } + long localFileSize = 0; + if (file.exists() && resume) { + localFileSize = file.length(); + s_logger.info("Resuming download to file (current size)=" + localFileSize); + } Date start = new Date(); - int responseCode=0; + int responseCode=0; - if (localFileSize > 0 ) { - // require partial content support for resume - request.addRequestHeader("Range", "bytes=" + localFileSize + "-"); - if (client.executeMethod(request) != HttpStatus.SC_PARTIAL_CONTENT) { - errorString = "HTTP Server does not support partial get"; - status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - return 0; - } - } else if ((responseCode = client.executeMethod(request)) != HttpStatus.SC_OK) { - status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - errorString = " HTTP Server returned " + responseCode + " (expected 200 OK) "; + if (localFileSize > 0 ) { + // require partial content support for resume + request.addRequestHeader("Range", "bytes=" + localFileSize + "-"); + if (client.executeMethod(request) != HttpStatus.SC_PARTIAL_CONTENT) { + errorString 
= "HTTP Server does not support partial get"; + status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; + return 0; + } + } else if ((responseCode = client.executeMethod(request)) != HttpStatus.SC_OK) { + status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; + errorString = " HTTP Server returned " + responseCode + " (expected 200 OK) "; return 0; //FIXME: retry? } @@ -202,16 +199,16 @@ public class HttpTemplateDownloader implements TemplateDownloader { boolean chunked = false; long remoteSize2 = 0; if (contentLengthHeader == null) { - Header chunkedHeader = request.getResponseHeader("Transfer-Encoding"); - if (chunkedHeader == null || !"chunked".equalsIgnoreCase(chunkedHeader.getValue())) { - status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - errorString=" Failed to receive length of download "; - return 0; //FIXME: what status do we put here? Do we retry? - } else if ("chunked".equalsIgnoreCase(chunkedHeader.getValue())){ - chunked = true; - } + Header chunkedHeader = request.getResponseHeader("Transfer-Encoding"); + if (chunkedHeader == null || !"chunked".equalsIgnoreCase(chunkedHeader.getValue())) { + status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; + errorString=" Failed to receive length of download "; + return 0; //FIXME: what status do we put here? Do we retry? 
+ } else if ("chunked".equalsIgnoreCase(chunkedHeader.getValue())){ + chunked = true; + } } else { - remoteSize2 = Long.parseLong(contentLengthHeader.getValue()); + remoteSize2 = Long.parseLong(contentLengthHeader.getValue()); if ( remoteSize2 == 0 ) { status = TemplateDownloader.Status.DOWNLOAD_FINISHED; String downloaded = "(download complete remote=" + remoteSize + "bytes)"; @@ -222,22 +219,22 @@ public class HttpTemplateDownloader implements TemplateDownloader { } if (remoteSize == 0) { - remoteSize = remoteSize2; + remoteSize = remoteSize2; } if (remoteSize > MAX_TEMPLATE_SIZE_IN_BYTES) { - s_logger.info("Remote size is too large: " + remoteSize + " , max=" + MAX_TEMPLATE_SIZE_IN_BYTES); - status = Status.UNRECOVERABLE_ERROR; - errorString = "Download file size is too large"; - return 0; + s_logger.info("Remote size is too large: " + remoteSize + " , max=" + MAX_TEMPLATE_SIZE_IN_BYTES); + status = Status.UNRECOVERABLE_ERROR; + errorString = "Download file size is too large"; + return 0; } if (remoteSize == 0) { - remoteSize = MAX_TEMPLATE_SIZE_IN_BYTES; + remoteSize = MAX_TEMPLATE_SIZE_IN_BYTES; } - InputStream in = !chunked?new BufferedInputStream(request.getResponseBodyAsStream()) - : new ChunkedInputStream(request.getResponseBodyAsStream()); + InputStream in = !chunked ? 
new BufferedInputStream(request.getResponseBodyAsStream()) : new ChunkedInputStream( + request.getResponseBodyAsStream()); RandomAccessFile out = new RandomAccessFile(file, "rwd"); out.seek(localFileSize); @@ -249,187 +246,193 @@ public class HttpTemplateDownloader implements TemplateDownloader { boolean done=false; status = TemplateDownloader.Status.IN_PROGRESS; while (!done && status != Status.ABORTED && offset <= remoteSize) { - if ( (bytes = in.read(block, 0, CHUNK_SIZE)) > -1) { - out.write(block, 0, bytes); - offset +=bytes; - out.seek(offset); - totalBytes += bytes; - } else { - done = true; - } + if ( (bytes = in.read(block, 0, CHUNK_SIZE)) > -1) { + out.write(block, 0, bytes); + offset +=bytes; + out.seek(offset); + totalBytes += bytes; + } else { + done = true; + } } Date finish = new Date(); String downloaded = "(incomplete download)"; if (totalBytes >= remoteSize) { - status = TemplateDownloader.Status.DOWNLOAD_FINISHED; - downloaded = "(download complete remote=" + remoteSize + "bytes)"; + status = TemplateDownloader.Status.DOWNLOAD_FINISHED; + downloaded = "(download complete remote=" + remoteSize + "bytes)"; } errorString = "Downloaded " + totalBytes + " bytes " + downloaded; downloadTime += finish.getTime() - start.getTime(); + in.close(); out.close(); return totalBytes; - }catch (HttpException hte) { - status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - errorString = hte.getMessage(); - } catch (IOException ioe) { - status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; //probably a file write error? 
- errorString = ioe.getMessage(); - } finally { - if (status == Status.UNRECOVERABLE_ERROR && file.exists() && !file.isDirectory()) { - file.delete(); - } - request.releaseConnection(); - if (callback != null) { - callback.downloadComplete(status); + }catch (HttpException hte) { + status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; + errorString = hte.getMessage(); + } catch (IOException ioe) { + status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; //probably a file write error? + errorString = ioe.getMessage(); + } finally { + if (status == Status.UNRECOVERABLE_ERROR && file.exists() && !file.isDirectory()) { + file.delete(); } - } - return 0; - } + request.releaseConnection(); + if (callback != null) { + callback.downloadComplete(status); + } + } + return 0; + } - public String getDownloadUrl() { - return downloadUrl; - } + public String getDownloadUrl() { + return downloadUrl; + } - public String getToFile() { + public String getToFile() { File file = new File(toFile); - return file.getAbsolutePath(); - } + return file.getAbsolutePath(); + } - public TemplateDownloader.Status getStatus() { - return status; - } + @Override + public TemplateDownloader.Status getStatus() { + return status; + } - public long getDownloadTime() { - return downloadTime; - } + @Override + public long getDownloadTime() { + return downloadTime; + } - public long getDownloadedBytes() { - return totalBytes; - } + @Override + public long getDownloadedBytes() { + return totalBytes; + } - @Override - @SuppressWarnings("fallthrough") - public boolean stopDownload() { - switch (getStatus()) { - case IN_PROGRESS: - if (request != null) { - request.abort(); - } - status = TemplateDownloader.Status.ABORTED; - return true; - case UNKNOWN: - case NOT_STARTED: - case RECOVERABLE_ERROR: - case UNRECOVERABLE_ERROR: - case ABORTED: - status = TemplateDownloader.Status.ABORTED; - case DOWNLOAD_FINISHED: - File f = new File(toFile); - if (f.exists()) { - f.delete(); - } - return true; + @Override + 
@SuppressWarnings("fallthrough") + public boolean stopDownload() { + switch (getStatus()) { + case IN_PROGRESS: + if (request != null) { + request.abort(); + } + status = TemplateDownloader.Status.ABORTED; + return true; + case UNKNOWN: + case NOT_STARTED: + case RECOVERABLE_ERROR: + case UNRECOVERABLE_ERROR: + case ABORTED: + status = TemplateDownloader.Status.ABORTED; + case DOWNLOAD_FINISHED: + File f = new File(toFile); + if (f.exists()) { + f.delete(); + } + return true; - default: - return true; - } - } + default: + return true; + } + } - @Override - public int getDownloadPercent() { - if (remoteSize == 0) { - return 0; - } + @Override + public int getDownloadPercent() { + if (remoteSize == 0) { + return 0; + } - return (int)(100.0*totalBytes/remoteSize); - } + return (int)(100.0*totalBytes/remoteSize); + } - @Override - public void run() { - try { - download(resume, completionCallback); - } catch (Throwable t) { - s_logger.warn("Caught exception during download "+ t.getMessage(), t); - errorString = "Failed to install: " + t.getMessage(); - status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; - } + @Override + protected void runInContext() { + try { + download(resume, completionCallback); + } catch (Throwable t) { + s_logger.warn("Caught exception during download "+ t.getMessage(), t); + errorString = "Failed to install: " + t.getMessage(); + status = TemplateDownloader.Status.UNRECOVERABLE_ERROR; + } - } + } - @Override - public void setStatus(TemplateDownloader.Status status) { - this.status = status; - } + @Override + public void setStatus(TemplateDownloader.Status status) { + this.status = status; + } - public boolean isResume() { - return resume; - } + public boolean isResume() { + return resume; + } - @Override - public String getDownloadError() { - return errorString; - } + @Override + public String getDownloadError() { + return errorString; + } - @Override - public String getDownloadLocalPath() { - return getToFile(); - } + @Override + public 
String getDownloadLocalPath() { + return getToFile(); + } - public void setResume(boolean resume) { - this.resume = resume; - } + @Override + public void setResume(boolean resume) { + this.resume = resume; + } - public void setToDir(String toDir) { - this.toDir = toDir; - } + public void setToDir(String toDir) { + this.toDir = toDir; + } - public String getToDir() { - return toDir; - } + public String getToDir() { + return toDir; + } - public long getMaxTemplateSizeInBytes() { - return this.MAX_TEMPLATE_SIZE_IN_BYTES; - } + @Override + public long getMaxTemplateSizeInBytes() { + return MAX_TEMPLATE_SIZE_IN_BYTES; + } - public static void main(String[] args) { - String url ="http:// dev.mysql.com/get/Downloads/MySQL-5.0/mysql-noinstall-5.0.77-win32.zip/from/http://mirror.services.wisc.edu/mysql/"; - try { - URI uri = new java.net.URI(url); - } catch (URISyntaxException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - TemplateDownloader td = new HttpTemplateDownloader(null, url,"/tmp/mysql", null, TemplateDownloader.DEFAULT_MAX_TEMPLATE_SIZE_IN_BYTES, null, null, null, null); - long bytes = td.download(true, null); - if (bytes > 0) { - System.out.println("Downloaded (" + bytes + " bytes)" + " in " + td.getDownloadTime()/1000 + " secs"); - } else { - System.out.println("Failed download"); - } + public static void main(String[] args) { + String url ="http:// dev.mysql.com/get/Downloads/MySQL-5.0/mysql-noinstall-5.0.77-win32.zip/from/http://mirror.services.wisc.edu/mysql/"; + try { + new java.net.URI(url); + } catch (URISyntaxException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + TemplateDownloader td = new HttpTemplateDownloader(null, url,"/tmp/mysql", null, TemplateDownloader.DEFAULT_MAX_TEMPLATE_SIZE_IN_BYTES, null, null, null, null); + long bytes = td.download(true, null); + if (bytes > 0) { + System.out.println("Downloaded (" + bytes + " bytes)" + " in " + td.getDownloadTime()/1000 + " secs"); + } else { + 
System.out.println("Failed download"); + } - } + } - @Override - public void setDownloadError(String error) { - errorString = error; - } + @Override + public void setDownloadError(String error) { + errorString = error; + } - @Override - public boolean isInited() { - return inited; - } + @Override + public boolean isInited() { + return inited; + } - public ResourceType getResourceType() { - return resourceType; - } + public ResourceType getResourceType() { + return resourceType; + } } diff --git a/core/src/com/cloud/storage/template/LocalTemplateDownloader.java b/core/src/com/cloud/storage/template/LocalTemplateDownloader.java index c8927a117d3..581524bb2f1 100644 --- a/core/src/com/cloud/storage/template/LocalTemplateDownloader.java +++ b/core/src/com/cloud/storage/template/LocalTemplateDownloader.java @@ -34,7 +34,7 @@ import com.cloud.storage.StorageLayer; public class LocalTemplateDownloader extends TemplateDownloaderBase implements TemplateDownloader { public static final Logger s_logger = Logger.getLogger(LocalTemplateDownloader.class); - + public LocalTemplateDownloader(StorageLayer storageLayer, String downloadUrl, String toDir, long maxTemplateSizeInBytes, DownloadCompleteCallback callback) { super(storageLayer, downloadUrl, toDir, maxTemplateSizeInBytes, callback); String filename = downloadUrl.substring(downloadUrl.lastIndexOf(File.separator)); @@ -44,14 +44,14 @@ public class LocalTemplateDownloader extends TemplateDownloaderBase implements T @Override public long download(boolean resume, DownloadCompleteCallback callback) { if (_status == Status.ABORTED || - _status == Status.UNRECOVERABLE_ERROR || - _status == Status.DOWNLOAD_FINISHED) { + _status == Status.UNRECOVERABLE_ERROR || + _status == Status.DOWNLOAD_FINISHED) { return 0; } _start = System.currentTimeMillis(); _resume = resume; - + File src; try { src = new File(new URI(_downloadUrl)); @@ -61,18 +61,20 @@ public class LocalTemplateDownloader extends TemplateDownloaderBase implements T return 0; 
} File dst = new File(_toFile); - + FileChannel fic = null; FileChannel foc = null; - + FileInputStream fis = null; + FileOutputStream fos = null; + try { - if (_storage != null) { - dst.createNewFile(); - _storage.setWorldReadableAndWriteable(dst); - } - + if (_storage != null) { + dst.createNewFile(); + _storage.setWorldReadableAndWriteable(dst); + } + ByteBuffer buffer = ByteBuffer.allocate(1024 * 512); - FileInputStream fis; + try { fis = new FileInputStream(src); } catch (FileNotFoundException e) { @@ -81,7 +83,6 @@ public class LocalTemplateDownloader extends TemplateDownloaderBase implements T return -1; } fic = fis.getChannel(); - FileOutputStream fos; try { fos = new FileOutputStream(dst); } catch (FileNotFoundException e) { @@ -89,11 +90,11 @@ public class LocalTemplateDownloader extends TemplateDownloaderBase implements T return -1; } foc = fos.getChannel(); - + _remoteSize = src.length(); - this._totalBytes = 0; + _totalBytes = 0; _status = TemplateDownloader.Status.IN_PROGRESS; - + try { while (_status != Status.ABORTED && fic.read(buffer) != -1) { buffer.flip(); @@ -104,13 +105,13 @@ public class LocalTemplateDownloader extends TemplateDownloaderBase implements T } catch (IOException e) { s_logger.warn("Unable to download", e); } - + String downloaded = "(incomplete download)"; if (_totalBytes == _remoteSize) { _status = TemplateDownloader.Status.DOWNLOAD_FINISHED; downloaded = "(download complete)"; } - + _errorString = "Downloaded " + _remoteSize + " bytes " + downloaded; _downloadTime += System.currentTimeMillis() - _start; return _totalBytes; @@ -125,14 +126,28 @@ public class LocalTemplateDownloader extends TemplateDownloaderBase implements T } catch (IOException e) { } } - + if (foc != null) { try { foc.close(); } catch (IOException e) { } } - + + if (fis != null) { + try { + fis.close(); + } catch (IOException e) { + } + } + + if (fos != null) { + try { + fos.close(); + } catch (IOException e) { + } + } + if (_status == 
Status.UNRECOVERABLE_ERROR && dst.exists()) { dst.delete(); } @@ -141,7 +156,7 @@ public class LocalTemplateDownloader extends TemplateDownloaderBase implements T } } } - + public static void main(String[] args) { String url ="file:///home/ahuang/Download/E3921_P5N7A-VM_manual.zip"; TemplateDownloader td = new LocalTemplateDownloader(null, url,"/tmp/mysql", TemplateDownloader.DEFAULT_MAX_TEMPLATE_SIZE_IN_BYTES, null); diff --git a/core/src/com/cloud/storage/template/RawImageProcessor.java b/core/src/com/cloud/storage/template/RawImageProcessor.java index 0e4c8c1822a..f516d75cfa5 100644 --- a/core/src/com/cloud/storage/template/RawImageProcessor.java +++ b/core/src/com/cloud/storage/template/RawImageProcessor.java @@ -25,9 +25,8 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import com.cloud.exception.InternalErrorException; -import com.cloud.storage.StorageLayer; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.template.Processor.FormatInfo; +import com.cloud.storage.StorageLayer; import com.cloud.utils.component.AdapterBase; @Local(value=Processor.class) diff --git a/core/src/com/cloud/storage/template/S3TemplateDownloader.java b/core/src/com/cloud/storage/template/S3TemplateDownloader.java index 340e0dba868..9dacbd31282 100644 --- a/core/src/com/cloud/storage/template/S3TemplateDownloader.java +++ b/core/src/com/cloud/storage/template/S3TemplateDownloader.java @@ -24,7 +24,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.Date; -import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.commons.httpclient.ChunkedInputStream; import org.apache.commons.httpclient.Credentials; import org.apache.commons.httpclient.Header; @@ -43,15 +42,15 @@ import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import com.amazonaws.AmazonClientException; -import com.amazonaws.auth.AWSCredentials; -import com.amazonaws.auth.BasicAWSCredentials; 
import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.ProgressEvent; import com.amazonaws.services.s3.model.ProgressListener; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.StorageClass; -import com.amazonaws.services.s3.transfer.TransferManager; -import com.amazonaws.services.s3.transfer.Upload; + +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; + import com.cloud.agent.api.storage.Proxy; import com.cloud.agent.api.to.S3TO; import com.cloud.utils.Pair; @@ -62,7 +61,7 @@ import com.cloud.utils.UriUtils; * Download a template file using HTTP * */ -public class S3TemplateDownloader implements TemplateDownloader { +public class S3TemplateDownloader extends ManagedContextRunnable implements TemplateDownloader { public static final Logger s_logger = Logger.getLogger(S3TemplateDownloader.class.getName()); private static final MultiThreadedHttpConnectionManager s_httpClientManager = new MultiThreadedHttpConnectionManager(); @@ -89,15 +88,15 @@ public class S3TemplateDownloader implements TemplateDownloader { public S3TemplateDownloader(S3TO storageLayer, String downloadUrl, String installPath, DownloadCompleteCallback callback, long maxTemplateSizeInBytes, String user, String password, Proxy proxy, ResourceType resourceType) { - this.s3 = storageLayer; + s3 = storageLayer; this.downloadUrl = downloadUrl; this.installPath = installPath; - this.status = TemplateDownloader.Status.NOT_STARTED; + status = TemplateDownloader.Status.NOT_STARTED; this.resourceType = resourceType; - this.maxTemplateSizeInByte = maxTemplateSizeInBytes; + maxTemplateSizeInByte = maxTemplateSizeInBytes; - this.totalBytes = 0; - this.client = new HttpClient(s_httpClientManager); + totalBytes = 0; + client = new HttpClient(s_httpClientManager); myretryhandler = new HttpMethodRetryHandler() { @Override @@ -121,12 +120,12 
@@ public class S3TemplateDownloader implements TemplateDownloader { }; try { - this.request = new GetMethod(downloadUrl); - this.request.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, myretryhandler); - this.completionCallback = callback; + request = new GetMethod(downloadUrl); + request.getParams().setParameter(HttpMethodParams.RETRY_HANDLER, myretryhandler); + completionCallback = callback; Pair hostAndPort = UriUtils.validateUrl(downloadUrl); - this.fileName = StringUtils.substringAfterLast(downloadUrl, "/"); + fileName = StringUtils.substringAfterLast(downloadUrl, "/"); if (proxy != null) { client.getHostConfiguration().setProxy(proxy.getHost(), proxy.getPort()); @@ -226,9 +225,6 @@ public class S3TemplateDownloader implements TemplateDownloader { // compute s3 key s3Key = join(asList(installPath, fileName), S3Utils.SEPARATOR); - // multi-part upload using S3 api to handle > 5G input stream - TransferManager tm = new TransferManager(S3Utils.acquireClient(s3)); - // download using S3 API ObjectMetadata metadata = new ObjectMetadata(); metadata.setContentLength(remoteSize); @@ -261,11 +257,20 @@ public class S3TemplateDownloader implements TemplateDownloader { } }); - // TransferManager processes all transfers asynchronously, - // so this call will return immediately. 
- Upload upload = tm.upload(putObjectRequest); + - upload.waitForCompletion(); + if ( !s3.getSingleUpload(remoteSize) ){ + // use TransferManager to do multipart upload + S3Utils.mputObject(s3, putObjectRequest); + } else{ + // single part upload, with 5GB limit in Amazon + S3Utils.putObject(s3, putObjectRequest); + while (status != TemplateDownloader.Status.DOWNLOAD_FINISHED && + status != TemplateDownloader.Status.UNRECOVERABLE_ERROR && + status != TemplateDownloader.Status.ABORTED) { + // wait for completion + } + } // finished or aborted Date finish = new Date(); @@ -361,7 +366,7 @@ public class S3TemplateDownloader implements TemplateDownloader { } @Override - public void run() { + protected void runInContext() { try { download(resume, completionCallback); } catch (Throwable t) { @@ -388,7 +393,7 @@ public class S3TemplateDownloader implements TemplateDownloader { @Override public String getDownloadLocalPath() { - return this.s3Key; + return s3Key; } @Override @@ -398,7 +403,7 @@ public class S3TemplateDownloader implements TemplateDownloader { @Override public long getMaxTemplateSizeInBytes() { - return this.maxTemplateSizeInByte; + return maxTemplateSizeInByte; } @Override diff --git a/core/src/com/cloud/storage/template/ScpTemplateDownloader.java b/core/src/com/cloud/storage/template/ScpTemplateDownloader.java index 724392f812f..fbc756f16b1 100644 --- a/core/src/com/cloud/storage/template/ScpTemplateDownloader.java +++ b/core/src/com/cloud/storage/template/ScpTemplateDownloader.java @@ -22,9 +22,10 @@ import java.net.URISyntaxException; import org.apache.log4j.Logger; +import com.trilead.ssh2.SCPClient; + import com.cloud.storage.StorageLayer; import com.cloud.utils.exception.CloudRuntimeException; -import com.trilead.ssh2.SCPClient; public class ScpTemplateDownloader extends TemplateDownloaderBase implements TemplateDownloader { private static final Logger s_logger = Logger.getLogger(ScpTemplateDownloader.class); @@ -83,7 +84,6 @@ public class 
ScpTemplateDownloader extends TemplateDownloaderBase implements Tem if (port == -1) { port = 22; } - long length = 0; File file = new File(_toFile); com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(uri.getHost(), port); diff --git a/core/src/com/cloud/storage/template/TemplateDownloaderBase.java b/core/src/com/cloud/storage/template/TemplateDownloaderBase.java index bdbdd457be1..7cbd4efe02d 100644 --- a/core/src/com/cloud/storage/template/TemplateDownloaderBase.java +++ b/core/src/com/cloud/storage/template/TemplateDownloaderBase.java @@ -18,11 +18,12 @@ package com.cloud.storage.template; import java.io.File; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import com.cloud.storage.StorageLayer; -public abstract class TemplateDownloaderBase implements TemplateDownloader { +public abstract class TemplateDownloaderBase extends ManagedContextRunnable implements TemplateDownloader { private static final Logger s_logger = Logger.getLogger(TemplateDownloaderBase.class); protected String _downloadUrl; @@ -123,7 +124,7 @@ public abstract class TemplateDownloaderBase implements TemplateDownloader { } @Override - public void run() { + protected void runInContext() { try { download(_resume, _callback); } catch (Exception e) { diff --git a/core/src/com/cloud/storage/template/TemplateUploader.java b/core/src/com/cloud/storage/template/TemplateUploader.java index 8e0373a5d15..32e877144e6 100755 --- a/core/src/com/cloud/storage/template/TemplateUploader.java +++ b/core/src/com/cloud/storage/template/TemplateUploader.java @@ -16,9 +16,6 @@ // under the License. 
package com.cloud.storage.template; -import com.cloud.storage.template.TemplateUploader.UploadCompleteCallback; -import com.cloud.storage.template.TemplateUploader.Status; - public interface TemplateUploader extends Runnable{ /** diff --git a/core/src/org/apache/cloudstack/storage/command/AttachCommand.java b/core/src/org/apache/cloudstack/storage/command/AttachCommand.java index 44bce910d02..7e47ba4e317 100644 --- a/core/src/org/apache/cloudstack/storage/command/AttachCommand.java +++ b/core/src/org/apache/cloudstack/storage/command/AttachCommand.java @@ -24,14 +24,6 @@ import com.cloud.agent.api.to.DiskTO; public final class AttachCommand extends Command implements StorageSubSystemCommand { private DiskTO disk; private String vmName; - private String _storageHost; - private int _storagePort; - private boolean _managed; - private String _iScsiName; - private String _chapInitiatorUsername; - private String _chapInitiatorPassword; - private String _chapTargetUsername; - private String _chapTargetPassword; public AttachCommand(DiskTO disk, String vmName) { super(); @@ -59,68 +51,4 @@ public final class AttachCommand extends Command implements StorageSubSystemComm public void setVmName(String vmName) { this.vmName = vmName; } - - public void setStorageHost(String storageHost) { - _storageHost = storageHost; - } - - public String getStorageHost() { - return _storageHost; - } - - public void setStoragePort(int storagePort) { - _storagePort = storagePort; - } - - public int getStoragePort() { - return _storagePort; - } - - public void setManaged(boolean managed) { - _managed = managed; - } - - public boolean isManaged() { - return _managed; - } - - public void set_iScsiName(String iScsiName) { - this._iScsiName = iScsiName; - } - - public String get_iScsiName() { - return _iScsiName; - } - - public void setChapInitiatorUsername(String chapInitiatorUsername) { - _chapInitiatorUsername = chapInitiatorUsername; - } - - public String getChapInitiatorUsername() { - return 
_chapInitiatorUsername; - } - - public void setChapInitiatorPassword(String chapInitiatorPassword) { - _chapInitiatorPassword = chapInitiatorPassword; - } - - public String getChapInitiatorPassword() { - return _chapInitiatorPassword; - } - - public void setChapTargetUsername(String chapTargetUsername) { - _chapTargetUsername = chapTargetUsername; - } - - public String getChapTargetUsername() { - return _chapTargetUsername; - } - - public void setChapTargetPassword(String chapTargetPassword) { - _chapTargetPassword = chapTargetPassword; - } - - public String getChapTargetPassword() { - return _chapTargetPassword; - } } diff --git a/core/src/org/apache/cloudstack/storage/command/CopyCommand.java b/core/src/org/apache/cloudstack/storage/command/CopyCommand.java index 629fafe545f..e9ec0b35f11 100644 --- a/core/src/org/apache/cloudstack/storage/command/CopyCommand.java +++ b/core/src/org/apache/cloudstack/storage/command/CopyCommand.java @@ -63,4 +63,8 @@ public final class CopyCommand extends Command implements StorageSubSystemComman this.cacheTO = cacheTO; } + public int getWaitInMillSeconds() { + return this.getWait() * 1000; + } + } diff --git a/core/src/org/apache/cloudstack/storage/command/DownloadCommand.java b/core/src/org/apache/cloudstack/storage/command/DownloadCommand.java index 84dd59db9f6..9cc3e497c19 100644 --- a/core/src/org/apache/cloudstack/storage/command/DownloadCommand.java +++ b/core/src/org/apache/cloudstack/storage/command/DownloadCommand.java @@ -26,7 +26,6 @@ import com.cloud.agent.api.storage.Proxy; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.storage.Storage.ImageFormat; -import com.cloud.storage.Volume; public class DownloadCommand extends AbstractDownloadCommand implements InternalIdentity { @@ -53,29 +52,29 @@ public class DownloadCommand extends AbstractDownloadCommand implements Internal public DownloadCommand(DownloadCommand that) { super(that); - this.hvm = that.hvm; - this.checksum 
= that.checksum; - this.id = that.id; - this.description = that.description; - this.auth = that.getAuth(); - this.setSecUrl(that.getSecUrl()); - this.maxDownloadSizeInBytes = that.getMaxDownloadSizeInBytes(); - this.resourceType = that.resourceType; - this.installPath = that.installPath; - this._store = that._store; + hvm = that.hvm; + checksum = that.checksum; + id = that.id; + description = that.description; + auth = that.getAuth(); + setSecUrl(that.getSecUrl()); + maxDownloadSizeInBytes = that.getMaxDownloadSizeInBytes(); + resourceType = that.resourceType; + installPath = that.installPath; + _store = that._store; } public DownloadCommand(TemplateObjectTO template, Long maxDownloadSizeInBytes) { super(template.getName(), template.getOrigUrl(), template.getFormat(), template.getAccountId()); - this._store = template.getDataStore(); - this.installPath = template.getPath(); - this.hvm = template.isRequiresHvm(); - this.checksum = template.getChecksum(); - this.id = template.getId(); - this.description = template.getDescription(); + _store = template.getDataStore(); + installPath = template.getPath(); + hvm = template.isRequiresHvm(); + checksum = template.getChecksum(); + id = template.getId(); + description = template.getDescription(); if (_store instanceof NfsTO) { - this.setSecUrl(((NfsTO) _store).getUrl()); + setSecUrl(((NfsTO) _store).getUrl()); } this.maxDownloadSizeInBytes = maxDownloadSizeInBytes; } @@ -87,12 +86,12 @@ public class DownloadCommand extends AbstractDownloadCommand implements Internal public DownloadCommand(VolumeObjectTO volume, Long maxDownloadSizeInBytes, String checkSum, String url, ImageFormat format) { super(volume.getName(), url, format, volume.getAccountId()); - this.checksum = checkSum; - this.id = volume.getVolumeId(); - this.installPath = volume.getPath(); - this._store = volume.getDataStore(); + checksum = checkSum; + id = volume.getVolumeId(); + installPath = volume.getPath(); + _store = volume.getDataStore(); 
this.maxDownloadSizeInBytes = maxDownloadSizeInBytes; - this.resourceType = ResourceType.VOLUME; + resourceType = ResourceType.VOLUME; } @Override public long getId() { @@ -184,6 +183,6 @@ public class DownloadCommand extends AbstractDownloadCommand implements Internal } public DataStoreTO getCacheStore() { - return this.cacheStore; + return cacheStore; } } diff --git a/core/src/org/apache/cloudstack/storage/command/ForgetObjectCmd.java b/core/src/org/apache/cloudstack/storage/command/ForgetObjectCmd.java new file mode 100644 index 00000000000..58fb7802019 --- /dev/null +++ b/core/src/org/apache/cloudstack/storage/command/ForgetObjectCmd.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.command; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataTO; + +public class ForgetObjectCmd extends Command implements StorageSubSystemCommand { + private DataTO dataTO; + public ForgetObjectCmd(DataTO data) { + this.dataTO = data; + } + + public DataTO getDataTO() { + return this.dataTO; + } + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/org/apache/cloudstack/storage/command/IntroduceObjectAnswer.java b/core/src/org/apache/cloudstack/storage/command/IntroduceObjectAnswer.java new file mode 100644 index 00000000000..03c74b8aaa0 --- /dev/null +++ b/core/src/org/apache/cloudstack/storage/command/IntroduceObjectAnswer.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.command; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataTO; + +public class IntroduceObjectAnswer extends Answer { + private DataTO dataTO; + public IntroduceObjectAnswer(DataTO dataTO) { + this.dataTO = dataTO; + } + + public DataTO getDataTO() { + return this.dataTO; + } +} diff --git a/core/src/org/apache/cloudstack/storage/command/IntroduceObjectCmd.java b/core/src/org/apache/cloudstack/storage/command/IntroduceObjectCmd.java new file mode 100644 index 00000000000..1aabed2d279 --- /dev/null +++ b/core/src/org/apache/cloudstack/storage/command/IntroduceObjectCmd.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.command; + +import com.cloud.agent.api.Command; +import com.cloud.agent.api.to.DataTO; + +public class IntroduceObjectCmd extends Command implements StorageSubSystemCommand { + private DataTO dataTO; + public IntroduceObjectCmd(DataTO dataTO) { + this.dataTO = dataTO; + } + + public DataTO getDataTO() { + return this.dataTO; + } + + @Override + public boolean executeInSequence() { + return false; + } +} diff --git a/core/src/org/apache/cloudstack/storage/to/ImageStoreTO.java b/core/src/org/apache/cloudstack/storage/to/ImageStoreTO.java index 0037ea57242..ec6c24092d3 100644 --- a/core/src/org/apache/cloudstack/storage/to/ImageStoreTO.java +++ b/core/src/org/apache/cloudstack/storage/to/ImageStoreTO.java @@ -26,6 +26,7 @@ public class ImageStoreTO implements DataStoreTO { private String uri; private String providerName; private DataStoreRole role; + private String uuid; public ImageStoreTO() { @@ -76,4 +77,13 @@ public class ImageStoreTO implements DataStoreTO { return new StringBuilder("ImageStoreTO[type=").append(type).append("|provider=").append(providerName) .append("|role=").append(role).append("|uri=").append(uri).append("]").toString(); } + + @Override + public String getUuid() { + return uuid; + } + + public void setUuid(String uuid) { + this.uuid = uuid; + } } diff --git a/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java b/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java index 5e870df3716..91d78a49350 100644 --- a/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java +++ b/core/src/org/apache/cloudstack/storage/to/PrimaryDataStoreTO.java @@ -46,6 +46,7 @@ public class PrimaryDataStoreTO implements DataStoreTO { return this.id; } + @Override public String getUuid() { return this.uuid; } diff --git a/core/src/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/org/apache/cloudstack/storage/to/VolumeObjectTO.java index 5685fad59c4..46659a3a2d0 100644 --- 
a/core/src/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -38,6 +38,8 @@ public class VolumeObjectTO implements DataTO { private String chainInfo; private Storage.ImageFormat format; private long id; + + private Long deviceId; private Long bytesReadRate; private Long bytesWriteRate; private Long iopsReadRate; @@ -70,6 +72,7 @@ public class VolumeObjectTO implements DataTO { this.iopsReadRate = volume.getIopsReadRate(); this.iopsWriteRate = volume.getIopsWriteRate(); this.hypervisorType = volume.getHypervisorType(); + setDeviceId(volume.getDeviceId()); } public String getUuid() { @@ -220,4 +223,13 @@ public class VolumeObjectTO implements DataTO { return iopsWriteRate; } + public Long getDeviceId() { + return deviceId; + } + + public void setDeviceId(Long deviceId) { + this.deviceId = deviceId; + } + + } diff --git a/core/test/com/cloud/network/HAProxyConfiguratorTest.java b/core/test/com/cloud/network/HAProxyConfiguratorTest.java new file mode 100644 index 00000000000..d854231f985 --- /dev/null +++ b/core/test/com/cloud/network/HAProxyConfiguratorTest.java @@ -0,0 +1,97 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network; + +import static org.junit.Assert.*; + +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.Mockito; + +import com.cloud.agent.api.routing.LoadBalancerConfigCommand; +import com.cloud.agent.api.to.LoadBalancerTO; + +/** + * @author dhoogland + * + */ +public class HAProxyConfiguratorTest { + + /** + * @throws java.lang.Exception + */ + @BeforeClass + public static void setUpBeforeClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @AfterClass + public static void tearDownAfterClass() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @Before + public void setUp() throws Exception { + } + + /** + * @throws java.lang.Exception + */ + @After + public void tearDown() throws Exception { + } + + /** + * Test method for {@link com.cloud.network.HAProxyConfigurator#generateConfiguration(com.cloud.agent.api.routing.LoadBalancerConfigCommand)}. + */ + @Test + public void testGenerateConfigurationLoadBalancerConfigCommand() { + LoadBalancerTO lb = new LoadBalancerTO("1", "10.2.0.1", 80, "http", "bla", false, false, false, null); + LoadBalancerTO[] lba = new LoadBalancerTO[1]; + lba[0] = lb; + HAProxyConfigurator hpg = new HAProxyConfigurator(); + LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "12", false); + String result = genConfig(hpg, cmd); + assertTrue("keepalive disabled should result in 'mode http' in the resulting haproxy config", result.contains("mode http")); + + cmd = new LoadBalancerConfigCommand(lba, "10.0.0.1", "10.1.0.1", "10.1.1.1", null, 1L, "4", true); + result = genConfig(hpg, cmd); + assertTrue("keepalive enabled should not result in 'mode http' in the resulting haproxy config",! 
result.contains("mode http")); + // TODO + // create lb command + // setup tests for + // maxconn (test for maxpipes as well) + // httpmode + } + + private String genConfig(HAProxyConfigurator hpg, LoadBalancerConfigCommand cmd) { + String [] sa = hpg.generateConfiguration(cmd); + StringBuilder sb = new StringBuilder(); + for(String s: sa) { + sb.append(s).append('\n'); + } + return sb.toString(); + } + +} diff --git a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java index 0b2bb1f4f3f..5262d3b78a6 100644 --- a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java +++ b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeAnswerTest.java @@ -27,7 +27,7 @@ import com.cloud.storage.Storage.StoragePoolType; public class AttachVolumeAnswerTest { AttachVolumeCommand avc = new AttachVolumeCommand(true, false, "vmname", - StoragePoolType.Filesystem, "vPath", "vName", + StoragePoolType.Filesystem, "vPath", "vName", 1073741824L, 123456789L, "chainInfo"); AttachVolumeAnswer ava1 = new AttachVolumeAnswer(avc); String results = ""; diff --git a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java index 6f413c0268d..1c5caca5f5c 100644 --- a/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java +++ b/core/test/org/apache/cloudstack/api/agent/test/AttachVolumeCommandTest.java @@ -26,7 +26,7 @@ import com.cloud.storage.Storage.StoragePoolType; public class AttachVolumeCommandTest { AttachVolumeCommand avc = new AttachVolumeCommand(true, false, "vmname", - StoragePoolType.Filesystem, "vPath", "vName", + StoragePoolType.Filesystem, "vPath", "vName", 1073741824L, 123456789L, "chainInfo"); @Test diff --git a/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java 
b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java index 0fee8c64d87..a7a1fd2a3c7 100644 --- a/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java +++ b/core/test/org/apache/cloudstack/api/agent/test/BackupSnapshotCommandTest.java @@ -27,7 +27,6 @@ import java.util.Date; import org.junit.Test; import com.cloud.agent.api.BackupSnapshotCommand; -import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.SwiftTO; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePool; diff --git a/debian/changelog b/debian/changelog index dc9c65d2066..d6af31f69dc 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -cloudstack (4.3.0) unstable; urgency=low +cloudstack (4.3.0-snapshot) unstable; urgency=low * Update the version to 4.3.0.snapshot diff --git a/debian/cloudstack-agent.install b/debian/cloudstack-agent.install index a3cc86964dd..d708514fd14 100644 --- a/debian/cloudstack-agent.install +++ b/debian/cloudstack-agent.install @@ -21,6 +21,7 @@ /etc/init.d/cloudstack-agent /usr/bin/cloudstack-setup-agent /usr/bin/cloudstack-ssh +/usr/bin/cloudstack-agent-upgrade /var/log/cloudstack/agent /usr/share/cloudstack-agent/lib/* /usr/share/cloudstack-agent/plugins diff --git a/debian/cloudstack-agent.postinst b/debian/cloudstack-agent.postinst index 499ae6a695a..9bad1380bf0 100644 --- a/debian/cloudstack-agent.postinst +++ b/debian/cloudstack-agent.postinst @@ -34,7 +34,15 @@ case "$1" in fi done fi + + # Running cloudstack-agent-upgrade to update bridge name for upgrade from CloudStack 4.0.x (and before) to CloudStack 4.1 (and later) + /usr/bin/cloudstack-agent-upgrade + if [ ! 
-d "/etc/libvirt/hooks" ] ; then + mkdir /etc/libvirt/hooks + fi + cp -a /usr/share/cloudstack-agent/lib/libvirtqemuhook /etc/libvirt/hooks/qemu + /etc/init.d/libvirt-bin restart ;; esac -exit 0 \ No newline at end of file +exit 0 diff --git a/debian/cloudstack-management.install b/debian/cloudstack-management.install index a1325cdb2b5..f06ab86dda1 100644 --- a/debian/cloudstack-management.install +++ b/debian/cloudstack-management.install @@ -21,8 +21,6 @@ /etc/cloudstack/management/logging.properties /etc/cloudstack/management/commands.properties /etc/cloudstack/management/ehcache.xml -/etc/cloudstack/management/componentContext.xml -/etc/cloudstack/management/applicationContext.xml /etc/cloudstack/management/server-ssl.xml /etc/cloudstack/management/server-nonssl.xml /etc/cloudstack/management/server.xml @@ -33,7 +31,6 @@ /etc/cloudstack/management/tomcat6.conf /etc/cloudstack/management/web.xml /etc/cloudstack/management/environment.properties -/etc/cloudstack/management/nonossComponentContext.xml /etc/cloudstack/management/log4j-cloud.xml /etc/cloudstack/management/tomcat-users.xml /etc/cloudstack/management/context.xml diff --git a/debian/control b/debian/control index e6d1ef088f2..c756dcd0d8e 100644 --- a/debian/control +++ b/debian/control @@ -22,7 +22,7 @@ Description: CloudStack server library Package: cloudstack-agent Architecture: all -Depends: openjdk-6-jre | openjdk-7-jre, cloudstack-common (= ${source:Version}), lsb-base (>= 3.2), libcommons-daemon-java, libjna-java, openssh-client, libvirt0, sysvinit-utils, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, perl-base, perl-modules, ebtables, vlan, wget, jsvc, ipset, python-libvirt +Depends: openjdk-6-jre | openjdk-7-jre, cloudstack-common (= ${source:Version}), lsb-base (>= 3.2), libcommons-daemon-java, openssh-client, libvirt0, sysvinit-utils, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, perl-base, perl-modules, ebtables, vlan, wget, jsvc, ipset, python-libvirt, ethtool, 
iptables Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts Description: CloudStack agent The CloudStack agent is in charge of managing shared computing resources in diff --git a/debian/rules b/debian/rules index 5e3d58c4da3..4edf8930605 100755 --- a/debian/rules +++ b/debian/rules @@ -12,6 +12,7 @@ DEBVERS := $(shell dpkg-parsechangelog | sed -n -e 's/^Version: //p') VERSION := $(shell echo '$(DEBVERS)' | sed -e 's/^[[:digit:]]*://' -e 's/[~-].*//') +MVNADD := $(shell if echo '$(DEBVERS)' | grep -q snapshot; then echo -SNAPSHOT; fi ) PACKAGE = $(shell dh_listpackages|head -n 1|cut -d '-' -f 1) SYSCONFDIR = "/etc" DESTDIR = "debian/tmp" @@ -65,12 +66,14 @@ install: mkdir $(DESTDIR)/var/log/$(PACKAGE)/agent mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent mkdir $(DESTDIR)/usr/share/$(PACKAGE)-agent/plugins - install -D agent/target/cloud-agent-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar - install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ + install -D agent/target/cloud-agent-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/$(PACKAGE)-agent.jar + install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ install -D plugins/hypervisors/kvm/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ install -D packaging/debian/init/cloud-agent $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-agent install -D agent/target/transformed/cloud-setup-agent $(DESTDIR)/usr/bin/cloudstack-setup-agent install -D agent/target/transformed/cloud-ssh $(DESTDIR)/usr/bin/cloudstack-ssh + install -D agent/target/transformed/cloudstack-agent-upgrade $(DESTDIR)/usr/bin/cloudstack-agent-upgrade + install -D agent/target/transformed/libvirtqemuhook $(DESTDIR)/usr/share/$(PACKAGE)-agent/lib/ install -D agent/target/transformed/* 
$(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/agent # cloudstack-management @@ -90,7 +93,7 @@ install: mkdir $(DESTDIR)/var/lib/$(PACKAGE)/management mkdir $(DESTDIR)/var/lib/$(PACKAGE)/mnt cp -r client/target/utilities/scripts/db/* $(DESTDIR)/usr/share/$(PACKAGE)-management/setup/ - cp -r client/target/cloud-client-ui-$(VERSION)-SNAPSHOT/* $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/ + cp -r client/target/cloud-client-ui-$(VERSION)$(MVNADD)/* $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/ cp server/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/server/ cp client/target/conf/* $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/ @@ -130,7 +133,7 @@ install: install -D client/target/utilities/bin/cloud-setup-management $(DESTDIR)/usr/bin/cloudstack-setup-management install -D client/target/utilities/bin/cloud-setup-encryption $(DESTDIR)/usr/bin/cloudstack-setup-encryption install -D client/target/utilities/bin/cloud-sysvmadm $(DESTDIR)/usr/bin/cloudstack-sysvmadm - install -D services/console-proxy/server/dist/systemvm.iso $(DESTDIR)/usr/share/$(PACKAGE)-common/vms/systemvm.iso + install -D systemvm/dist/systemvm.iso $(DESTDIR)/usr/share/$(PACKAGE)-common/vms/systemvm.iso # We need jasypt for cloud-install-sys-tmplt, so this is a nasty hack to get it into the right place install -D agent/target/dependencies/jasypt-1.9.0.jar $(DESTDIR)/usr/share/$(PACKAGE)-common/lib @@ -143,7 +146,7 @@ install: mkdir $(DESTDIR)/var/log/$(PACKAGE)/usage mkdir $(DESTDIR)/usr/share/$(PACKAGE)-usage mkdir $(DESTDIR)/usr/share/$(PACKAGE)-usage/plugins - install -D usage/target/cloud-usage-$(VERSION)-SNAPSHOT.jar $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar + install -D usage/target/cloud-usage-$(VERSION)$(MVNADD).jar $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/$(PACKAGE)-usage.jar install -D usage/target/dependencies/* $(DESTDIR)/usr/share/$(PACKAGE)-usage/lib/ cp usage/target/transformed/db.properties $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/ cp 
usage/target/transformed/log4j-cloud_usage.xml $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/usage/log4j-cloud.xml @@ -156,7 +159,7 @@ install: mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi mkdir $(DESTDIR)/usr/share/$(PACKAGE)-bridge/setup ln -s /usr/share/$(PACKAGE)-bridge/webapps/awsapi $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps7080/awsapi - cp -r awsapi/target/cloud-awsapi-$(VERSION)-SNAPSHOT/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi + cp -r awsapi/target/cloud-awsapi-$(VERSION)$(MVNADD)/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/webapps/awsapi install -D awsapi-setup/setup/cloud-setup-bridge $(DESTDIR)/usr/bin/cloudstack-setup-bridge install -D awsapi-setup/setup/cloudstack-aws-api-register $(DESTDIR)/usr/bin/cloudstack-aws-api-register cp -r awsapi-setup/db/mysql/* $(DESTDIR)/usr/share/$(PACKAGE)-bridge/setup diff --git a/deps/install-non-oss.sh b/deps/install-non-oss.sh index 0bf8e48d70c..940bd32ae59 100755 --- a/deps/install-non-oss.sh +++ b/deps/install-non-oss.sh @@ -16,7 +16,12 @@ # specific language governing permissions and limitations # under the License. 
+# From https://devcentral.f5.com +# Version: unknown mvn install:install-file -Dfile=cloud-iControl.jar -DgroupId=com.cloud.com.f5 -DartifactId=icontrol -Dversion=1.0 -Dpackaging=jar + +# From Citrix +# Version: unknown mvn install:install-file -Dfile=cloud-netscaler-sdx.jar -DgroupId=com.cloud.com.citrix -DartifactId=netscaler-sdx -Dversion=1.0 -Dpackaging=jar # From http://support.netapp.com/ (not available online, contact your support representative) diff --git a/developer/pom.xml b/developer/pom.xml index be14494b047..0eb18bf2d3f 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -74,7 +74,6 @@ maven-antrun-plugin - 1.7 generate-resources diff --git a/docs/README.txt b/docs/README.txt deleted file mode 100644 index e327fb9101c..00000000000 --- a/docs/README.txt +++ /dev/null @@ -1,325 +0,0 @@ -Author: Jessica Tomechak - -Updated: August 8, 2012 - - -------------------------------------------- - -WHAT'S IN THIS REPOSITORY: WORK IN PROGRESS - -------------------------------------------- - -This repository contains the source files for CloudStack documentation. The files are currently incomplete as we are in the process of converting documentation from an outdated file format into XML files for this repo. -The complete documentation can be seen at docs.cloudstack.org. - - - ----------------------------------- - -DOCUMENTATION SUBDIRECTORIES - ----------------------------------- - -United States English language source files are in the en-US subdirectory. -Additional language subdirectories can be added. - - -Each file in a language subdirectory contains one chunk of information that may be termed a section, module, or topic. The files are written in Docbook XML, using the Docbook version and tag supported by the Publican open-source documentation tool. - - - ----------------------------------- - -VALID XML TAGS - ----------------------------------- - -Certain tags are disallowed by Publican. Please consult their documentation for more details. 
-http://jfearn.fedorapeople.org/en-US/Publican/2.7/html/Users_Guide/ - -Your best bet is to copy an existing XML file and fill in your own content between the tags. - -At the bottom of this README, there is a fill-in-the-blanks XML template that you can go from. It shows the commonly used tags and explains a bit about how to use them. - - ----------------------------------- - -SECTIONS, CHAPTERS, AND BOOK FILES - ----------------------------------- - -The files for every topic and audience are in a single directory. The content is not divided into separate subdirectories for each book, or separate repositories for each book. Therefore, the content can be flexibly and easily re-used. In most cases, a file contains a single section that can be assembled with other sections to build any desired set of information. These files contain
...
tags. - - -Some of the XML files contain only a series of include tags to pull in content from other files. Such an "include file" is either a major section, a chapter in a book, or the master book file. A chapter contains ... tags. - - -The master book file contains ... tags. This file is referred to in the Publican configuration file, and is used as the controlling file when building the book. - - -Document names are derived from the docname setting in the appropriate .cfg file. -This should not have CloudStack in the name (which is redundant because of -the CloudStack brand that the documentation is built with. The docname variable -sets the name in the doc site table of contents. This name also needs to exist -as .xml and .ent in the en-US directory. Examples of appropriate docnames: -Admin_Guide -API_Developers_Guide -Installation_Guide - - - - -A Publican book file must also have certain other tags that are expected by -Publican when it builds the project. Copy an existing master book file to -get these tags. - - ----------------------------------- - -CONFIG FILES - ----------------------------------- - -For each book file, there must be a corresponding publican.cfg (or -.cfg) file in order to build the book with Publican. The -docname: attribute in the config file matches the name of the master book file; -for example, docname: cloudstack corresponds to the master book file -cloudstack.xml. - - -The .cfg files reside in the main directory, docs. To build a different book, -just use the Publican command line flag --config=.cfg. (We also -need per-book entities, Book_Info, Author_Info, and other Publican files. -The technique for pulling these in is TBD.) - - ----------------------------------- - -TO BUILD A BOOK - ----------------------------------- - -We will set up an automatic Publican job that generates new output whenever we -check in changes to this repository. You can also build a book locally as -follows. 
- - -First, install Publican, and get a local copy of the book source files. - - -Put the desired publican.cfg in the docs directory. Go to the command line, cd -to that directory, and run the publican build command. Specify what output -format(s) and what language(s) you want to build. Always start with a test -run. For example: - - -publican build --formats test --langs en-US - - -...followed by this command if the test is successful: - - -publican build --formats html,pdf --langs en-US - - -Output will be found in the tmp subdirectory of the docs directory. - - - ----------------------------------- - -LOCALIZATION - ----------------------------------- - -Localized versions of the documentation files can be stored in appropriately -named subdirectories parallel to en-US. The language code names to use for -these directories are listed in Publican documentation, -http://jfearn.fedorapeople.org/en-US/Publican/2.7/html/Users_Guide/appe-Users_Guide-Language_codes.html. -For example, Japanese XML files would be stored in the docs/ja-JP directory. - -Localization currently happens using Transifex and you can find the strings -to be translated at this location: -https://www.transifex.com/projects/p/ACS_DOCS/ - -In preparation for l10n, authors and docs folks must take not of a number of -things. -All .xml files must contain a translatable string. tags are not enough. -All new .xml files must have a corresponding entry in docs/.tx/config -Filenames should be less than 50 characters long. - -To generate new POT files and upload source do the following: -publican update_pot --config=./publican-all.cfg -tx push -s - -To receive translated files from publican, run the following command: -tx pull - - ----------------------------------- - -CONTRIBUTING - ----------------------------------- - -Contributors can create new section, chapter, book, publican.cfg, or localized -.xml files at any time. 
Submit them following the same patch approval procedure -that is used for contributing to CloudStack code. More information for -contributors is available at -https://cwiki.apache.org/confluence/display/CLOUDSTACK/Documentation+Team. - ----------------------------------- - -TAGS FOR A SECTION ----------------------------------- - - - -%BOOK_ENTITIES; -]> - - - - - - -
- Text of the section title - Here's the text of a paragraph in this section. - Always use &PRODUCT; rather than typing CloudStack. - Indent with 4 spaces, not with tab characters. - To hyperlink to a URL outside this document: Display text of the link here - To hyperlink to another section in this document: - The publication tools will automatically insert the display text of the link for you. - Use this for all tips and asides. Don't use other tags such as tip. - Our publication tool (publican) prefers the note tag. The tool will - automatically insert the text NOTE: for you, so please don't type it. - Use this for anything that is vital to avoid runtime errors. Don't use - other tags such as caution. Our publication tool (publican) prefers the warning tag. The tool will automatically insert the text WARNING: for you, so please don't type it. - Here's how to do a bulleted list: - - Bulleted list item text. - - Here's how to do a numbered list. These are used for step by step instructions - or to describe a sequence of events in time. For everything else, use a bulleted list. - - Text of the step - You might also want a sub-list within one of the list items. Like this: - - Inner list item text. - - - - Here's how to insert an image. Put the graphic file in images/, a subdirectory of the directory where this XML file is. - Refer to it using this tag. The tag is admittedly complex, but it's the one we need to use with publican: - - - - - YOUR_FILENAME_HERE.png: Alt text describing this image, such as - “structure of a zone.” Required for accessibility. - - A section can contain sub-sections. Please make each sub-section a separate file to enable reuse. - Then include the sub-section like this: - -
- - - ----------------------------------- - -TAGS FOR A CHAPTER ----------------------------------- - - - -%BOOK_ENTITIES; -]> - - - - - - - - Text of the chapter title - - - - - - ----------------------------------- - -TAGS FOR A BOOK ----------------------------------- - - - -%BOOK_ENTITIES; -]> - - - - - - - - - - - - ----------------------------------- - -BASIC RULES FOR INCLUDE STATEMENTS ----------------------------------- - -A book file must include chapter files. -A chapter file must include section files. -A section file can include other section files, but it doesn't have to. diff --git a/docs/en-US/Admin_Guide.ent b/docs/en-US/Admin_Guide.ent deleted file mode 100644 index abb18851bcf..00000000000 --- a/docs/en-US/Admin_Guide.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/en-US/Admin_Guide.xml b/docs/en-US/Admin_Guide.xml deleted file mode 100644 index d3b9706f84e..00000000000 --- a/docs/en-US/Admin_Guide.xml +++ /dev/null @@ -1,74 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - &PRODUCT; Administrator's Guide - Apache CloudStack - 4.2.0 - 1 - - - - Administration Guide for &PRODUCT;. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en-US/Author_Group.xml b/docs/en-US/Author_Group.xml deleted file mode 100644 index ba9e651f876..00000000000 --- a/docs/en-US/Author_Group.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - - Apache - CloudStack - - - diff --git a/docs/en-US/Book_Info.xml b/docs/en-US/Book_Info.xml deleted file mode 100644 index 327668dfc9d..00000000000 --- a/docs/en-US/Book_Info.xml +++ /dev/null @@ -1,47 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - &PRODUCT; Guide - Revised August 9, 2012 10:48 pm Pacific - Apache CloudStack - 4.2.0 - 1 - - - - Complete technical documentation of &PRODUCT;. 
- - - - - - - - - - - - diff --git a/docs/en-US/Book_Info_Release_Notes_4.xml b/docs/en-US/Book_Info_Release_Notes_4.xml deleted file mode 100644 index e1c270f3e14..00000000000 --- a/docs/en-US/Book_Info_Release_Notes_4.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - Version 4.2.0 Release Notes - Apache &PRODUCT; - - - - Release notes for the Apache &PRODUCT; 4.2.0 release. - - - - - - - - - - - diff --git a/docs/en-US/CloudStack_GSoC_Guide.ent b/docs/en-US/CloudStack_GSoC_Guide.ent deleted file mode 100644 index 17415873334..00000000000 --- a/docs/en-US/CloudStack_GSoC_Guide.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/en-US/CloudStack_GSoC_Guide.xml b/docs/en-US/CloudStack_GSoC_Guide.xml deleted file mode 100644 index 2f537d40cef..00000000000 --- a/docs/en-US/CloudStack_GSoC_Guide.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - - - - &PRODUCT; Guide for the 2013 Google Summer of Code - Apache CloudStack - 4.3.0 - 1 - - - - Guide for 2013 Google Summer of Code Projects. - - - - - - - - - - - - - - - - diff --git a/docs/en-US/CloudStack_Nicira_NVP_Guide.ent b/docs/en-US/CloudStack_Nicira_NVP_Guide.ent deleted file mode 100644 index abb18851bcf..00000000000 --- a/docs/en-US/CloudStack_Nicira_NVP_Guide.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/en-US/CloudStack_Nicira_NVP_Guide.xml b/docs/en-US/CloudStack_Nicira_NVP_Guide.xml deleted file mode 100644 index 5431fc1cb43..00000000000 --- a/docs/en-US/CloudStack_Nicira_NVP_Guide.xml +++ /dev/null @@ -1,55 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - - - - &PRODUCT; Plugin Guide for the Nicira NVP Plugin - Apache CloudStack - 4.2.0 - 1 - - - - Plugin Guide for the Nicira NVP Plugin. 
- - - - - - - - - - - - - - - - - - - diff --git a/docs/en-US/Common_Content/Legal_Notice.xml b/docs/en-US/Common_Content/Legal_Notice.xml deleted file mode 100644 index 2a2e3a7b3e7..00000000000 --- a/docs/en-US/Common_Content/Legal_Notice.xml +++ /dev/null @@ -1,15 +0,0 @@ - - - - - Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - - - http://www.apache.org/licenses/LICENSE-2.0 - - - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - - - diff --git a/docs/en-US/Common_Content/feedback.xml b/docs/en-US/Common_Content/feedback.xml deleted file mode 100644 index 4b06c9f3898..00000000000 --- a/docs/en-US/Common_Content/feedback.xml +++ /dev/null @@ -1,24 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Feedback - to-do -
diff --git a/docs/en-US/Developers_Guide.ent b/docs/en-US/Developers_Guide.ent deleted file mode 100644 index 47a2b6757f8..00000000000 --- a/docs/en-US/Developers_Guide.ent +++ /dev/null @@ -1,21 +0,0 @@ - - - - - \ No newline at end of file diff --git a/docs/en-US/Developers_Guide.xml b/docs/en-US/Developers_Guide.xml deleted file mode 100644 index 7452e29ecf2..00000000000 --- a/docs/en-US/Developers_Guide.xml +++ /dev/null @@ -1,61 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - &PRODUCT; Developer's Guide - Apache CloudStack - 4.2.0 - - - - - This guide shows how to develop &PRODUCT;, use the API for operation and integration, access the usage data and use &PRODUCT; specific tools to ease development, testing and integration. - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en-US/Installation_Guide.ent b/docs/en-US/Installation_Guide.ent deleted file mode 100644 index abb18851bcf..00000000000 --- a/docs/en-US/Installation_Guide.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/en-US/Installation_Guide.xml b/docs/en-US/Installation_Guide.xml deleted file mode 100644 index ea97f25c99c..00000000000 --- a/docs/en-US/Installation_Guide.xml +++ /dev/null @@ -1,62 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - &PRODUCT; Installation Guide - Apache CloudStack - 4.2.0 - 1 - - - Installation Guide for &PRODUCT;. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en-US/LDAP-for-user-authentication.xml b/docs/en-US/LDAP-for-user-authentication.xml deleted file mode 100644 index 772d1c5e3e2..00000000000 --- a/docs/en-US/LDAP-for-user-authentication.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Using an LDAP Server for User Authentication - You can use an external LDAP server such as Microsoft Active Directory or OpenLDAP to authenticate &PRODUCT; end-users. - In order to do this you must: - - Set your LDAP configuration within &PRODUCT; - Create &PRODUCT; accounts for LDAP users - - To set up LDAP authentication in &PRODUCT;, open the global settings page and search for LDAP - Set ldap.basedn to match your sever's base directory. - Review the defaults for the following, ensure that they match your schema. - - ldap.email.attribute - ldap.firstname.attribute - ldap.lastname.attribute - ldap.username.attribute - ldap.user.object - - Optionally you can set the following: - - If you do not want to use anonymous binding you can set ldap.bind.principle and ldap.bind.password as credentials for your LDAP server that will grant &PRODUCT; permission to perform a search on the LDAP server. - For SSL support set ldap.truststore to a path on the file system where your trusted store is located. Along with this set ldap.truststore.password as the password that unlocks the truststore. - If you wish to filter down the user set that is granted access to &PRODUCT; via the LDAP attribute memberof you can do so using ldap.search.group.principle. - - Finally, you can add your LDAP server. To do so select LDAP Configuration from the views section within global settings. Click on "Configure LDAP" and fill in your server's hostname and port. - - -
diff --git a/docs/en-US/MidoNet_Plugin_Guide.ent b/docs/en-US/MidoNet_Plugin_Guide.ent deleted file mode 100644 index f31c40748c2..00000000000 --- a/docs/en-US/MidoNet_Plugin_Guide.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/en-US/MidoNet_Plugin_Guide.xml b/docs/en-US/MidoNet_Plugin_Guide.xml deleted file mode 100644 index 86182e60b71..00000000000 --- a/docs/en-US/MidoNet_Plugin_Guide.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - - - - &PRODUCT; Plugin Guide for the MidoNet Plugin - Apache CloudStack - 4.2.0 - 1 - - - - Plugin Guide for the MidoNet Plugin. - - - - - - - - - - - - - - - - diff --git a/docs/en-US/Preface.xml b/docs/en-US/Preface.xml deleted file mode 100644 index e046410234d..00000000000 --- a/docs/en-US/Preface.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Preface - - - - - diff --git a/docs/en-US/Release_Notes.ent b/docs/en-US/Release_Notes.ent deleted file mode 100644 index 7858ad5f2e0..00000000000 --- a/docs/en-US/Release_Notes.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/en-US/Release_Notes.xml b/docs/en-US/Release_Notes.xml deleted file mode 100644 index d1def441685..00000000000 --- a/docs/en-US/Release_Notes.xml +++ /dev/null @@ -1,4582 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Welcome to &PRODUCT; 4.2 - Welcome to the 4.2.0 release of &PRODUCT;, the second major release from the Apache - CloudStack project since its graduation from the Apache Incubator. &PRODUCT; 4.2 includes more - than 70 new features and enhancements. The focus of the release is on three major - areas: - - - Improved support for both legacy-style and cloud-style workloads - - - New third-party plug-in architecture - - - Networking enhancements - - - In addition to these major new areas of functionality, &PRODUCT; 4.2 provides many - additional enhancements in a variety of product areas. All of the new features are summarized - later in this Release Note. 
- This document contains information specific to this release of &PRODUCT;, including - upgrade instructions from prior releases, new features added to &PRODUCT;, API changes, and - issues fixed in the release. For installation instructions, please see the Installation Guide. For usage and administration instructions, please see the - &PRODUCT; Administrator's Guide. Developers and users who wish to work with the API - will find instruction in the &PRODUCT; API Developer's Guide - If you find any errors or problems in this guide, please see . - We hope you enjoy working with &PRODUCT;! - - - What's New in 4.2.0 - &PRODUCT; 4.2 includes the following new features. -
- Features to Support Heterogeneous Workloads - The following new features help &PRODUCT; 4.2 better support both legacy and cloud-era - style zones. -
- Regions - To increase reliability of the cloud, you can optionally group resources into - geographic regions. A region is the largest available organizational unit within a cloud - deployment. A region is made up of several availability zones, where each zone is - equivalent to a datacenter. Each region is controlled by its own cluster of Management - Servers, running in one of the zones. The zones in a region are typically located in close - geographical proximity. Regions are a useful technique for providing fault tolerance and - disaster recovery. - By grouping zones into regions, the cloud can achieve higher availability and - scalability. User accounts can span regions, so that users can deploy VMs in multiple, - widely-dispersed regions. Even if one of the regions becomes unavailable, the services are - still available to the end-user through VMs deployed in another region. And by grouping - communities of zones under their own nearby Management Servers, the latency of - communications within the cloud is reduced compared to managing widely-dispersed zones - from a single central Management Server. - Usage records can also be consolidated and tracked at the region level, creating - reports or invoices for each geographic region. -
-
- Object Storage Plugin Architecture - Artifacts such as templates, ISOs and snapshots are kept in storage which &PRODUCT; - refers to as secondary storage. To improve scalability and performance, as when a number - of hosts access secondary storage concurrently, object storage can be used for secondary - storage. Object storage can also provide built-in high availability capability. When using - object storage, access to secondary storage data can be made available across multiple - zones in a region. This is a huge benefit, as it is no longer necessary to copy templates, - snapshots etc. across zones as would be needed in an NFS-only environment. - Object storage is provided through third-party software such as Amazon Simple Storage - Service (S3) or any other object storage that supports the S3 interface. These third party - object storages can be integrated with &PRODUCT; by writing plugin software that uses the - object storage plugin capability introduced in &PRODUCT; 4.2. Several new pluggable - service interfaces are available so that different storage providers can develop - vendor-specific plugins based on the well-defined contracts that can be seamlessly managed - by &PRODUCT;. -
-
- Zone-Wide Primary Storage - (Supported on KVM and VMware) - In &PRODUCT; 4.2, you can provision primary storage on a per-zone basis. Data volumes - in the primary storage can be attached to any VM on any host in the zone. - In previous &PRODUCT; versions, each cluster had its own primary storage. Data in the - primary storage was directly available only to VMs within that cluster. If a VM in a - different cluster needed some of the data, it must be copied from one cluster to another, - using the zone's secondary storage as an intermediate step. This operation was - unnecessarily time-consuming. -
-
- VMware Datacenter Now Visible As a &PRODUCT; Zone - In order to support zone-wide functions for VMware, changes have been made so that - &PRODUCT; is now aware of VMware Datacenters and can map each Datacenter to a &PRODUCT; - zone. Previously, &PRODUCT; was only aware of VMware Clusters, a smaller organizational - unit than Datacenters. This implies that a single &PRODUCT; zone could possibly contain - clusters from different VMware Datacenters. In order for zone-wide functions, such as - zone-wide primary storage, to work for VMware hosts, &PRODUCT; has to make sure that a - zone contains only a single VMware Datacenter. Therefore, when you are creating a new - &PRODUCT; zone, you will now be able to select a VMware Datacenter for the zone. If you - are provisioning multiple VMware Datacenters, each one will be set up as a single zone in - &PRODUCT;. - - If you are upgrading from a previous &PRODUCT; version, and your existing deployment - contains a zone with clusters from multiple VMware Datacenters, that zone will not be - forcibly migrated to the new model. It will continue to function as before. However, any - new zone-wide operations, such as zone-wide primary storage, will not be available in - that zone. - - -
-
-
- Third-Party UI Plugin Framework - Using the new third-party plugin framework, you can write and install extensions to - &PRODUCT;. The installed and enabled plugins will appear in the UI. - The basic procedure for adding a UI plugin is explained in the Developer Guide. In - summary, the plugin developer creates the plugin code itself (in Javascript), a thumbnail - image, the plugin listing, and a CSS file. The &PRODUCT; administrator adds the folder - containing the plugin code under the &PRODUCT; PLUGINS folder and adds the plugin name to a - configuration file (plugins.js). - The next time the user refreshes the UI in the browser, the plugin will appear under the - Plugins button in the left navigation bar. -
-
- Networking Enhancements - The following new features provide additional networking functionality in &PRODUCT; - 4.2. -
- IPv6 - &PRODUCT; 4.2 introduces initial support for IPv6. This feature is provided as a - technical preview only. Full support is planned for a future release. -
-
- Portable IPs - Portable IPs in &PRODUCT; are elastic IPs that can be transferred across - geographically separated zones. As an administrator, you can provision a pool of portable - IPs at the region level and make them available for user consumption. Users can acquire - portable IPs if the administrator has provisioned portable public IPs at the region level - they are part of. These IPs can be used for any service within an advanced zone. You can - also use portable IPs for EIP service in Basic zones. Additionally, a portable IP can be - transferred from one network to another network. -
-
- N-Tier Applications - In &PRODUCT; 3.0.6, a functionality was added to allow users to create a multi-tier - application connected to a single instance of a Virtual Router that supports inter-VLAN - routing. Such a multi-tier application is called a virtual private cloud (VPC). Users were - also able to connect their multi-tier applications to a private Gateway or a Site-to-Site - VPN tunnel and route certain traffic to those gateways. For &PRODUCT; 4.2, additional - features are implemented to enhance VPC applications. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Support for KVMVPC is now supported on KVM - hypervisors.
-
- Load Balancing Support for VPC - In a VPC, you can configure two types of load balancing—external LB and - internal LB. External LB is nothing but an LB rule created to redirect the traffic - received at a public IP of the VPC virtual router. The traffic is load balanced within a - tier based on your configuration. Citrix NetScaler and the VPC virtual router are supported - for external LB. When you use the internal LB service, traffic received at a tier is load - balanced across different VMs within that tier. For example, traffic received at the Web tier - is redirected to another VM in that tier. External load balancing devices are not - supported for internal LB. The service is provided by an internal LB VM configured on the - target tier. -
- Load Balancing Within a Tier (External LB) - A &PRODUCT; user or administrator may create load balancing rules that balance - traffic received at a public IP to one or more VMs that belong to a network tier that - provides load balancing service in a VPC. A user creates a rule, specifies an - algorithm, and assigns the rule to a set of VMs within a tier. -
-
- Load Balancing Across Tiers - &PRODUCT; supports sharing workload across different tiers within your VPC. Assume - that multiple tiers are set up in your environment, such as Web tier and Application - tier. Traffic to each tier is balanced on the VPC virtual router on the public side. - If you want the traffic coming from the Web tier to the Application tier to be - balanced, use the internal load balancing feature offered by &PRODUCT;. -
-
- Netscaler Support for VPC - Citrix NetScaler is supported for external LB. Certified version for this feature - is NetScaler 10.0 Build 74.4006.e. -
-
-
- Enhanced Access Control List - Network Access Control List (ACL) on the VPC virtual router is enhanced. The network - ACLs can be created for the tiers only if the NetworkACL service is supported. In - &PRODUCT; terminology, a Network ACL is a group of Network ACL items. Network ACL items - are nothing but numbered rules that are evaluated in order, starting with the lowest - numbered rule. These rules determine whether traffic is allowed in or out of any tier - associated with the network ACL. You need to add the Network ACL items to the Network - ACL, then associate the Network ACL with a tier. A Network ACL is associated with a VPC - and can be assigned to multiple VPC tiers within a VPC. A tier is associated with a - Network ACL at all times. Each tier can be associated with only one ACL. - The default Network ACL is used when no ACL is associated. The default behavior is that all - incoming traffic to guest networks is blocked and all outgoing traffic from guest - networks is allowed. The default network ACL cannot be removed or modified. -
- ACL on Private Gateway - The traffic on the VPC private gateway is controlled by creating both ingress and - egress network ACL rules. The ACLs contain both allow and deny rules. As per the - rules, all the ingress traffic to the private gateway interface and all the egress - traffic out from the private gateway interface are blocked. You can change this - default behavior while creating a private gateway. -
-
- Allow ACL on All Level 4 Protocols - In addition to the existing protocol support for ICMP, TCP, UDP, support for All - Level 4 protocols is added. The protocol numbers from 0 to 255 are supported. -
-
- Support for ACL Deny Rules - In addition to the existing support for ACL Allow rules, support for ACL Deny - rules has been added in &PRODUCT; 4.2. As part of this, two operations are supported: - Number and Action. You can configure a rule, allow or deny, by using action. Use - Number to add a rule number. -
-
-
- Deploying VMs to a VPC Tier and Shared Networks - &PRODUCT; allows you to deploy VMs on a VPC tier and one or more shared networks. - With this feature, the VMs deployed in a multi-tier application can receive services - offered by a service provider over the shared network. One example of such a service is - monitoring service. -
-
- Adding a Private Gateway to a VPC - A private gateway can be added by the root admin only. The VPC private network has - 1:1 relationship with the NIC of the physical network. You can configure multiple - private gateways to a single VPC. No gateways with duplicated VLAN and IP are allowed in - the same data center. -
- Source NAT on Private Gateway - You might want to deploy multiple VPCs with the same super CIDR and guest tier - CIDR. Therefore, multiple guest VMs from different VPCs can have the same IPs to reach - an enterprise data center through the private gateway. In such cases, a NAT service - needs to be configured on the private gateway. If Source NAT is enabled, the guest VMs - in the VPC reach the enterprise network via the private gateway IP address by using the NAT - service. - The Source NAT service on a private gateway can be enabled while adding the - private gateway. On deletion of a private gateway, source NAT rules specific to the - private gateway are deleted. -
-
- VPN Gateways - Support for up to 8 VPN gateways has been added. -
-
- Creating a Static Route - &PRODUCT; enables you to specify routing for the VPN connection you create. You - can enter one or more CIDR addresses to indicate which traffic is to be routed back to the - gateway. -
-
- Blacklisting Routes - &PRODUCT; enables you to block a list of routes so that they are not assigned to - any of the VPC private gateways. Specify the list of routes that you want to blacklist - in the blacklisted.routes global parameter. Note that the parameter - update affects only new static route creations. If you block an existing static route, - it remains intact and continues functioning. You cannot add a static route if the route - is blacklisted for the zone. -
-
-
-
- Assigning VLANs to Isolated Networks - &PRODUCT; provides you the ability to control VLAN assignment to Isolated networks. - You can assign a VLAN ID when a network is created, just the way it's done for Shared - networks. - The former behavior is also supported — a VLAN is randomly allocated to a network - from the VNET range of the physical network when the network turns to the Implemented state. - The VLAN is released back to the VNET pool when the network shuts down as a part of the - Network Garbage Collection. The VLAN can be re-used either by the same network when it is - implemented again, or by any other network. On each subsequent implementation of a - network, a new VLAN can be assigned. - - You cannot change a VLAN once it's assigned to the network. The VLAN remains with - the network for its entire life cycle. - -
-
- Persistent Networks - &PRODUCT; 4.2 supports Persistent Networks. A network that you can provision without - having to deploy any VMs on it is called a Persistent Network. A Persistent Network can be - part of a VPC or a non-VPC environment. With the addition of this feature, you will have - the ability to create a network in &PRODUCT; in which physical devices can be deployed - without having to run any VMs. Additionally, you can deploy physical devices on that - network. Another advantage is that you can create a VPC with a tier that consists only of - physical devices. For example, you might create a VPC for a three-tier application, deploy - VMs for the Web and Application tiers, and use physical machines for the Database tier. Another - use case is that if you are providing services by using physical hardware, you can define - the network as persistent and therefore even if all its VMs are destroyed the services - will not be discontinued. -
-
- Cisco VNMC Support - Cisco Virtual Network Management Center (VNMC) provides centralized multi-device and - policy management for Cisco Network Virtual Services. When Cisco VNMC is integrated with - ASA 1000v Cloud Firewall and Cisco Nexus 1000v dvSwitch in &PRODUCT; you will be able to: - - - Configure Cisco ASA 1000v Firewalls - - - Create and apply security profiles that contain ACL policy sets for both ingress - and egress traffic, and NAT policy sets - - - &PRODUCT; supports Cisco VNMC on Cisco Nexus 1000v dvSwitch-enabled VMware - hypervisors. -
-
- VMware vNetwork Distributed vSwitch - &PRODUCT; supports VMware vSphere Distributed Switch (VDS) for virtual network - configuration in a VMware vSphere environment. Each vCenter server instance can support up - to 128 VDSs and each VDS can manage up to 500 VMware hosts. &PRODUCT; supports configuring - virtual networks in a deployment with a mix of Virtual Distributed Switch, Standard - Virtual Switch and Nexus 1000v Virtual Switch. -
-
- IP Reservation in Isolated Guest Networks - In Isolated guest networks in &PRODUCT; 4.2, a part of the guest IP address space can - be reserved for non-&PRODUCT; VMs or physical servers. To do so, you configure a range of - Reserved IP addresses by specifying the CIDR when a guest network is in Implemented state. - The advantage of having this feature is that if your customers wish to have non-&PRODUCT; - controlled VMs or physical servers on the same network, they can use a part of the IP - address space that is primarily provided to the guest network. When IP reservation is - configured, the administrator can add additional VMs or physical servers that are not part - of &PRODUCT; to the same network and assign them the Reserved IP addresses. &PRODUCT; - guest VMs cannot acquire IPs from the Reserved IP Range. -
-
- Dedicated Resources: Public IP Addresses and VLANs Per Account - &PRODUCT; provides you the ability to reserve a set of public IP addresses and VLANs - exclusively for an account. During zone creation, you can continue to define a set of - VLANs and multiple public IP ranges. This feature extends the functionality to enable you - to dedicate a fixed set of VLANs and guest IP addresses for a tenant. - This feature provides you the following capabilities: - - - Reserve a VLAN range and public IP address range from an Advanced zone and assign - it to an account - - - Disassociate a VLAN and public IP address range from an account - - - - Ensure that you check whether the required range is available and conforms to - account limits. The maximum IPs per account limit cannot be superseded. - -
-
- Enhanced Juniper SRX Support for Egress Firewall Rules - Egress firewall rules were previously supported on virtual routers, and now they are - also supported on Juniper SRX external networking devices. - Egress traffic originates from a private network to a public network, such as the - Internet. By default, the egress traffic is blocked, so no outgoing traffic is allowed - from a guest network to the Internet. However, you can control the egress traffic in an - Advanced zone by creating egress firewall rules. When an egress firewall rule is applied, - the traffic specific to the rule is allowed and the remaining traffic is blocked. When all - the firewall rules are removed the default policy, Block, is applied. - - Egress firewall rules are not supported on Shared networks. They are supported only - on Isolated guest networks. - -
-
- Configuring the Default Egress Policy - The default egress policy for Isolated guest network can be configured by using - Network offering. Use the create network offering option to determine whether the default - policy should be block or allow all the traffic to the public network from a guest - network. Use this network offering to create the network. If no policy is specified, by - default all the traffic is allowed from the guest network that you create by using this - network offering. - You have two options: Allow and Deny. - If you select Allow for a network offering, by default egress traffic is allowed. - However, when an egress rule is configured for a guest network, rules are applied to block - the specified traffic and rest are allowed. If no egress rules are configured for the - network, egress traffic is accepted. If you select Deny for a network offering, by default - egress traffic for the guest network is blocked. However, when an egress rules is - configured for a guest network, rules are applied to allow the specified traffic. While - implementing a guest network, &PRODUCT; adds the firewall egress rule specific to the - default egress policy for the guest network. - This feature is supported only on virtual router and Juniper SRX. -
-
- Non-Contiguous VLAN Ranges - &PRODUCT; provides you with the flexibility to add non-contiguous VLAN ranges to your - network. The administrator can either update an existing VLAN range or add multiple non-contiguous VLAN ranges while creating a zone. You can also use the updatePhysicalNetwork - API to extend the VLAN range. -
-
- Isolation in Advanced Zone Using Private VLAN - Isolation of guest traffic in shared networks can be achieved by using Private VLANs - (PVLAN). PVLANs provide Layer 2 isolation between ports within the same VLAN. In a - PVLAN-enabled shared network, a user VM cannot reach other user VMs, though they can reach - the DHCP server and gateway. This would in turn allow users to control traffic within a - network and help them deploy multiple applications without communication between - applications, as well as prevent communication with other users’ VMs. - - - Isolate VMs in shared networks by using Private VLANs. - - - Supported on KVM, XenServer, and VMware hypervisors. - - - PVLAN-enabled shared network can be a part of multiple networks of a guest VM. - - - - For further reading: - - - Understanding Private VLANs - - - Cisco Systems' Private VLANs: - Scalable Security in a Multi-Client Environment - - - Private VLAN (PVLAN) on vNetwork Distributed - Switch - Concept Overview (1010691) - - -
-
- Configuring Multiple IP Addresses on a Single NIC - (Supported on XenServer, KVM, and VMware hypervisors) - &PRODUCT; now provides you the ability to associate multiple private IP addresses per - guest VM NIC. This feature is supported on all the network configurations—Basic, - Advanced, and VPC. Security Groups, Static NAT and Port forwarding services are supported - on these additional IPs. In addition to the primary IP, you can assign additional IPs to - the guest VM NIC. Up to 256 IP addresses are allowed per NIC. - As always, you can specify an IP from the guest subnet; if not specified, an IP is - automatically picked up from the guest VM subnet. You can view the IPs associated with - each guest VM NIC on the UI. You can apply NAT on these additional guest IPs by using - firewall configuration in the &PRODUCT; UI. You must specify the NIC to which the IP - should be associated. -
-
- Adding Multiple IP Ranges - (Supported on KVM, XenServer, and VMware hypervisors) - &PRODUCT; 4.2 provides you with the flexibility to add guest IP ranges from different - subnets in Basic zones and security groups-enabled Advanced zones. For security - groups-enabled Advanced zones, it implies multiple subnets can be added to the same VLAN. - With the addition of this feature, you will be able to add IP address ranges from the same - subnet or from a different one when IP addresses are exhausted. This would in turn allow - you to employ a higher number of subnets and thus reduce the address management - overhead. - Ensure that you manually configure the gateway of the new subnet before adding the IP - range. Note that &PRODUCT; supports only one gateway for a subnet; overlapping subnets are - not currently supported. - You can also delete IP ranges. This operation fails if an IP from the remove range is - in use. If the remove range contains the IP address on which the DHCP server is running, - &PRODUCT; acquires a new IP from the same subnet. If no IP is available in the subnet, the - remove operation fails. - - The feature can only be implemented on IPv4 addresses. - -
-
- Support for Multiple Networks in VMs - (Supported on XenServer, VMware and KVM hypervisors) - &PRODUCT; 4.2 provides you the ability to add and remove multiple networks to a VM. - You can remove a network from a VM and add a new network. You can also change the default - network of a VM. With this functionality, hybrid or traditional server loads can be - accommodated with ease. - For adding or removing a NIC to work on VMware, ensure that vm-tools are running on - guest VMs. -
-
- Global Server Load Balancing - &PRODUCT; 4.2 supports Global Server Load Balancing (GSLB) functionalities to provide - business continuity by load balancing traffic to an instance on active zones only in case - of zone failures. &PRODUCT; achieves this by extending its functionality of integrating - with NetScaler Application Delivery Controller (ADC), which also provides various GSLB - capabilities, such as disaster recovery and load balancing. The DNS redirection technique - is used to achieve GSLB in &PRODUCT;. In order to support this functionality, region level - services and a service provider are introduced. A new service 'GSLB' is introduced as a - region level service. The GSLB service provider is introduced that will provide the GSLB - service. Currently, NetScaler is the supported GSLB provider in &PRODUCT;. GSLB - functionality works in an Active-Active data center environment. -
-
- Enhanced Load Balancing Services Using External Provider on Shared VLANs - Network services like Firewall, Load Balancing, and NAT are now supported in shared - networks created in an advanced zone. In effect, the following network services shall be - made available to a VM in a shared network: Source NAT, Static NAT, Port Forwarding, - Firewall and Load balancing. A subset of these services can be chosen while creating a - network offering for shared networks. Services available in a shared network are defined by - the network offering and the services chosen in the network offering. For example, if the - network offering for a shared network has the source NAT service enabled, a public IP shall be - provisioned and source NAT is configured on the firewall device to provide public access - to the VMs on the shared network. Static NAT, Port Forwarding, Load Balancing, and - Firewall services shall be available only on the acquired public IPs associated with a - shared network. - Additionally, NetScaler and Juniper SRX firewall devices can be configured in inline or - side-by-side mode. -
-
- Health Checks for Load Balanced Instances - - This feature is supported only on NetScaler version 10.0 and beyond. - - (NetScaler load balancer only) A load balancer rule distributes requests among a pool - of services (a service in this context means an application running on a virtual machine). - When creating a load balancer rule, you can specify a health check which will ensure that - the rule forwards requests only to services that are healthy (running and available). When - a health check is in effect, the load balancer will stop forwarding requests to any - resources that it has found to be unhealthy. If the resource later becomes available - again, the periodic health check (periodicity is configurable) will discover it and the - resource will once again be made available to the load balancer. - To configure how often the health check is performed by default, use the global - configuration setting healthcheck.update.interval. This default applies to all the health - check policies in the cloud. You can override this value for an individual health check - policy. -
-
-
- Host and Virtual Machine Enhancements - The following new features expand the ways you can use hosts and virtual - machines. -
- VMware DRS Support - The VMware vSphere Distributed Resources Scheduler (DRS) is supported. -
-
- Windows 8 and Windows Server 2012 as VM Guest OS - (Supported on XenServer, VMware, and KVM) - Windows 8 and Windows Server 2012 can now be used as OS types on guest virtual - machines. The OS would be made available the same as any other, by uploading an ISO or a - template. The instructions for uploading ISOs and templates are given in the - Administrator's Guide. - - Limitation: When used with VMware hosts, this - feature works only for the following versions: vSphere ESXi 5.1 and ESXi 5.0 Patch - 4. - - -
-
- Change Account Ownership of Virtual Machines - A root administrator can now change the ownership of any virtual machine from one - account to any other account. A domain or sub-domain administrator can do the same for VMs - within the domain from one account to any other account in the domain. -
-
- Private Pod, Cluster, or Host - Dedicating pod, cluster or host to a specific domain/account means that the - domain/account will have sole access to the dedicated pod, cluster or hosts such that - scalability, security and manageability within a domain/account can be improved. The - resources which belong to that tenant will be placed into that dedicated pod, cluster or - host. -
-
- Resizing Volumes - &PRODUCT; provides the ability to resize data disks; &PRODUCT; controls volume size by - using disk offerings. This provides &PRODUCT; administrators with the flexibility to - choose how much space they want to make available to the end users. Volumes within the - disk offerings with the same storage tag can be resized. For example, if you only want to - offer 10, 50, and 100 GB offerings, the allowed resize should stay within those limits. - That implies if you define 10 GB, 50 GB, and 100 GB disk offerings, a user can - upgrade from 10 GB to 50 GB, or 50 GB to 100 GB. If you create a custom-sized disk - offering, then you have the option to resize the volume by specifying a new, larger size. - Additionally, using the resizeVolume API, a data volume can be moved from a static disk - offering to a custom disk offering with the size specified. This functionality allows - those who might be billing by certain volume sizes or disk offerings to stick to that - model, while providing the flexibility to migrate to whatever custom size necessary. This - feature is supported on KVM, XenServer, and VMware hosts. However, shrinking volumes is - not supported on VMware hosts. -
-
- VMware Volume Snapshot Improved Performance - When you take a snapshot of a data volume on VMware, &PRODUCT; will now use a more - efficient storage technique to improve performance. - Previously, every snapshot was immediately exported from vCenter to a mounted NFS - share and packaged into an OVA file format. This operation consumed time and resources. - Starting from 4.2, the original file formats (e.g., VMDK) provided by vCenter will be - retained. An OVA file will only be created as needed, on demand. - The new process applies only to newly created snapshots after upgrade to &PRODUCT; - 4.2. Snapshots that have already been taken and stored in OVA format will continue to - exist in that format, and will continue to work as expected. -
-
- Storage Migration: XenMotion and vMotion - (Supported on XenServer and VMware) - Storage migration allows VMs to be moved from one host to another, where the VMs are - not located on storage shared between the two hosts. It provides the option to live - migrate a VM’s disks along with the VM itself. It is now possible to migrate a VM from one - XenServer resource pool / VMware cluster to another, or to migrate a VM whose disks are on - local storage, or even to migrate a VM’s disks from one storage repository to another, all - while the VM is running. -
-
- Configuring Usage of Linked Clones on VMware - (For ESX hypervisor in conjunction with vCenter) - In &PRODUCT; 4.2, the creation of VMs as full clones is allowed. In previous versions, - only linked clones were possible. - For a full description of clone types, refer to VMware documentation. In summary: A - full clone is a copy of an existing virtual machine which, once created, does not depend - in any way on the original virtual machine. A linked clone is also a copy of an existing - virtual machine, but it has ongoing dependency on the original. A linked clone shares the - virtual disk of the original VM, and retains access to all files that were present at the - time the clone was created. - A new global configuration setting has been added, vmware.create.full.clone. When the - administrator sets this to true, end users can create guest VMs only as full clones. The - default value is true for new installations. For customers upgrading from a previous - version of &PRODUCT;, the default value of vmware.create.full.clone is false. -
-
- VM Deployment Rules - Rules can be set up to ensure that particular VMs are not placed on the same physical - host. These "anti-affinity rules" can increase the reliability of applications by ensuring - that the failure of a single host cannot take down the entire group of VMs supporting a - given application. See Affinity Groups in the &PRODUCT; 4.2 Administration Guide. -
-
- CPU and Memory Scaling for Running VMs - (Supported on VMware and XenServer) - You can now change the CPU and RAM values for a running virtual machine. In previous - versions of &PRODUCT;, this could only be done on a stopped VM. - It is not always possible to accurately predict the CPU and RAM requirements when you - first deploy a VM. You might need to increase or decrease these resources at any time - during the life of a VM. With the new ability to dynamically modify CPU and RAM levels, - you can change these resources for a running VM without incurring any downtime. - Dynamic CPU and RAM scaling can be used in the following cases: - - - New VMs that are created after the installation of &PRODUCT; 4.2. If you are - upgrading from a previous version of &PRODUCT;, your existing VMs created with - previous versions will not have the dynamic scaling capability. - - - User VMs on hosts running VMware and XenServer. - - - System VMs on VMware. - - - VM Tools or XenServer Tools must be installed on the virtual machine. - - - The new requested CPU and RAM values must be within the constraints allowed by the - hypervisor and the VM operating system. - - - To configure this feature, use the following new global configuration - variables: - - - enable.dynamic.scale.vm: Set to True to enable the feature. By default, the - feature is turned off. - - - scale.retry: How many times to attempt the scaling operation. Default = 2. - - -
-
- CPU and Memory Over-Provisioning - (Supported for XenServer, KVM, and VMware) - In &PRODUCT; 4.2, CPU and memory (RAM) over-provisioning factors can be set for each - cluster to change the number of VMs that can run on each host in the cluster. This helps - optimize the use of resources. By increasing the over-provisioning ratio, more resource - capacity will be used. If the ratio is set to 1, no over-provisioning is done. - In previous releases, &PRODUCT; did not perform memory over-provisioning. It performed - CPU over-provisioning based on a ratio configured by the administrator in the global - configuration setting cpu.overprovisioning.factor. Starting in 4.2, the administrator can - specify a memory over-provisioning ratio, and can specify both CPU and memory - over-provisioning ratios on a per-cluster basis, rather than only on a global - basis. - In any given cloud, the optimum number of VMs for each host is affected by such things - as the hypervisor, storage, and hardware configuration. These may be different for each - cluster in the same cloud. A single global over-provisioning setting could not provide the - best utilization for all the different clusters in the cloud. It had to be set for the - lowest common denominator. The new per-cluster setting provides a finer granularity for - better utilization of resources, no matter where the &PRODUCT; placement algorithm decides - to place a VM. -
-
- Kickstart Installation for Bare Metal Provisioning - &PRODUCT; 4.2 supports the kick start installation method for RPM-based Linux - operating systems on baremetal hosts in basic zones. Users can provision a baremetal host - managed by &PRODUCT; as long as they have the kick start file and corresponding OS - installation ISO ready. - Tested on CentOS 5.5, CentOS 6.2, CentOS 6.3, Ubuntu 12.04. - For more information, see the Baremetal Installation Guide. -
-
- Enhanced Bare Metal Support on Cisco UCS - You can now more easily provision new Cisco UCS server blades into &PRODUCT; for use - as bare metal hosts. The goal is to enable easy expansion of the cloud by leveraging the - programmability of the UCS converged infrastructure and &PRODUCT;’s knowledge of the cloud - architecture and ability to orchestrate. With this new feature, &PRODUCT; can - automatically understand the UCS environment, server profiles, etc. to make it easy to - deploy a bare metal OS on a Cisco UCS. -
-
- Changing a VM's Base Image - Every VM is created from a base image, which is a template or ISO which has been - created and stored in &PRODUCT;. Both cloud administrators and end users can create and - modify templates, ISOs, and VMs. - In &PRODUCT; 4.2, there is a new way to modify an existing VM. You can change an - existing VM from one base image to another. For example, suppose there is a template based - on a particular operating system, and the OS vendor releases a software patch. The - administrator or user naturally wants to apply the patch and then make sure existing VMs - start using it. Whether a software update is involved or not, it's also possible to simply - switch a VM from its current template to any other desired template. -
-
- Reset VM on Reboot - In &PRODUCT; 4.2, you can specify that you want to discard the root disk and create a - new one whenever a given VM is rebooted. This is useful for secure environments that need - a fresh start on every boot and for desktops that should not retain state. The IP address - of the VM will not change due to this operation. -
-
- Virtual Machine Snapshots for VMware - (VMware hosts only) In addition to the existing &PRODUCT; ability to snapshot - individual VM volumes, you can now take a VM snapshot to preserve all the VM's data - volumes as well as (optionally) its CPU/memory state. This is useful for quick restore of - a VM. For example, you can snapshot a VM, then make changes such as software upgrades. If - anything goes wrong, simply restore the VM to its previous state using the previously - saved VM snapshot. - The snapshot is created using the VMware native snapshot facility. The VM snapshot - includes not only the data volumes, but optionally also whether the VM is running or - turned off (CPU state) and the memory contents. The snapshot is stored in &PRODUCT;'s - primary storage. - VM snapshots can have a parent/child relationship. Each successive snapshot of the - same VM is the child of the snapshot that came before it. Each time you take an additional - snapshot of the same VM, it saves only the differences between the current state of the VM - and the state stored in the most recent previous snapshot. The previous snapshot becomes a - parent, and the new snapshot is its child. It is possible to create a long chain of these - parent/child snapshots, which amount to a "redo" record leading from the current state of - the VM back to the original. -
-
- Increased Userdata Size When Deploying a VM - You can now specify up to 32KB of userdata when deploying a virtual machine through - the &PRODUCT; UI or the deployVirtualMachine API call. -
-
- Set VMware Cluster Size Limit Depending on VMware Version - The maximum number of hosts in a vSphere cluster is determined by the VMware - hypervisor software. For VMware versions 4.0, 4.1, 5.0, and 5.1, the limit is 32 - hosts. - For &PRODUCT; 4.2, the global configuration setting vmware.percluster.host.max has - been removed. The maximum number of hosts in a VMware cluster is now determined by the - underlying hypervisor software. - - Best Practice: It is advisable for VMware clusters in &PRODUCT; to be smaller than - the VMware hypervisor's maximum size. A cluster size of up to 8 hosts has been found - optimal for most real-world situations. - -
-
- Limiting Resource Usage - Previously in &PRODUCT;, resource usage limit was imposed based on the resource count, - that is, restrict a user or domain on the basis of the number of VMs, volumes, or - snapshots used. In &PRODUCT; 4.2, a new set of resource types has been added to the - existing pool of resources (VMs, Volumes, and Snapshots) to support the customization - model—need-basis usage, such as large VM or small VM. The new resource types are now - broadly classified as CPU, RAM, Primary storage, and Secondary storage. &PRODUCT; 4.2 - allows the root administrator to impose resource usage limit by the following resource - types for Domain, Project and Accounts. - - - CPUs - - - Memory (RAM) - - - Primary Storage (Volumes) - - - Secondary Storage (Snapshots, Templates, ISOs) - - -
-
-
- Monitoring, Maintenance, and Operations Enhancements -
- Deleting and Archiving Events and Alerts - In addition to viewing a list of events and alerts in the UI, the administrator can - now delete and archive them. In order to support deleting and archiving alerts, the - following global parameters have been added: - - - alert.purge.delay: The alerts older than - specified number of days are purged. Set the value to 0 to never purge alerts - automatically. - - - alert.purge.interval: The interval in seconds to - wait before running the alert purge thread. The default is 86400 seconds (one - day). - - - - Archived alerts or events cannot be viewed in the UI, or by using the API. They are - maintained in the database for auditing or compliance purposes. - -
-
- Increased Granularity for Configuration Parameters - Some configuration parameters which were previously available only at the global level - of the cloud can now be set for smaller components of the cloud, such as at the zone - level. To set these parameters, look for the new Settings tab in the UI. You will find it - on the detail page for an account, cluster, zone, or primary storage. - The account level parameters are: remote.access.vpn.client.iprange, - allow.public.user.templates, use.system.public.ips, and - use.system.guest.vlans - The cluster level parameters are - cluster.storage.allocated.capacity.notificationthreshold, - cluster.storage.capacity.notificationthreshold, - cluster.cpu.allocated.capacity.notificationthreshold, - cluster.memory.allocated.capacity.notificationthreshold, - cluster.cpu.allocated.capacity.disablethreshold, - cluster.memory.allocated.capacity.disablethreshold, - cpu.overprovisioning.factor, mem.overprovisioning.factor, - vmware.reserve.cpu, and vmware.reserve.mem. - The zone level parameters are - pool.storage.allocated.capacity.disablethreshold, - pool.storage.capacity.disablethreshold, - storage.overprovisioning.factor, network.throttling.rate, - guest.domain.suffix, router.template.xen, - router.template.kvm, router.template.vmware, - router.template.hyperv, router.template.lxc, - enable.dynamic.scale.vm, use.external.dns, and - blacklisted.routes. -
-
- API Request Throttling - In &PRODUCT; 4.2, you can limit the rate at which API requests can be placed for each - account. This is useful to avoid malicious attacks on the Management Server, prevent - performance degradation, and provide fairness to all accounts. - If the number of API calls exceeds the threshold, an error message is returned for any - additional API calls. The caller will have to retry these API calls at another - time. - To control the API request throttling, use the following new global configuration - settings: - - - api.throttling.enabled - Enable/Disable API throttling. By default, this setting - is false, so API throttling is not enabled. - - - api.throttling.interval (in seconds) - Time interval during which the number of - API requests is to be counted. When the interval has passed, the API count is reset to - 0. - - - api.throttling.max - Maximum number of APIs that can be placed within the - api.throttling.interval period. - - - api.throttling.cachesize - Cache size for storing API counters. Use a value higher - than the total number of accounts managed by the cloud. One cache entry is needed for - each account, to store the running API total for that account within the current time - window. - - -
-
- Sending Alerts to External SNMP and Syslog Managers - In addition to showing administrator alerts on the Dashboard in the &PRODUCT; UI and - sending them in email, &PRODUCT; now can also send the same alerts to external SNMP or - Syslog management software. This is useful if you prefer to use an SNMP or Syslog manager - to monitor your cloud. - The supported protocol is SNMP version 2. -
-
- Changing the Default Password Encryption - Passwords are encoded when creating or updating users. The new default preferred - encoder, replacing MD5, is SHA256. It is more secure than MD5 hashing. If you take no - action to customize password encryption and authentication, SHA256 Salt will be - used. - If you prefer a different authentication mechanism, &PRODUCT; 4.2 provides a way for - you to determine the default encoding and authentication mechanism for admin and user - logins. Two new configurable lists have been introduced: userPasswordEncoders and - userAuthenticators. userPasswordEncoders allow you to configure the order of preference - for encoding passwords, and userAuthenticator allows you to configure the order in which - authentication schemes are invoked to validate user passwords. - The plain text user authenticator has been modified not to convert supplied passwords - to their md5 sums before checking them with the database entries. It performs a simple - string comparison between retrieved and supplied login passwords instead of comparing the - retrieved md5 hash of the stored password against the supplied md5 hash of the password, - because clients no longer hash the password. -
-
- Log Collection Utility cloud-bugtool - &PRODUCT; provides a command-line utility called cloud-bugtool to make it easier to - collect the logs and other diagnostic data required for troubleshooting. This is - especially useful when interacting with Citrix Technical Support. - You can use cloud-bugtool to collect the following: - - - Basic system and environment information and network configuration including IP - addresses, routing, and name resolver settings - - - Information about running processes - - - Management Server logs - - - System logs in /var/log/ - - - Dump of the cloud database - - - - cloud-bugtool collects information which might be considered sensitive and - confidential. Using the --nodb option to avoid the cloud database can - reduce this concern, though it is not guaranteed to exclude all sensitive data. - - -
-
- Snapshotting, Backups, Cloning and System VMs for RBD Primary Storage - - These new RBD features require at least librbd 0.61.7 (Cuttlefish) and libvirt - 0.9.14 on the KVM hypervisors. - - This release of &PRODUCT; will leverage the features of RBD format 2. This allows - snapshotting and backing up those snapshots. - Backups of snapshots to Secondary Storage are full copies of the RBD snapshot, they - are not RBD diffs. This is because when restoring a backup of a snapshot it is not mandatory - that this backup is deployed on RBD again, it could also be an NFS Primary Storage. - Another key feature of RBD format 2 is cloning. With this release templates will be - copied to Primary Storage once and by using the cloning mechanism new disks will be cloned - from this parent template. This saves space and decreases deployment time for instances - dramatically. - Before this release, an NFS Primary Storage was still required for running the System - VMs from. The reason was a so called 'patch disk' that was generated by the hypervisor - which contained metadata for the System VM. The scripts generating this disk didn't - support RBD and thus System VMs had to be deployed from NFS. With 4.2 instead of the patch - disk a VirtIO serial console is used to pass meta information to System VMs. This enabled - the deployment of System VMs on RBD Primary Storage. -
-
-
- Issues Fixed in 4.2.0 - Apache CloudStack uses Jira to track its issues. All new features and bugs for 4.2.0 have been tracked - in Jira, and have a standard naming convention of "CLOUDSTACK-NNNN" where "NNNN" is the - issue number. - For a list of issues fixed, see Issues Fixed in - 4.2. -
-
- Known Issues in 4.2.0 - This section includes a summary of known issues in 4.2.0. For a list of - known issues, see Known - Issues. -
-
- - Upgrade Instructions for 4.2 - This section contains upgrade instructions from prior versions of CloudStack to Apache - CloudStack 4.2.0. We include instructions on upgrading to Apache CloudStack from pre-Apache - versions of Citrix CloudStack (last version prior to Apache is 3.0.2) and from the releases - made while CloudStack was in the Apache Incubator. - If you run into any issues during upgrades, please feel free to ask questions on - users@cloudstack.apache.org or dev@cloudstack.apache.org. -
- Upgrade from 4.x.x to 4.2.0 - This section will guide you from &PRODUCT; 4.0.x versions to &PRODUCT; 4.2.0. - Any steps that are hypervisor-specific will be called out with a note. - - Package Structure Changes - The package structure for &PRODUCT; has changed significantly since the 4.0.x - releases. If you've compiled your own packages, you'll notice that the package names and - the number of packages has changed. This is not a bug. - However, this does mean that the procedure is not as simple as an apt-get - upgrade or yum update, so please follow this section - carefully. - - We recommend reading through this section once or twice before beginning your upgrade - procedure, and working through it on a test system before working on a production - system. - - - Most users of &PRODUCT; manage the installation and upgrades of &PRODUCT; with one - of Linux's predominant package systems, RPM or APT. This guide assumes you'll be using - RPM and Yum (for Red Hat Enterprise Linux or CentOS), or APT and Debian packages (for - Ubuntu). - - - Create RPM or Debian packages (as appropriate) and a repository from the 4.2.0 - source, or check the Apache CloudStack downloads page at http://cloudstack.apache.org/downloads.html for package repositories supplied - by community members. You will need them for step - or step . - Instructions for creating packages from the &PRODUCT; source are in the Installation - Guide. - - - Stop your management server or servers. Run this on all management server - hosts: - # service cloud-management stop - - - If you are running a usage server or usage servers, stop those as well: - # service cloud-usage stop - - - Make a backup of your MySQL database. If you run into any issues or need to roll - back the upgrade, this will assist in debugging or restoring your existing environment. - You'll be prompted for your password. 
- # mysqldump -u root -p cloud > cloudstack-backup.sql - - - If you have made changes to - /etc/cloud/management/components.xml, you'll need to carry these - over manually to the new file, - /etc/cloudstack/management/componentContext.xml. This is not done - automatically. (If you're unsure, we recommend making a backup of the original - components.xml to be on the safe side. - - - After upgrading to 4.2, API clients are expected to send plain text passwords for - login and user creation, instead of MD5 hash. If API client changes are not acceptable, - following changes are to be made for backward compatibility: - Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default - authenticator (1st entry in the userAuthenticators adapter list is default) - -<!-- Security adapters --> -<bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList"> - <property name="Adapters"> - <list> - <ref bean="PlainTextUserAuthenticator"/> - <ref bean="MD5UserAuthenticator"/> - <ref bean="LDAPUserAuthenticator"/> - </list> - </property> -</bean> - - PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to - 4.2. - - - If you are using Ubuntu, follow this procedure to upgrade your packages. If not, - skip to step . - - Community Packages - This section assumes you're using the community supplied packages for &PRODUCT;. - If you've created your own packages and APT repository, substitute your own URL for - the ones used in these examples. - - - - The first order of business will be to change the sources list for each system - with &PRODUCT; packages. This means all management servers, and any hosts that have - the KVM agent. (No changes should be necessary for hosts that are running VMware or - Xen.) - Start by opening /etc/apt/sources.list.d/cloudstack.list on - any systems that have &PRODUCT; packages installed. 
- This file should have one line, which contains: - deb http://cloudstack.apt-get.eu/ubuntu precise 4.0 - We'll change it to point to the new package repository: - deb http://cloudstack.apt-get.eu/ubuntu precise 4.2 - If you're using your own package repository, change this line to read as - appropriate for your 4.2.0 repository. - - - Now update your apt package list: - $ sudo apt-get update - - - Now that you have the repository configured, it's time to install the - cloudstack-management package. This will pull in any other - dependencies you need. - $ sudo apt-get install cloudstack-management - - - You will need to manually install the cloudstack-agent - package: - $ sudo apt-get install cloudstack-agent - During the installation of cloudstack-agent, APT will copy - your agent.properties, log4j-cloud.xml, - and environment.properties from - /etc/cloud/agent to - /etc/cloudstack/agent. - When prompted whether you wish to keep your configuration, say Yes. - - - Verify that the file - /etc/cloudstack/agent/environment.properties has a line that - reads: - paths.script=/usr/share/cloudstack-common - If not, add the line. - - - Restart the agent: - -service cloud-agent stop -killall jsvc -service cloudstack-agent start - - - - During the upgrade, log4j-cloud.xml was simply copied over, - so the logs will continue to be added to - /var/log/cloud/agent/agent.log. There's nothing - wrong with this, but if you prefer to be consistent, you can - change this by copying over the sample configuration file: - -cd /etc/cloudstack/agent -mv log4j-cloud.xml.dpkg-dist log4j-cloud.xml -service cloudstack-agent restart - - - - Once the agent is running, you can uninstall the old cloud-* packages from your - system: - sudo dpkg --purge cloud-agent - - - - - (VMware only) Additional steps are required for each VMware cluster. These steps - will not affect running guests in the cloud. 
These steps are required only for clouds - using VMware clusters: - - - Stop the Management Server: - service cloudstack-management stop - - - Generate the encrypted equivalent of your vCenter password: - java -classpath /usr/share/cloudstack-common/lib/jasypt-1.9.0.jar org.jasypt.intf.cli.JasyptPBEStringEncryptionCLI encrypt.sh input="_your_vCenter_password_" password="`cat /etc/cloudstack/management/key`" verbose=false - Store the output from this step, we need to add this in cluster_details table - and vmware_data_center tables in place of the plain text password - - - Find the ID of the row of cluster_details table that you have to update: - mysql -u <username> -p<password> - select * from cloud.cluster_details; - - - Update the plain text password with the encrypted one - update cloud.cluster_details set value = '_ciphertext_from_step_1_' where id = _id_from_step_2_; - - - Confirm that the table is updated: - select * from cloud.cluster_details; - - - Find the ID of the correct row of vmware_data_center that you want to - update - select * from cloud.vmware_data_center; - - - update the plain text password with the encrypted one: - update cloud.vmware_data_center set password = '_ciphertext_from_step_1_' where id = _id_from_step_5_; - - - Confirm that the table is updated: - select * from cloud.vmware_data_center; - - - Start the &PRODUCT; Management server - service cloudstack-management start - - - - - (KVM only) Additional steps are required for each KVM host. These steps will not - affect running guests in the cloud. These steps are required only for clouds using KVM - as hosts and only on the KVM hosts. - - - Manually clean up /var/cache/cloudstack. - - - Copy the 4.2 tar file to the host, untar it, and change directory to the - resulting directory. - - - Stop the running agent. - # service cloud-agent stop - - - Update the agent software. - # ./install.sh - - - Choose "U" to update the packages. - - - Start the agent. 
- # service cloudstack-agent start - - - - - If you are using CentOS or RHEL, follow this procedure to upgrade your packages. If - not, skip to step . - - Community Packages - This section assumes you're using the community supplied packages for &PRODUCT;. - If you've created your own packages and yum repository, substitute your own URL for - the ones used in these examples. - - - - The first order of business will be to change the yum repository for each system - with &PRODUCT; packages. This means all management servers, and any hosts that have - the KVM agent. - (No changes should be necessary for hosts that are running VMware or - Xen.) - Start by opening /etc/yum.repos.d/cloudstack.repo on any - systems that have &PRODUCT; packages installed. - This file should have content similar to the following: - -[apache-cloudstack] -name=Apache CloudStack -baseurl=http://cloudstack.apt-get.eu/rhel/4.0/ -enabled=1 -gpgcheck=0 - - If you are using the community provided package repository, change the base url - to http://cloudstack.apt-get.eu/rhel/4.2/ - If you're using your own package repository, change this line to read as - appropriate for your 4.2.0 repository. - - - Now that you have the repository configured, it's time to install the - cloudstack-management package by upgrading the older - cloud-client package. - $ sudo yum upgrade cloud-client - - - For KVM hosts, you will need to upgrade the cloud-agent - package, similarly installing the new version as - cloudstack-agent. - $ sudo yum upgrade cloud-agent - During the installation of cloudstack-agent, the RPM will - copy your agent.properties, - log4j-cloud.xml, and - environment.properties from - /etc/cloud/agent to - /etc/cloudstack/agent. - - - For CentOS 5.5, perform the following: - - - Run the following command: - rpm -Uvh http://download.cloud.com/support/jsvc/jakarta-commons-daemon-jsvc-1.0.1-8.9.el6.x86_64.rpm - - - Upgrade the Usage server. 
- sudo yum upgrade cloud-usage - - - - - Verify that the file - /etc/cloudstack/agent/environment.properties has a line that - reads: - paths.script=/usr/share/cloudstack-common - If not, add the line. - - - Restart the agent: - -service cloud-agent stop -killall jsvc -service cloudstack-agent start - - - - - - Once you've upgraded the packages on your management servers, you'll need to restart - the system VMs. Make sure port 8096 is open in your local host firewall to do - this. - There is a script that will do this for you, all you need to do is run the script - and supply the IP address for your MySQL instance and your MySQL credentials: - # nohup cloudstack-sysvmadm -d IP address -u cloud -p -a > sysvm.log 2>&1 & - You can monitor the log for progress. The process of restarting the system VMs can - take an hour or more. - # tail -f sysvm.log - The output to sysvm.log will look something like this: - -Stopping and starting 1 secondary storage vm(s)... -Done stopping and starting secondary storage vm(s) -Stopping and starting 1 console proxy vm(s)... -Done stopping and starting console proxy vm(s). -Stopping and starting 4 running routing vm(s)... -Done restarting router(s). - - - - - For Xen Hosts: Copy vhd-utils - This step is only for CloudStack installs that are using Xen hosts. - - Copy the file vhd-utils to - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver. - - -
-
- Upgrade from 3.0.2 to 4.2.0 - This section will guide you from Citrix CloudStack 3.0.2 to Apache CloudStack 4.2.0. - Sections that are hypervisor-specific will be called out with a note. - - - - The following upgrade instructions apply only if you're using VMware hosts. If - you're not using VMware hosts, skip this step and move on to . - - In each zone that includes VMware hosts, you need to add a new system VM template. - - - While running the existing 3.0.2 system, log in to the UI as root - administrator. - - - In the left navigation bar, click Templates. - - - In Select view, click Templates. - - - Click Register template. - The Register template dialog box is displayed. - - - In the Register template dialog box, specify the following values (do not change - these): - - - - - - - Field - Value - - - - - Name - systemvm-vmware-4.2 - - - Description - systemvm-vmware-4.2 - - - URL - http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova - - - Zone - Choose the zone where this hypervisor is used - - - Hypervisor - VMware - - - Format - OVA - - - OS Type - Debian GNU/Linux 5.0 (32-bit) - - - Extractable - no - - - Password Enabled - no - - - Public - no - - - Featured - no - - - - - - - Watch the screen to be sure that the template downloads successfully and enters - the READY state. Do not proceed until this is successful. - - - - - Stop all Usage Servers if running. Run this on all Usage Server hosts. - # service cloud-usage stop - - - Stop the Management Servers. Run this on all Management Server hosts. - # service cloud-management stop - - - On the MySQL master, take a backup of the MySQL databases. We recommend performing - this step even in test upgrades. If there is an issue, this will assist with - debugging. - In the following commands, it is assumed that you have set the root password on the - database, which is a CloudStack recommended best practice. Substitute your own MySQL - root password. 
- # mysqldump -u root -pmysql_password cloud > cloud-backup.dmp - # mysqldump -u root -pmysql_password cloud_usage > cloud-usage-backup.dmp - - - Either build RPM/DEB packages as detailed in the Installation Guide, or use one of - the community provided yum/apt repositories to gain access to the &PRODUCT; - binaries. - - - If you are using Ubuntu, follow this procedure to upgrade your packages. If not, - skip to step . - - Community Packages - This section assumes you're using the community supplied packages for &PRODUCT;. - If you've created your own packages and APT repository, substitute your own URL for - the ones used in these examples. - - - - The first order of business will be to change the sources list for each system - with &PRODUCT; packages. This means all management servers, and any hosts that have - the KVM agent. (No changes should be necessary for hosts that are running VMware or - Xen.) - Start by opening /etc/apt/sources.list.d/cloudstack.list on - any systems that have &PRODUCT; packages installed. - This file should have one line, which contains: - deb http://cloudstack.apt-get.eu/ubuntu precise 4.0 - We'll change it to point to the new package repository: - deb http://cloudstack.apt-get.eu/ubuntu precise 4.2 - If you're using your own package repository, change this line to read as - appropriate for your 4.2.0 repository. - - - Now update your apt package list: - $ sudo apt-get update - - - Now that you have the repository configured, it's time to install the - cloudstack-management package. This will pull in any other - dependencies you need. - $ sudo apt-get install cloudstack-management - - - You will need to manually install the cloudstack-agent - package: - $ sudo apt-get install cloudstack-agent - During the installation of cloudstack-agent, APT will copy - your agent.properties, log4j-cloud.xml, - and environment.properties from - /etc/cloud/agent to - /etc/cloudstack/agent. 
- When prompted whether you wish to keep your configuration, say Yes. - - - Verify that the file - /etc/cloudstack/agent/environment.properties has a line that - reads: - paths.script=/usr/share/cloudstack-common - If not, add the line. - - - Restart the agent: - -service cloud-agent stop -killall jsvc -service cloudstack-agent start - - - - During the upgrade, log4j-cloud.xml was simply copied over, - so the logs will continue to be added to - /var/log/cloud/agent/agent.log. There's nothing - wrong with this, but if you prefer to be consistent, you can - change this by copying over the sample configuration file: - -cd /etc/cloudstack/agent -mv log4j-cloud.xml.dpkg-dist log4j-cloud.xml -service cloudstack-agent restart - - - - Once the agent is running, you can uninstall the old cloud-* packages from your - system: - sudo dpkg --purge cloud-agent - - - - - (KVM only) Additional steps are required for each KVM host. These steps will not - affect running guests in the cloud. These steps are required only for clouds using KVM - as hosts and only on the KVM hosts. - - - Copy the CloudPlatform 4.2 tar file to the host, untar it, and change directory - to the resulting directory. - - - Stop the running agent. - # service cloud-agent stop - - - Update the agent software. - # ./install.sh - - - Choose "U" to update the packages. - - - Start the agent. - # service cloudstack-agent start - - - - - If you are using CentOS or RHEL, follow this procedure to upgrade your packages. If - not, skip to step . - - Community Packages - This section assumes you're using the community supplied packages for &PRODUCT;. - If you've created your own packages and yum repository, substitute your own URL for - the ones used in these examples. - - - - The first order of business will be to change the yum repository for each system - with &PRODUCT; packages. This means all management servers, and any hosts that have - the KVM agent. 
(No changes should be necessary for hosts that are running VMware or - Xen.) - Start by opening /etc/yum.repos.d/cloudstack.repo on any - systems that have &PRODUCT; packages installed. - This file should have content similar to the following: - -[apache-cloudstack] -name=Apache CloudStack -baseurl=http://cloudstack.apt-get.eu/rhel/4.0/ -enabled=1 -gpgcheck=0 - - If you are using the community provided package repository, change the baseurl - to http://cloudstack.apt-get.eu/rhel/4.2/ - If you're using your own package repository, change this line to read as - appropriate for your 4.2.0 repository. - - - Now that you have the repository configured, it's time to install the - cloudstack-management package by upgrading the older - cloud-client package. - $ sudo yum upgrade cloud-client - - - For KVM hosts, you will need to upgrade the cloud-agent - package, similarly installing the new version as - cloudstack-agent. - $ sudo yum upgrade cloud-agent - During the installation of cloudstack-agent, the RPM will - copy your agent.properties, - log4j-cloud.xml, and - environment.properties from - /etc/cloud/agent to - /etc/cloudstack/agent. - - - Verify that the file - /etc/cloudstack/agent/environment.properties has a line that - reads: - paths.script=/usr/share/cloudstack-common - If not, add the line. - - - Restart the agent: - -service cloud-agent stop -killall jsvc -service cloudstack-agent start - - - - - - If you have made changes to your copy of - /etc/cloud/management/components.xml the changes will be - preserved in the upgrade. However, you need to do the following steps to place these - changes in a new version of the file which is compatible with version 4.2.0. - - - Make a backup copy of /etc/cloud/management/components.xml. 
- For example: - # mv /etc/cloud/management/components.xml /etc/cloud/management/components.xml-backup - - - Copy /etc/cloud/management/components.xml.rpmnew to create - a new /etc/cloud/management/components.xml: - # cp -ap /etc/cloud/management/components.xml.rpmnew /etc/cloud/management/components.xml - - - Merge your changes from the backup file into the new - components.xml. - # vi /etc/cloudstack/management/components.xml - - - - If you have more than one management server node, repeat the upgrade steps on each - node. - - - - After upgrading to 4.2, API clients are expected to send plain text passwords for - login and user creation, instead of MD5 hash. Incase, api client changes are not - acceptable, following changes are to be made for backward compatibility: - Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default - authenticator (1st entry in the userAuthenticators adapter list is default) - -<!-- Security adapters --> -<bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList"> - <property name="Adapters"> - <list> - <ref bean="PlainTextUserAuthenticator"/> - <ref bean="MD5UserAuthenticator"/> - <ref bean="LDAPUserAuthenticator"/> - </list> - </property> -</bean> - - PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to - 4.2. - - - Start the first Management Server. Do not start any other Management Server nodes - yet. - # service cloudstack-management start - Wait until the databases are upgraded. Ensure that the database upgrade is complete. - After confirmation, start the other Management Servers one at a time by running the same - command on each node. - - Failing to restart the Management Server indicates a problem in the upgrade. - Having the Management Server restarted without any issues indicates that the upgrade - is successfully completed. - - - - Start all Usage Servers (if they were running on your previous version). Perform - this on each Usage Server host. 
- # service cloudstack-usage start - - - Additional steps are required for each KVM host. These steps will not affect running - guests in the cloud. These steps are required only for clouds using KVM as hosts and - only on the KVM hosts. - - - Configure a yum or apt repository containing the &PRODUCT; packages as outlined - in the Installation Guide. - - - Stop the running agent. - # service cloud-agent stop - - - Update the agent software with one of the following command sets as appropriate - for your environment. - # yum update cloud-* - # apt-get update - # apt-get upgrade cloud-* - - - Edit /etc/cloudstack/agent/agent.properties to change the - resource parameter from - "com.cloud.agent.resource.computing.LibvirtComputingResource" to - "com.cloud.hypervisor.kvm.resource.LibvirtComputingResource". - - - Start the cloud agent and cloud management services. - # service cloudstack-agent start - - - When the Management Server is up and running, log in to the CloudStack UI and - restart the virtual router for proper functioning of all the features. - - - - - Log in to the CloudStack UI as administrator, and check the status of the hosts. All - hosts should come to Up state (except those that you know to be offline). You may need - to wait 20 or 30 minutes, depending on the number of hosts. - - Troubleshooting: If login fails, clear your browser cache and reload the - page. - - Do not proceed to the next step until the hosts show in Up state. - - - If you are upgrading from 3.0.2, perform the following: - - - Ensure that the admin port is set to 8096 by using the "integration.api.port" - global parameter. - This port is used by the cloud-sysvmadm script at the end of the upgrade - procedure. For information about how to set this parameter, see "Setting Global - Configuration Parameters" in the Installation Guide. - - - Restart the Management Server. 
- - If you don't want the admin port to remain open, you can set it to null after - the upgrade is done and restart the management server. - - - - - - Run the cloud-sysvmadm script to stop, then start, all Secondary - Storage VMs, Console Proxy VMs, and virtual routers. Run the script once on each - management server. Substitute your own IP address of the MySQL instance, the MySQL user - to connect as, and the password to use for that user. In addition to those parameters, - provide the -c and -r arguments. For - example: - # nohup cloud-sysvmadm -d 192.168.1.5 -u cloud -p password -c -r > - sysvm.log 2>&1 & - # tail -f sysvm.log - This might take up to an hour or more to run, depending on the number of accounts in - the system. - - - If needed, upgrade all Citrix XenServer hypervisor hosts in your cloud to a version - supported by CloudStack 4.2.0. The supported versions are XenServer 5.6 SP2 and 6.0.2. - Instructions for upgrade can be found in the CloudStack 4.2.0 Installation Guide under - "Upgrading XenServer Versions." - - - Now apply the XenServer hotfix XS602E003 (and any other needed hotfixes) to - XenServer v6.0.2 hypervisor hosts. - - - Disconnect the XenServer cluster from CloudStack. - In the left navigation bar of the CloudStack UI, select Infrastructure. Under - Clusters, click View All. Select the XenServer cluster and click Actions - - Unmanage. - This may fail if there are hosts not in one of the states Up, Down, - Disconnected, or Alert. You may need to fix that before unmanaging this - cluster. - Wait until the status of the cluster has reached Unmanaged. Use the CloudStack - UI to check on the status. When the cluster is in the unmanaged state, there is no - connection to the hosts in the cluster. 
- - - To clean up the VLAN, log in to one XenServer host and run: - /opt/xensource/bin/cloud-clean-vlan.sh - - - Now prepare the upgrade by running the following on one XenServer host: - /opt/xensource/bin/cloud-prepare-upgrade.sh - If you see a message like "can't eject CD", log in to the VM and unmount the CD, - then run this script again. - - - Upload the hotfix to the XenServer hosts. Always start with the Xen pool master, - then the slaves. Using your favorite file copy utility (e.g. WinSCP), copy the - hotfixes to the host. Place them in a temporary folder such as /tmp. - On the Xen pool master, upload the hotfix with this command: - xe patch-upload file-name=XS602E003.xsupdate - Make a note of the output from this command, which is a UUID for the hotfix - file. You'll need it in another step later. - - (Optional) If you are applying other hotfixes as well, you can repeat the - commands in this section with the appropriate hotfix number. For example, - XS602E004.xsupdate. - - - - Manually live migrate all VMs on this host to another host. First, get a list of - the VMs on this host: - # xe vm-list - Then use this command to migrate each VM. Replace the example host name and VM - name with your own: - # xe vm-migrate live=true host=host-name - vm=VM-name - - Troubleshooting - If you see a message like "You attempted an operation on a VM which requires - PV drivers to be installed but the drivers were not detected," run: - /opt/xensource/bin/make_migratable.sh - b6cf79c8-02ee-050b-922f-49583d9f1a14. - - - - Apply the hotfix. First, get the UUID of this host: - # xe host-list - Then use the following command to apply the hotfix. Replace the example host - UUID with the current host ID, and replace the hotfix UUID with the output from the - patch-upload command you ran on this machine earlier. You can also get the hotfix - UUID by running xe patch-list. 
- xe patch-apply host-uuid=host-uuid uuid=hotfix-uuid - - - Copy the following files from the CloudStack Management Server to the - host. - - - - - - - Copy from here... - ...to here - - - - - /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/xenserver60/NFSSR.py - /opt/xensource/sm/NFSSR.py - - - /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/setupxenserver.sh - /opt/xensource/bin/setupxenserver.sh - - - /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/make_migratable.sh - /opt/xensource/bin/make_migratable.sh - - - - - - - (Only for hotfixes XS602E005 and XS602E007) You need to apply a new Cloud - Support Pack. - - - Download the CSP software onto the XenServer host from one of the following - links: - For hotfix XS602E005: http://coltrane.eng.hq.xensource.com/release/XenServer-6.x/XS-6.0.2/hotfixes/XS602E005/56710/xe-phase-2/xenserver-cloud-supp.tgz - For hotfix XS602E007: http://coltrane.eng.hq.xensource.com/release/XenServer-6.x/XS-6.0.2/hotfixes/XS602E007/57824/xe-phase-2/xenserver-cloud-supp.tgz - - - Extract the file: - # tar xf xenserver-cloud-supp.tgz - - - Run the following script: - # xe-install-supplemental-pack xenserver-cloud-supp.iso - - - If the XenServer host is part of a zone that uses basic networking, disable - Open vSwitch (OVS): - # xe-switch-network-backend bridge - - - - - Reboot this XenServer host. - - - Run the following: - /opt/xensource/bin/setupxenserver.sh - - If the message "mv: cannot stat `/etc/cron.daily/logrotate': No such file or - directory" appears, you can safely ignore it. - - - - Run the following: - for pbd in `xe pbd-list currently-attached=false| grep ^uuid | awk '{print $NF}'`; do xe pbd-plug uuid=$pbd ; - - - On each slave host in the Xen pool, repeat these steps, starting from "manually - live migrate VMs." 
- - - - - - Troubleshooting Tip - If passwords which you know to be valid appear not to work after upgrade, or other UI - issues are seen, try clearing your browser cache and reloading the UI page. - -
-
- Upgrade from 2.2.14 to 4.2.0 - - - Ensure that you query your IPaddress usage records and process them; for example, - issue invoices for any usage that you have not yet billed users for. - Starting in 3.0.2, the usage record format for IP addresses is the same as the rest - of the usage types. Instead of a single record with the assignment and release dates, - separate records are generated per aggregation period with start and end dates. After - upgrading to 4.2.0, any existing IP address usage records in the old format will no - longer be available. - - - If you are using version 2.2.0 - 2.2.13, first upgrade to 2.2.14 by using the - instructions in the 2.2.14 - Release Notes. - - KVM Hosts - If KVM hypervisor is used in your cloud, be sure you completed the step to insert - a valid username and password into the host_details table on each KVM node as - described in the 2.2.14 Release Notes. This step is critical, as the database will be - encrypted after the upgrade to 4.2.0. - - - - While running the 2.2.14 system, log in to the UI as root administrator. - - - Using the UI, add a new System VM template for each hypervisor type that is used in - your cloud. In each zone, add a system VM template for each hypervisor used in that - zone - - - In the left navigation bar, click Templates. - - - In Select view, click Templates. - - - Click Register template. - The Register template dialog box is displayed. 
- - - In the Register template dialog box, specify the following values depending on - the hypervisor type (do not change these): - - - - - - - Hypervisor - Description - - - - - XenServer - Name: systemvm-xenserver-4.2.0 - Description: systemvm-xenserver-4.2.0 - URL:http://download.cloud.com/templates/4.2/systemvmtemplate-2013-07-12-master-xen.vhd.bz2 - Zone: Choose the zone where this hypervisor is used - Hypervisor: XenServer - Format: VHD - OS Type: Debian GNU/Linux 6.0 (32-bit) - Extractable: no - Password Enabled: no - Public: no - Featured: no - - - - KVM - Name: systemvm-kvm-4.2.0 - Description: systemvm-kvm-4.2.0 - URL: - http://download.cloud.com/templates/4.2/systemvmtemplate-2013-06-12-master-kvm.qcow2.bz2 - Zone: Choose the zone where this hypervisor is used - Hypervisor: KVM - Format: QCOW2 - OS Type: Debian GNU/Linux 5.0 (32-bit) - Extractable: no - Password Enabled: no - Public: no - Featured: no - - - - VMware - Name: systemvm-vmware-4.2.0 - Description: systemvm-vmware-4.2.0 - URL: - http://download.cloud.com/templates/4.2/systemvmtemplate-4.2-vh7.ova - Zone: Choose the zone where this hypervisor is used - Hypervisor: VMware - Format: OVA - OS Type: Debian GNU/Linux 5.0 (32-bit) - Extractable: no - Password Enabled: no - Public: no - Featured: no - - - - - - - - - - Watch the screen to be sure that the template downloads successfully and enters the - READY state. Do not proceed until this is successful - - - WARNING: If you use more than one type of - hypervisor in your cloud, be sure you have repeated these steps to download the system - VM template for each hypervisor type. Otherwise, the upgrade will fail. - - - Stop all Usage Servers if running. Run this on all Usage Server hosts. - # service cloud-usage stop - - - Stop the Management Servers. Run this on all Management Server hosts. - # service cloud-management stop - - - On the MySQL master, take a backup of the MySQL databases. We recommend performing - this step even in test upgrades. 
If there is an issue, this will assist with - debugging. - In the following commands, it is assumed that you have set the root password on the - database, which is a CloudStack recommended best practice. Substitute your own MySQL - root password. - # mysqldump -u root -pmysql_password cloud > cloud-backup.dmp - # mysqldump -u root -pmysql_password cloud_usage > cloud-usage-backup.dmp - - - - Either build RPM/DEB packages as detailed in the Installation Guide, or use one of - the community provided yum/apt repositories to gain access to the &PRODUCT; binaries. - - - - If you are using Ubuntu, follow this procedure to upgrade your packages. If not, - skip to step . - - Community Packages - This section assumes you're using the community supplied packages for &PRODUCT;. - If you've created your own packages and APT repository, substitute your own URL for - the ones used in these examples. - - - - The first order of business will be to change the sources list for each system - with &PRODUCT; packages. This means all management servers, and any hosts that have - the KVM agent. (No changes should be necessary for hosts that are running VMware or - Xen.) - Start by opening /etc/apt/sources.list.d/cloudstack.list on - any systems that have &PRODUCT; packages installed. - This file should have one line, which contains: - deb http://cloudstack.apt-get.eu/ubuntu precise 4.0 - We'll change it to point to the new package repository: - deb http://cloudstack.apt-get.eu/ubuntu precise 4.2 - If you're using your own package repository, change this line to read as - appropriate for your 4.2.0 repository. - - - Now update your apt package list: - $ sudo apt-get update - - - Now that you have the repository configured, it's time to install the - cloudstack-management package. This will pull in any other - dependencies you need. 
- $ sudo apt-get install cloudstack-management - - - On KVM hosts, you will need to manually install the - cloudstack-agent package: - $ sudo apt-get install cloudstack-agent - During the installation of cloudstack-agent, APT will copy - your agent.properties, log4j-cloud.xml, - and environment.properties from - /etc/cloud/agent to - /etc/cloudstack/agent. - When prompted whether you wish to keep your configuration, say Yes. - - - Verify that the file - /etc/cloudstack/agent/environment.properties has a line that - reads: - paths.script=/usr/share/cloudstack-common - If not, add the line. - - - Restart the agent: - -service cloud-agent stop -killall jsvc -service cloudstack-agent start - - - - During the upgrade, log4j-cloud.xml was simply copied over, - so the logs will continue to be added to - /var/log/cloud/agent/agent.log. There's nothing - wrong with this, but if you prefer to be consistent, you can - change this by copying over the sample configuration file: - -cd /etc/cloudstack/agent -mv log4j-cloud.xml.dpkg-dist log4j-cloud.xml -service cloudstack-agent restart - - - - Once the agent is running, you can uninstall the old cloud-* packages from your - system: - sudo dpkg --purge cloud-agent - - - - - If you are using CentOS or RHEL, follow this procedure to upgrade your packages. If - not, skip to step . - - Community Packages - This section assumes you're using the community supplied packages for &PRODUCT;. - If you've created your own packages and yum repository, substitute your own URL for - the ones used in these examples. - - - - The first order of business will be to change the yum repository for each system - with &PRODUCT; packages. This means all management servers, and any hosts that have - the KVM agent. (No changes should be necessary for hosts that are running VMware or - Xen.) - Start by opening /etc/yum.repos.d/cloudstack.repo on any - systems that have &PRODUCT; packages installed. 
- This file should have content similar to the following: - -[apache-cloudstack] -name=Apache CloudStack -baseurl=http://cloudstack.apt-get.eu/rhel/4.0/ -enabled=1 -gpgcheck=0 - - If you are using the community provided package repository, change the baseurl - to http://cloudstack.apt-get.eu/rhel/4.2/ - If you're using your own package repository, change this line to read as - appropriate for your 4.2.0 repository. - - - Now that you have the repository configured, it's time to install the - cloudstack-management package by upgrading the older - cloud-client package. - $ sudo yum upgrade cloud-client - - - For KVM hosts, you will need to upgrade the cloud-agent - package, similarly installing the new version as - cloudstack-agent. - $ sudo yum upgrade cloud-agent - During the installation of cloudstack-agent, the RPM will - copy your agent.properties, - log4j-cloud.xml, and - environment.properties from - /etc/cloud/agent to - /etc/cloudstack/agent. - - - Verify that the file - /etc/cloudstack/agent/environment.properties has a line that - reads: - paths.script=/usr/share/cloudstack-common - If not, add the line. - - - Restart the agent: - -service cloud-agent stop -killall jsvc -service cloudstack-agent start - - - - - - If you have made changes to your existing copy of the file components.xml in your - previous-version CloudStack installation, the changes will be preserved in the upgrade. - However, you need to do the following steps to place these changes in a new version of - the file which is compatible with version 4.0.0-incubating. - - How will you know whether you need to do this? If the upgrade output in the - previous step included a message like the following, then some custom content was - found in your old components.xml, and you need to merge the two files: - - warning: /etc/cloud/management/components.xml created as /etc/cloud/management/components.xml.rpmnew - - - Make a backup copy of your - /etc/cloud/management/components.xml file. 
For - example: - # mv /etc/cloud/management/components.xml /etc/cloud/management/components.xml-backup - - - Copy /etc/cloud/management/components.xml.rpmnew to create - a new /etc/cloud/management/components.xml: - # cp -ap /etc/cloud/management/components.xml.rpmnew /etc/cloud/management/components.xml - - - Merge your changes from the backup file into the new components.xml file. - # vi /etc/cloud/management/components.xml - - - - - - After upgrading to 4.2, API clients are expected to send plain text passwords for - login and user creation, instead of MD5 hash. If API client changes are not acceptable, - following changes are to be made for backward compatibility: - Modify componentsContext.xml, and make PlainTextUserAuthenticator as the default - authenticator (1st entry in the userAuthenticators adapter list is default) - -<!-- Security adapters --> -<bean id="userAuthenticators" class="com.cloud.utils.component.AdapterList"> - <property name="Adapters"> - <list> - <ref bean="PlainTextUserAuthenticator"/> - <ref bean="MD5UserAuthenticator"/> - <ref bean="LDAPUserAuthenticator"/> - </list> - </property> -</bean> - - PlainTextUserAuthenticator works the same way MD5UserAuthenticator worked prior to - 4.2. - - - If you have made changes to your existing copy of the - /etc/cloud/management/db.properties file in your previous-version - CloudStack installation, the changes will be preserved in the upgrade. However, you need - to do the following steps to place these changes in a new version of the file which is - compatible with version 4.0.0-incubating. - - - Make a backup copy of your file - /etc/cloud/management/db.properties. 
For example: - # mv /etc/cloud/management/db.properties /etc/cloud/management/db.properties-backup - - - Copy /etc/cloud/management/db.properties.rpmnew to create a - new /etc/cloud/management/db.properties: - # cp -ap /etc/cloud/management/db.properties.rpmnew /etc/cloud/management/db.properties - - - Merge your changes from the backup file into the new db.properties file. - # vi /etc/cloud/management/db.properties - - - - - On the management server node, run the following command. It is recommended that you - use the command-line flags to provide your own encryption keys. See Password and Key - Encryption in the Installation Guide. - # cloudstack-setup-encryption -e encryption_type -m management_server_key -k database_key - When used without arguments, as in the following example, the default encryption - type and keys will be used: - - - (Optional) For encryption_type, use file or web to indicate the technique used - to pass in the database encryption password. Default: file. - - - (Optional) For management_server_key, substitute the default key that is used to - encrypt confidential parameters in the properties file. Default: password. It is - highly recommended that you replace this with a more secure value - - - (Optional) For database_key, substitute the default key that is used to encrypt - confidential parameters in the CloudStack database. Default: password. It is highly - recommended that you replace this with a more secure value. - - - - - Repeat steps 10 - 14 on every management server node. If you provided your own - encryption key in step 14, use the same key on all other management servers. - - - Start the first Management Server. Do not start any other Management Server nodes - yet. - # service cloudstack-management start - Wait until the databases are upgraded. Ensure that the database upgrade is complete. - You should see a message like "Complete! Done." 
After confirmation, start the other - Management Servers one at a time by running the same command on each node. - - - Start all Usage Servers (if they were running on your previous version). Perform - this on each Usage Server host. - # service cloudstack-usage start - - - (KVM only) Additional steps are required for each KVM host. These steps will not - affect running guests in the cloud. These steps are required only for clouds using KVM - as hosts and only on the KVM hosts. - - - Copy the CloudPlatform 4.2 tar file to the host, untar it, and change directory - to the resulting directory. - - - Stop the running agent. - # service cloud-agent stop - - - Update the agent software. - # ./install.sh - - - Choose "U" to update the packages. - - - Start the agent. - # service cloudstack-agent start - - - - - (KVM only) Perform the following additional steps on each KVM host. - These steps will not affect running guests in the cloud. These steps are required - only for clouds using KVM as hosts and only on the KVM hosts. - - - Configure your CloudStack package repositories as outlined in the Installation - Guide - - - Stop the running agent. - # service cloud-agent stop - - - Update the agent software with one of the following command sets as - appropriate. - # yum update cloud-* - - # apt-get update - # apt-get upgrade cloud-* - - - - Start the agent. - # service cloudstack-agent start - - - Copy the contents of the agent.properties file to the new - agent.properties file by using the following command - sed -i 's/com.cloud.agent.resource.computing.LibvirtComputingResource/com.cloud.hypervisor.kvm.resource.LibvirtComputingResource/g' /etc/cloud/agent/agent.properties - - - Start the cloud agent and cloud management services. - - - When the Management Server is up and running, log in to the CloudStack UI and - restart the virtual router for proper functioning of all the features. - - - - - Log in to the CloudStack UI as admin, and check the status of the hosts. 
All hosts - should come to Up state (except those that you know to be offline). You may need to wait - 20 or 30 minutes, depending on the number of hosts. - Do not proceed to the next step until the hosts show in the Up state. If the hosts - do not come to the Up state, contact support. - - - Run the following script to stop, then start, all Secondary Storage VMs, Console - Proxy VMs, and virtual routers. - - - Run the command once on one management server. Substitute your own IP address of - the MySQL instance, the MySQL user to connect as, and the password to use for that - user. In addition to those parameters, provide the "-c" and "-r" arguments. For - example: - # nohup cloud-sysvmadm -d 192.168.1.5 -u cloud -p password -c -r > sysvm.log 2>&1 & - # tail -f sysvm.log - This might take up to an hour or more to run, depending on the number of - accounts in the system. - - - After the script terminates, check the log to verify correct execution: - # tail -f sysvm.log - The content should be like the following: - - Stopping and starting 1 secondary storage vm(s)... - Done stopping and starting secondary storage vm(s) - Stopping and starting 1 console proxy vm(s)... - Done stopping and starting console proxy vm(s). - Stopping and starting 4 running routing vm(s)... - Done restarting router(s). - - - - - - If you would like additional confirmation that the new system VM templates were - correctly applied when these system VMs were rebooted, SSH into the System VM and check - the version. - Use one of the following techniques, depending on the hypervisor. - - XenServer or KVM: - SSH in by using the link local IP address of the system VM. For example, in the - command below, substitute your own path to the private key used to log in to the - system VM and your own link local IP. 
- - Run the following commands on the XenServer or KVM host on which the system VM is - present: - # ssh -i private-key-path link-local-ip -p 3922 - # cat /etc/cloudstack-release - The output should be like the following: - Cloudstack Release 4.0.0-incubating Mon Oct 9 15:10:04 PST 2012 - - ESXi - SSH in using the private IP address of the system VM. For example, in the command - below, substitute your own path to the private key used to log in to the system VM and - your own private IP. - - Run the following commands on the Management Server: - # ssh -i private-key-path private-ip -p 3922 - # cat /etc/cloudstack-release - - The output should be like the following: - Cloudstack Release 4.0.0-incubating Mon Oct 9 15:10:04 PST 2012 - - - If needed, upgrade all Citrix XenServer hypervisor hosts in your cloud to a version - supported by CloudStack 4.0.0-incubating. The supported versions are XenServer 5.6 SP2 - and 6.0.2. Instructions for upgrade can be found in the CloudStack 4.0.0-incubating - Installation Guide. - - - Apply the XenServer hotfix XS602E003 (and any other needed hotfixes) to XenServer - v6.0.2 hypervisor hosts. - - - Disconnect the XenServer cluster from CloudStack. - In the left navigation bar of the CloudStack UI, select Infrastructure. Under - Clusters, click View All. Select the XenServer cluster and click Actions - - Unmanage. - This may fail if there are hosts not in one of the states Up, Down, - Disconnected, or Alert. You may need to fix that before unmanaging this - cluster. - Wait until the status of the cluster has reached Unmanaged. Use the CloudStack - UI to check on the status. When the cluster is in the unmanaged state, there is no - connection to the hosts in the cluster. 
- - - To clean up the VLAN, log in to one XenServer host and run: - /opt/xensource/bin/cloud-clean-vlan.sh - - - Prepare the upgrade by running the following on one XenServer host: - /opt/xensource/bin/cloud-prepare-upgrade.sh - If you see a message like "can't eject CD", log in to the VM and umount the CD, - then run this script again. - - - Upload the hotfix to the XenServer hosts. Always start with the Xen pool master, - then the slaves. Using your favorite file copy utility (e.g. WinSCP), copy the - hotfixes to the host. Place them in a temporary folder such as /root or /tmp. - On the Xen pool master, upload the hotfix with this command: - xe patch-upload file-name=XS602E003.xsupdate - Make a note of the output from this command, which is a UUID for the hotfix - file. You'll need it in another step later. - - (Optional) If you are applying other hotfixes as well, you can repeat the - commands in this section with the appropriate hotfix number. For example, - XS602E004.xsupdate. - - - - Manually live migrate all VMs on this host to another host. First, get a list of - the VMs on this host: - # xe vm-list - Then use this command to migrate each VM. Replace the example host name and VM - name with your own: - # xe vm-migrate live=true host=host-name vm=VM-name - - Troubleshooting - If you see a message like "You attempted an operation on a VM which requires - PV drivers to be installed but the drivers were not detected," run: - /opt/xensource/bin/make_migratable.sh - b6cf79c8-02ee-050b-922f-49583d9f1a14. - - - - Apply the hotfix. First, get the UUID of this host: - # xe host-list - Then use the following command to apply the hotfix. Replace the example host - UUID with the current host ID, and replace the hotfix UUID with the output from the - patch-upload command you ran on this machine earlier. You can also get the hotfix - UUID by running xe patch-list. 
- xe patch-apply host-uuid=host-uuid - uuid=hotfix-uuid - - - Copy the following files from the CloudStack Management Server to the - host. - - - - - - - Copy from here... - ...to here - - - - - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/xenserver60/NFSSR.py - /opt/xensource/sm/NFSSR.py - - - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/setupxenserver.sh - /opt/xensource/bin/setupxenserver.sh - - - /usr/lib64/cloudstack-common/scripts/vm/hypervisor/xenserver/make_migratable.sh - /opt/xensource/bin/make_migratable.sh - - - - - - - (Only for hotfixes XS602E005 and XS602E007) You need to apply a new Cloud - Support Pack. - - - Download the CSP software onto the XenServer host from one of the following - links: - For hotfix XS602E005: http://coltrane.eng.hq.xensource.com/release/XenServer-6.x/XS-6.0.2/hotfixes/XS602E005/56710/xe-phase-2/xenserver-cloud-supp.tgz - For hotfix XS602E007: http://coltrane.eng.hq.xensource.com/release/XenServer-6.x/XS-6.0.2/hotfixes/XS602E007/57824/xe-phase-2/xenserver-cloud-supp.tgz - - - Extract the file: - # tar xf xenserver-cloud-supp.tgz - - - Run the following script: - # xe-install-supplemental-pack - xenserver-cloud-supp.iso - - - If the XenServer host is part of a zone that uses basic networking, disable - Open vSwitch (OVS): - # xe-switch-network-backend bridge - - - - - Reboot this XenServer host. - - - Run the following: - /opt/xensource/bin/setupxenserver.sh - - If the message "mv: cannot stat `/etc/cron.daily/logrotate': No such file or - directory" appears, you can safely ignore it. - - - - Run the following: - for pbd in `xe pbd-list currently-attached=false| grep ^uuid | awk - '{print $NF}'`; do xe pbd-plug uuid=$pbd ; - - - - On each slave host in the Xen pool, repeat these steps, starting from "manually - live migrate VMs." - - - - -
-
- - API Changes in 4.2 -
- Added API Commands in 4.2 -
- Secondary Storage - - - addImageStore (Adds all types of secondary storage providers, S3/Swift/NFS) - - - createSecondaryStagingStore (Adds a staging secondary storage in each zone) - - - listImageStores (Lists all secondary storages, S3/Swift/NFS) - - - listSecondaryStagingStores (Lists all staging secondary storages) - - - addS3 (Adds an Amazon Simple Storage Service instance.) It is recommended to use - addImageStore instead. - - - listS3s (Lists all the Amazon Simple Storage Service instances.) It is recommended - to use listImageStores instead. - - -
-
- VM Snapshot - - - createVMSnapshot (Creates a virtual machine snapshot; see ) - - - deleteVMSnapshot (Deletes a virtual machine snapshot) - - - listVMSnapshot (Shows a virtual machine snapshot) - - - revertToVMSnapshot (Returns a virtual machine to the state and data saved in a - given snapshot) - - -
-
- Load Balancer Health Check - - - createLBHealthCheckPolicy (Creates a new health check policy for a load balancer - rule; see ) - - - deleteLBHealthCheckPolicy (Deletes an existing health check policy from a load - balancer rule) - - - listLBHealthCheckPolicies (Displays the health check policy for a load balancer - rule) - - -
-
- Egress Firewall Rules - - - createEgressFirewallRules (Creates an egress firewall rule on the guest network; - see ) - - - deleteEgressFirewallRules (Deletes an egress firewall rule on the guest - network.) - - - listEgressFirewallRules (Lists the egress firewall rules configured for a guest - network.) - - -
-
- SSH Key - - - resetSSHKeyForVirtualMachine (Resets the SSHkey for virtual machine.) - - -
-
- Bare Metal - - - addBaremetalHost (Adds a new host. Technically, this API command was present in - v3.0.6, but its functionality was disabled. See ) - - - addBaremetalDhcp (Adds a DHCP server for bare metal hosts) - - - addBaremetalPxePingServer (Adds a PXE PING server for bare metal hosts) - - - addBaremetalPxeKickStartServer (Adds a PXE server for bare metal hosts) - - - listBaremetalDhcp (Shows the DHCP servers currently defined for bare metal - hosts) - - - listBaremetalPxePingServer (Shows the PXE PING servers currently defined for bare - metal hosts) - - -
-
- NIC - - - addNicToVirtualMachine (Adds a new NIC to the specified VM on a selected network; - see ) - - - removeNicFromVirtualMachine (Removes the specified NIC from a selected VM.) - - - updateDefaultNicForVirtualMachine (Updates the specified NIC to be the default one - for a selected VM.) - - - addIpToNic (Assigns secondary IP to a NIC.) - - - removeIpFromNic (Removes a secondary IP from a NIC.) - - - listNics (Lists the NICs associated with a VM.) - - -
-
- Regions - - - addRegion (Registers a Region into another Region; see ) - - - updateRegion (Updates Region details: ID, Name, Endpoint, User API Key, and User - Secret Key.) - - - removeRegion (Removes a Region from current Region.) - - - listRegions (Get all the Regions. They can be filtered by using the ID or - Name.) - - -
-
- User - - - getUser (This API can only be used by the Admin. Get user account details by using - the API Key.) - - -
-
- API Throttling - - - getApiLimit (Show number of remaining APIs for the invoking user in current - window) - - - resetApiLimit (Resets the API count. For root admin, if accountId parameter is passed, it will reset - count for that particular account, otherwise it will reset all counters) - - -
-
- Locking - - - lockAccount (Locks an account) - - - lockUser (Locks a user account) - - -
-
- VM Scaling - - - scaleVirtualMachine (Scales the virtual machine to a new service offering.) - - -
-
- Migrate Volume - - - migrateVirtualMachineWithVolume (Attempts migrating VM with its volumes to a - different host.) - - - listStorageProviders (Lists storage providers.) - - - findStoragePoolsForMigration (Lists storage pools available for migrating a - volume.) - - -
-
- Dedicated IP and VLAN - - - dedicatePublicIpRange (Dedicates a Public IP range to an account.) - - - releasePublicIpRange (Releases a Public IP range back to the system pool.) - - - dedicateGuestVlanRange (Dedicates a guest VLAN range to an account.) - - - releaseDedicatedGuestVlanRange (Releases a dedicated guest VLAN range to the - system.) - - - listDedicatedGuestVlanRanges (Lists dedicated guest VLAN ranges.) - - -
-
- Port Forwarding - - - updatePortForwardingRule (Updates a port forwarding rule. Only the private port - and the VM can be updated.) - - -
-
- Scale System VM - - - scaleSystemVm (Scale the service offering for a systemVM, console proxy, or - secondary storage.) - - -
-
- Deployment Planner - - - listDeploymentPlanners (Lists all the deployment planners available.) - - -
-
- Archive and Delete Events and Alerts - - - archiveEvents (Archive one or more events.) - - - deleteEvents (Delete one or more events.) - - - archiveAlerts (Archive one or more alerts.) - - - deleteAlerts (Delete one or more alerts.) - - -
-
- Host Reservation - - - releaseHostReservation (Releases host reservation.) - - -
-
- Resize Volume - - - resizeVolume (Resizes a volume.) - - - updateVolume (Updates the volume.) - - -
-
- Egress Firewall Rules - - - createEgressFirewallRule (Creates an egress firewall rule for a given network. ) - - - - deleteEgressFirewallRule (Deletes an egress firewall rule.) - - - listEgressFirewallRules (Lists all egress firewall rules for a network.) - - -
-
- Network ACL - - - updateNetworkACLItem (Updates ACL item with specified ID.) - - - createNetworkACLList (Creates a Network ACL for the given VPC.) - - - deleteNetworkACLList (Deletes a Network ACL.) - - - replaceNetworkACLList (Replaces ACL associated with a Network or private gateway.) - - - - listNetworkACLLists (Lists all network ACLs.) - - -
-
- Resource Detail - - - addResourceDetail (Adds detail for the Resource.) - - - removeResourceDetail (Removes detail for the Resource.) - - - listResourceDetails (List resource details.) - - -
-
- Nicira Integration - - - addNiciraNvpDevice (Adds a Nicira NVP device.) - - - deleteNiciraNvpDevice (Deletes a Nicira NVP device.) - - - listNiciraNvpDevices (Lists Nicira NVP devices.) - - - listNiciraNvpDeviceNetworks (Lists network that are using a Nicira NVP device.) - - - -
-
- BigSwitch VNS - - - addBigSwitchVnsDevice (Adds a BigSwitch VNS device.) - - - deleteBigSwitchVnsDevice (Deletes a BigSwitch VNS device.) - - - listBigSwitchVnsDevices (Lists BigSwitch VNS devices.) - - -
-
- Simulator - - - configureSimulator (Configures a simulator.) - - -
-
- API Discovery - - - listApis (Lists all the available APIs on the server, provided by the API - Discovery plugin.) - - -
-
- Global Load Balancer - - - createGlobalLoadBalancerRule (Creates a global load balancer rule.) - - - deleteGlobalLoadBalancerRule (Deletes a global load balancer rule.) - - - updateGlobalLoadBalancerRule (update global load balancer rules.) - - - listGlobalLoadBalancerRules (Lists load balancer rules.) - - - assignToGlobalLoadBalancerRule (Assign load balancer rule or list of load balancer - rules to a global load balancer rules.) - - - removeFromGlobalLoadBalancerRule (Removes a load balancer rule association with - global load balancer rule) - - -
-
- Load Balancer - - - createLoadBalancer (Creates a Load Balancer) - - - listLoadBalancers (Lists Load Balancers) - - - deleteLoadBalancer (Deletes a load balancer) - - - configureInternalLoadBalancerElement (Configures an Internal Load Balancer - element.) - - - createInternalLoadBalancerElement (Create an Internal Load Balancer element.) - - - - listInternalLoadBalancerElements (Lists all available Internal Load Balancer - elements.) - - -
-
- Affinity Group - - - createAffinityGroup (Creates an affinity or anti-affinity group.) - - - deleteAffinityGroup (Deletes an affinity group.) - - - listAffinityGroups (Lists all the affinity groups.) - - - updateVMAffinityGroup (Updates the affinity or anti-affinity group associations of - a VM. The VM has to be stopped and restarted for the new properties to take effect.) - - - - listAffinityGroupTypes (Lists affinity group types available.) - - -
-
- Portable IP - - - createPortableIpRange (Adds a range of portable IPs to a Region.) - - - deletePortableIpRange (Deletes a range of portable IPs associated with a - Region.) - - - listPortableIpRanges (Lists portable IP ranges.) - - -
-
- Internal Load Balancer VM - - - stopInternalLoadBalancerVM (Stops an Internal LB VM.) - - - startInternalLoadBalancerVM (Starts an existing Internal LB VM.) - - - listInternalLoadBalancerVMs (List internal LB VMs.) - - -
-
- Network Isolation - - - listNetworkIsolationMethods (Lists supported methods of network isolation.) - - - -
-
- Dedicated Resources - - - dedicateZone (Dedicates a zone.) - - - dedicatePod (Dedicates a pod.) - - - dedicateCluster (Dedicate an existing cluster.) - - - dedicateHost (Dedicates a host.) - - - releaseDedicatedZone (Release dedication of zone.) - - - releaseDedicatedPod (Release dedication for the pod.) - - - releaseDedicatedCluster (Release dedication for cluster.) - - - releaseDedicatedHost (Release dedication for host.) - - - listDedicatedZones (List dedicated zones.) - - - listDedicatedPods (Lists dedicated pods.) - - - listDedicatedClusters (Lists dedicated clusters.) - - - listDedicatedHosts (Lists dedicated hosts.) - - -
-
-
- Changed API Commands in 4.2 - - - - - - - - API Commands - - - Description - - - - - - - listNetworkACLs - - - The following new request parameters are added: aclid (optional), action - (optional), protocol (optional) - The following new response parameters are added: aclid, action, number - - - - - copyTemplate - - - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - listRouters - - - The following new response parameters are added: ip6dns1, ip6dns2, role - - - - - updateConfiguration - - - The following new request parameters are added: accountid (optional), - clusterid (optional), storageid (optional), zoneid (optional) - The following new response parameters are added: id, scope - - - - - listVolumes - - - The following request parameter is removed: details - The following new response parameter is added: displayvolume - - - - - suspendProject - - - The following new response parameters are added: cpuavailable, cpulimit, - cputotal, ipavailable, iplimit, iptotal, memoryavailable, memorylimit, - memorytotal, networkavailable, networklimit, networktotal, - primarystorageavailable, primarystoragelimit, primarystoragetotal, - secondarystorageavailable, secondarystoragelimit, secondarystoragetotal, - snapshotavailable, snapshotlimit, snapshottotal, templateavailable, templatelimit, - templatetotal, vmavailable, vmlimit, vmrunning, vmstopped, vmtotal, - volumeavailable, volumelimit, volumetotal, vpcavailable, vpclimit, vpctotal - - - - - - listRemoteAccessVpns - - - The following new response parameters are added: id - - - - - registerTemplate - - - The following new request parameters are added: imagestoreuuid (optional), - isdynamicallyscalable (optional), isrouting (optional) - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - addTrafficMonitor - - - The following response parameters are removed: privateinterface, privatezone, - publicinterface, publiczone, 
usageinterface, username - - - - - createTemplate - - - The following response parameters are removed: clusterid, clustername, - disksizeallocated, disksizetotal, disksizeused, ipaddress, path, podid, podname, - state, tags, type - The following new response parameters are added: account, accountid, bootable, - checksum, crossZones, details, displaytext, domain, domainid, format, hostid, - hostname, hypervisor, isdynamicallyscalable, isextractable, isfeatured, ispublic, - isready, ostypeid, ostypename, passwordenabled, project, projectid, removed, size, - sourcetemplateid, sshkeyenabled, status, templatetag, templatetype, tags - - - - - listLoadBalancerRuleInstances - - - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - migrateVolume - - - The following new request parameters is added: livemigrate (optional) - The following new response parameters is added: displayvolume - - - - - createAccount - - - The following new request parameters are added: accountid (optional), userid - (optional) - The following new response parameters are added: accountdetails, cpuavailable, - cpulimit, cputotal, defaultzoneid, ipavailable, iplimit, iptotal, - iscleanuprequired, isdefault, memoryavailable, memorylimit, memorytotal, name, - networkavailable, networkdomain, networklimit, networktotal, - primarystorageavailable, primarystoragelimit, primarystoragetotal, - projectavailable, projectlimit, projecttotal, receivedbytes, - secondarystorageavailable, secondarystoragelimit, secondarystoragetotal, - sentbytes, snapshotavailable, snapshotlimit, snapshottotal, templateavailable, - templatelimit, templatetotal, vmavailable, vmlimit, vmrunning, vmstopped, vmtotal, - volumeavailable, volumelimit, volumetotal, vpcavailable, vpclimit, vpctotal, - user - The following parameters are removed: account, accountid, apikey, created, - email, firstname, lastname, secretkey, timezone, 
username - - - - - updatePhysicalNetwork - - - The following new request parameters is added: removevlan (optional) - - - - - listTrafficMonitors - - - The following response parameters are removed: privateinterface, privatezone, - publicinterface, publiczone, usageinterface, username - - - - - attachIso - - - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listProjects - - - The following new request parameters are added: cpuavailable, cpulimit, - cputotal, ipavailable, iplimit, iptotal, memoryavailable, memorylimit, - memorytotal, networkavailable, networklimit, networktotal, - primarystorageavailable, primarystoragelimit, primarystoragetotal, - secondarystorageavailable, secondarystoragelimit, secondarystoragetotal, - snapshotavailable, snapshotlimit, snapshottotal, templateavailable, templatelimit, - templatetotal, vmavailable, vmlimit, vmrunning, vmstopped, vmtotal, - volumeavailable, volumelimit, volumetotal, vpcavailable, vpclimit, vpctotal - - - - - - enableAccount - - - The following new response parameters are added: cpuavailable, cpulimit, - cputotal, isdefault, memoryavailable, memorylimit, memorytotal, - primarystorageavailable, primarystoragelimit, primarystoragetotal, - secondarystorageavailable, secondarystoragelimit, secondarystoragetotal - - - - - listPublicIpAddresses - - - The following new response parameters are added: isportable, vmipaddress - - - - - - enableStorageMaintenance - - - The following new response parameters are added: hypervisor, scope, - suitableformigration - - - - - listLoadBalancerRules - - - The following new request parameters is added: networkid (optional) - The following new response parameters is added: networkid - - - - - stopRouter - - - The following new response parameters are added: ip6dns1, ip6dns2, role - - - - - - listClusters - - - The following new response parameters are added: cpuovercommitratio, 
- memoryovercommitratio - - - - - attachVolume - - - The following new response parameter is added: displayvolume - - - - - updateVPCOffering - - - The following request parameters is made mandatory: id - - - - - resetSSHKeyForVirtualMachine - - - The following new request parameter is added: keypair (required) - The following parameter is removed: name - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - updateCluster - - - The following request parameters are removed: cpuovercommitratio, - memoryovercommitratio (optional) - - - - - listPrivateGateways - - - The following new response parameters are added: aclid, sourcenatsupported - - - - - - ldapConfig - - - The following new request parameters are added: listall (optional) - The following parameters has been made optional: searchbase, hostname, - queryfilter - The following new response parameter is added: ssl - - - - - listTemplates - - - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - listNetworks - - - The following new response parameters are added: aclid, displaynetwork, - ip6cidr, ip6gateway, ispersistent, networkcidr, reservediprange - - - - - restartNetwork - - - The following new response parameters are added: isportable, vmipaddress - - - - - - prepareTemplate - - - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - rebootVirtualMachine - - - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - changeServiceForRouter - - - The following new request parameters are added: aclid (optional), action - (optional), protocol (optional) - The following new response parameters are added: id, scope - - - - - updateZone - - - The following new request parameters are added: ip6dns1 (optional), ip6dns2 - 
(optional) - The following new response parameters are added: ip6dns1, ip6dns2 - - - - - ldapRemove - - - The following new response parameters are added: ssl - - - - - updateServiceOffering - - - The following new response parameters are added: deploymentplanner, isvolatile - - - - - - updateStoragePool - - - The following new response parameters are added: hypervisor, scope, - suitableformigration - - - - - listFirewallRules - - - The following request parameter is removed: traffictype - The following new response parameters are added: networkid - - - - - updateUser - - - The following new response parameters are added: iscallerchilddomain, - isdefault - - - - - updateProject - - - The following new response parameters are added: cpuavailable, cpulimit, - cputotal, ipavailable, iplimit, iptotal, memoryavailable, memorylimit, - memorytotal, networkavailable, networklimit, networktotal, - primarystorageavailable, primarystoragelimit, primarystoragetotal, - secondarystorageavailable, secondarystoragelimit, secondarystoragetotal, - snapshotavailable, snapshotlimit, snapshottotal, templateavailable, templatelimit, - templatetotal, vmavailable, vmlimit, vmrunning, vmstopped, vmtotal, - volumeavailable, volumelimit, volumetotal, vpcavailable, vpclimit, vpctotal - - - - - - updateTemplate - - - The following new request parameters are added: isdynamicallyscalable - (optional), isrouting (optional) - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - disableUser - - - The following new response parameters are added: iscallerchilddomain, - isdefault - - - - - activateProject - - - The following new response parameters are added: cpuavailable, cpulimit, - cputotal, ipavailable, iplimit, iptotal, memoryavailable, memorylimit, - memorytotal, networkavailable, networklimit, networktotal, - primarystorageavailable, primarystoragelimit, primarystoragetotal, - secondarystorageavailable, secondarystoragelimit, secondarystoragetotal, - 
snapshotavailable, snapshotlimit, snapshottotal, templateavailable, templatelimit, - templatetotal, vmavailable, vmlimit, vmrunning, vmstopped, vmtotal, - volumeavailable, volumelimit, volumetotal, vpcavailable, vpclimit, vpctotal - - - - - - createNetworkACL - - - The following new request parameters are added: aclid (optional), action - (optional), number (optional) - The following request parameter is now optional: networkid - The following new response parameters are added: aclid, action, number - - - - - enableStaticNat - - - The following new request parameters are added: vmguestip (optional) - - - - - registerIso - - - The following new request parameters are added: imagestoreuuid (optional), - isdynamicallyscalable (optional) - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - createIpForwardingRule - - - The following new response parameter is added: vmguestip - - - - - resetPasswordForVirtualMachine - - - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - createVolume - - - The following new request parameter is added: displayvolume (optional) - The following new response parameter is added: displayvolume - - - - - startRouter - - - The following new response parameters are added: ip6dns1, ip6dns2, role - - - - - - listCapabilities - - - The following new response parameters are added: apilimitinterval and - apilimitmax. - See . - - - - - createServiceOffering - - - The following new request parameters are added: deploymentplanner (optional), - isvolatile (optional), serviceofferingdetails (optional). - isvolatie indicates whether the service offering includes Volatile VM - capability, which will discard the VM's root disk and create a new one on reboot. - See . 
- The following new response parameters are added: deploymentplanner, isvolatile - - - - - - restoreVirtualMachine - - - The following request parameter is added: templateID (optional). This is used - to point to the new template ID when the base image is updated. The parameter - templateID can be an ISO ID in case of restore vm deployed using ISO. See . - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - createNetwork - - - The following new request parameters are added: aclid (optional), - displaynetwork (optional), endipv6 (optional), ip6cidr (optional), ip6gateway - (optional), isolatedpvlan (optional), startipv6 (optional) - The following new response parameters are added: aclid, displaynetwork, - ip6cidr, ip6gateway, ispersistent, networkcidr, reservediprange - - - - - createVlanIpRange - - - The following new request parameters are added: startipv6, endipv6, - ip6gateway, ip6cidr - Changed parameters: startip (is now optional) - The following new response parameters are added: startipv6, endipv6, - ip6gateway, ip6cidr - - - - - CreateZone - - - The following new request parameters are added: ip6dns1, ip6dns2 - The following new response parameters are added: ip6dns1, ip6dns2 - - - - - deployVirtualMachine - - - The following request parameters are added: affinitygroupids (optional), - affinitygroupnames (optional), displayvm (optional), ip6address (optional) - The following request parameter is modified: iptonetworklist has a new - possible value, ipv6 - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - createNetworkOffering - - - The following request parameters are added: details (optional), - egressdefaultpolicy (optional), ispersistent (optional) - ispersistent determines if the network or network offering created or listed - by using this 
offering are persistent or not. - The following response parameters are added: details, egressdefaultpolicy, - ispersistent - - - - - listNetworks - - - The following request parameters is added: isPersistent. - This parameter determines if the network or network offering created or listed - by using this offering are persistent or not. - - - - - listNetworkOfferings - - - The following request parameters is added: isPersistent. - This parameter determines if the network or network offering created or listed - by using this offering are persistent or not. - For listNetworkOfferings, the following response parameter has been added: - details, egressdefaultpolicy, ispersistent - - - - - addF5LoadBalancer - configureNetscalerLoadBalancer - addNetscalerLoadBalancer - listF5LoadBalancers - configureF5LoadBalancer - listNetscalerLoadBalancers - - - The following response parameter is removed: inline. - - - - - listRouters - - - For nic responses, the following fields have been added. - - - ip6address - - - ip6gateway - - - ip6cidr - - - - - - - listVirtualMachines - - - The following request parameters are added: affinitygroupid (optional), vpcid - (optional) - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listRouters - listZones - - - For DomainRouter and DataCenter response, the following fields have been - added. - - - ip6dns1 - - - ip6dns2 - - - For listZones, the following optional request parameters are added: name, - networktype - - - - - listFirewallRules - createFirewallRule - - - The following request parameter is added: traffictype (optional). - The following response parameter is added: networkid - - - - - listUsageRecords - - - The following response parameter is added: virtualsize. 
- - - - - deleteIso - - - The following request parameter is removed: forced - - - - - addCluster - - - The following request parameters are added: guestvswitchtype (optional), - guestvswitchtype (optional), publicvswitchtype (optional), publicvswitchtype - (optional) - See . - The following request parameters are removed: cpuovercommitratio, - memoryovercommitratio - - - - - updateCluster - - - The following request parameters are added: cpuovercommitratio, - ramovercommitratio - See . - - - - - createStoragePool - - - The following request parameters are added: hypervisor (optional), provider - (optional), scope (optional) - The following request parameters have been made mandatory: podid, - clusterid - See . - The following response parameter has been added: hypervisor, scope, - suitableformigration - - - - - listStoragePools - - - The following request parameter is added: scope (optional) - See . - The following response parameters are added: hypervisor, scope, - suitableformigration - - - - - updateDiskOffering - - - The following response parameter is added: displayoffering - - - - - changeServiceForVirtualMachine - - - The following response parameter are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - recoverVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listCapabilities - - - The following response parameters are added: apilimitinterval, apilimitmax - - - - - - createRemoteAccessVpn - - - The following response parameters are added: id - - - - - startVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - detachIso - - - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, 
isdynamicallyscalable, affinitygroup - - - - - updateVPC - - - The following request parameters has been made mandatory: id, name - - - - - associateIpAddress - - - The following request parameters are added: isportable (optional), regionid - (optional) - The following response parameters are added: isportable, vmipaddress - - - - - listProjectAccounts - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - ipavailable, iplimit, iptotal, memoryavailable, memorylimit, memorytotal, - networkavailable, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal, snapshotavailable, snapshotlimit, - snapshottotal, templateavailable, templatelimit, templatetotal, vmavailable, - vmlimit, vmrunning, vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, - vpcavailable, vpclimit, vpctotal - - - - - disableAccount - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - - - listPortForwardingRules - - - The following response parameters are added: vmguestip - - - - - migrateVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - cancelStorageMaintenance - - - The following response parameters are added: hypervisor, scope, - suitableformigration - - - - - createPortForwardingRule - - The following request parameter is added: vmguestip (optional) The - following response parameter is added: vmguestip - - - - addVpnUser - - - The following response parameter is added: state - - - - - createVPCOffering - - - The following request parameter is added: serviceproviderlist (optional) - - 
- - - - assignVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listConditions - - - The following response parameters are added: account, counter, domain, - domainid, project, projectid, relationaloperator, threshold - Removed response parameters: name, source, value - - - - - createPrivateGateway - - - The following request parameters are added: aclid (optional), - sourcenatsupported (optional) - The following response parameters are added: aclid, sourcenatsupported - - - - - updateVirtualMachine - - - The following request parameters are added: displayvm (optional), - isdynamicallyscalable (optional) - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - destroyRouter - - - The following response parameters are added: ip6dns1, ip6dns2, role - - - - - listServiceOfferings - - - The following response parameters are added: deploymentplanner, isvolatile - - - - - - listUsageRecords - - - The following response parameters are removed: virtualsize - - - - - createProject - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - ipavailable, iplimit, iptotal, memoryavailable, memorylimit, memorytotal, - networkavailable, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal, snapshotavailable, snapshotlimit, - snapshottotal, templateavailable, templatelimit, templatetotal, vmavailable, - vmlimit, vmrunning, vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, - vpcavailable, vpclimit, vpctotal - - - - - enableUser - - - The following response parameters are added: iscallerchilddomain, isdefault - - - - - - createLoadBalancerRule - - - The following response parameter is added: networkid 
- - - - - updateAccount - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - - - copyIso - - - The following response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - uploadVolume - - - The following request parameters are added: imagestoreuuid (optional), - projectid (optional - The following response parameters are added: displayvolume - - - - - createDomain - - - The following request parameter is added: domainid (optional) - - - - - stopVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listAccounts - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - - - createSnapshot - - - The following response parameter is added: zoneid - - - - - updateIso - - - The following request parameters are added: isdynamicallyscalable (optional), - isrouting (optional) - The following response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - listIpForwardingRules - - - The following response parameter is added: vmguestip - - - - - updateNetwork - - - The following request parameters are added: displaynetwork (optional), - guestvmcidr (optional) - The following response parameters are added: aclid, displaynetwork, ip6cidr, - ip6gateway, ispersistent, networkcidr, reservediprange - - - - - destroyVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, 
affinitygroup - - - - - createDiskOffering - - - The following request parameter is added: displayoffering (optional) - The following response parameter is added: displayoffering - - - - - rebootRouter - - - The following response parameters are added: ip6dns1, ip6dns2, role - - - - - listConfigurations - - - The following request parameters are added: accountid (optional), clusterid - (optional), storageid (optional), zoneid (optional) - The following response parameters are added: id, scope - - - - - createUser - - - The following request parameter is added: userid (optional) - The following response parameters are added: iscallerchilddomain, - isdefault - - - - - listDiskOfferings - - - The following response parameter is added: displayoffering - - - - - detachVolume - - - The following response parameter is added: displayvolume - - - - - deleteUser - - - The following response parameters are added: displaytext, success - Removed parameters: id, account, accountid, accounttype, apikey, created, - domain, domainid, email, firstname, lastname, secretkey, state, timezone, username - - - - - - listSnapshots - - - The following request parameter is added: zoneid (optional) - The following response parameter is added: zoneid - - - - - markDefaultZoneForAccount - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - - - restartVPC - - - The following request parameters are made mandatory: id - - - - - updateHypervisorCapabilities - - - The following response parameters are added: hypervisor, hypervisorversion, - maxdatavolumeslimit, maxguestslimit, maxhostspercluster, securitygroupenabled, - storagemotionenabled - Removed parameters: cpunumber, cpuspeed, created, defaultuse, displaytext, - domain, domainid, hosttags, issystem, limitcpuuse, 
memory, name, networkrate, - offerha, storagetype, systemvmtype, tags - - - - - updateLoadBalancerRule - - - The following response parameter is added: networkid - - - - - listVlanIpRanges - - - The following response parameters are added: endipv6, ip6cidr, ip6gateway, - startipv6 - - - - - listHypervisorCapabilities - - - The following response parameters are added: maxdatavolumeslimit, - maxhostspercluster, storagemotionenabled - - - - - updateNetworkOffering - - - The following response parameters are added: details, egressdefaultpolicy, - ispersistent - - - - - createVirtualRouterElement - - - The following request parameters are added: providertype (optional) - - - - - listVpnUsers - - - The following response parameter is added: state - - - - - listUsers - - - The following response parameters are added: iscallerchilddomain, isdefault - - - - - - listSupportedNetworkServices - - - The following response parameter is added: provider - - - - - listIsos - - - The following response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - -
-
- Deprecated APIs - - - addExternalLoadBalancer (Adds F5 external load balancer appliance.) - - - deleteExternalLoadBalancer (Deletes a F5 external load balancer appliance added in a - zone.) - - - listExternalLoadBalancers (Lists F5 external load balancer appliances added in a - zone.) - - -
-
-
diff --git a/docs/en-US/Revision_History.xml b/docs/en-US/Revision_History.xml deleted file mode 100644 index 55d741a64f2..00000000000 --- a/docs/en-US/Revision_History.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Revision History - - - - 0-0 - Tue May 29 2012 - - Jessica - Tomechak - - - - - Initial creation of book by publican - - - - - - diff --git a/docs/en-US/Revision_History_Install_Guide.xml b/docs/en-US/Revision_History_Install_Guide.xml deleted file mode 100644 index ee8dd31325a..00000000000 --- a/docs/en-US/Revision_History_Install_Guide.xml +++ /dev/null @@ -1,55 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Revision History - - - - 1-0 - October 5 2012 - - Jessica - Tomechak - - - - Radhika - PC - - - - Wido - den Hollander - - - - - Initial publication - - - - - - diff --git a/docs/en-US/SSL-keystore-path-and-password.xml b/docs/en-US/SSL-keystore-path-and-password.xml deleted file mode 100644 index f7b7426874d..00000000000 --- a/docs/en-US/SSL-keystore-path-and-password.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- SSL Keystore Path and Password - If the LDAP server requires SSL, you need to enable it in the ldapConfig command by setting the parameters ssl, truststore, and truststorepass. Before enabling SSL for ldapConfig, you need to get the certificate which the LDAP server is using and add it to a trusted keystore. You will need to know the path to the keystore and the password. -
diff --git a/docs/en-US/VPN-user-usage-record-format.xml b/docs/en-US/VPN-user-usage-record-format.xml deleted file mode 100644 index dd66fb4d0d4..00000000000 --- a/docs/en-US/VPN-user-usage-record-format.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- VPN User Usage Record Format - - account – name of the account - accountid – ID of the account - domainid – ID of the domain in which this account resides - zoneid – Zone where the usage occurred - description – A string describing what the usage record is tracking - usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours) - usagetype – A number representing the usage type (see Usage Types) - rawusage – A number representing the actual usage in hours - usageid – VPN user ID - usagetype – A number representing the usage type (see Usage Types) - startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record - -
diff --git a/docs/en-US/about-clusters.xml b/docs/en-US/about-clusters.xml deleted file mode 100644 index aa8604ccd52..00000000000 --- a/docs/en-US/about-clusters.xml +++ /dev/null @@ -1,63 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- About Clusters - - A cluster provides a way to group hosts. To be precise, a cluster is a - XenServer server pool, a set of KVM servers, or a - VMware cluster preconfigured in vCenter. The hosts in a cluster all - have identical hardware, run the same hypervisor, are on the same subnet, - and access the same shared primary storage. Virtual machine instances - (VMs) can be live-migrated from one host to another within the same - cluster, without interrupting service to the user. - - - A cluster is the third-largest organizational unit within a &PRODUCT; - deployment. Clusters are contained within pods, and pods are contained - within zones. Size of the cluster is limited by the underlying hypervisor, - although the &PRODUCT; recommends less in most cases; see Best Practices. - - - A cluster consists of one or more hosts and one or more primary storage - servers. - - - - - - cluster-overview.png: Structure of a simple cluster - - &PRODUCT; allows multiple clusters in a cloud deployment. - - Even when local storage is used exclusively, clusters are still required - organizationally, even if there is just one host per cluster. - - - When VMware is used, every VMware cluster is managed by a vCenter server. - Administrator must register the vCenter server with &PRODUCT;. There may - be multiple vCenter servers per zone. Each vCenter server may manage - multiple VMware clusters. -
diff --git a/docs/en-US/about-hosts.xml b/docs/en-US/about-hosts.xml deleted file mode 100644 index 87b6bab1ee1..00000000000 --- a/docs/en-US/about-hosts.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- About Hosts - A host is a single computer. Hosts provide the computing resources that run the guest virtual machines. Each host has hypervisor software installed on it to manage the guest VMs. For example, a Linux KVM-enabled server, a Citrix XenServer server, and an ESXi server are hosts. - The host is the smallest organizational unit within a &PRODUCT; deployment. Hosts are contained within clusters, clusters are contained within pods, and pods are contained within zones. - Hosts in a &PRODUCT; deployment: - - Provide the CPU, memory, storage, and networking resources needed to host the virtual - machines - Interconnect using a high bandwidth TCP/IP network and connect to the Internet - May reside in multiple data centers across different geographic locations - May have different capacities (different CPU speeds, different amounts of RAM, etc.), although the hosts within a cluster must all be homogeneous - - Additional hosts can be added at any time to provide more capacity for guest VMs. - &PRODUCT; automatically detects the amount of CPU and memory resources provided by the Hosts. - Hosts are not visible to the end user. An end user cannot determine which host their guest has been assigned to. - For a host to function in &PRODUCT;, you must do the following: - - Install hypervisor software on the host - Assign an IP address to the host - Ensure the host is connected to the &PRODUCT; Management Server - -
diff --git a/docs/en-US/about-password-encryption.xml b/docs/en-US/about-password-encryption.xml deleted file mode 100644 index a13ff60fc95..00000000000 --- a/docs/en-US/about-password-encryption.xml +++ /dev/null @@ -1,65 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- About Password and Key Encryption - &PRODUCT; stores several sensitive passwords and secret keys that are used to provide - security. These values are always automatically encrypted: - - - Database secret key - - - Database password - - - SSH keys - - - Compute node root password - - - VPN password - - - User API secret key - - - VNC password - - - &PRODUCT; uses the Java Simplified Encryption (JASYPT) library. The data values are - encrypted and decrypted using a database secret key, which is stored in one of &PRODUCT;’s - internal properties files along with the database password. The other encrypted values listed - above, such as SSH keys, are in the &PRODUCT; internal database. - Of course, the database secret key itself can not be stored in the open – it must be - encrypted. How then does &PRODUCT; read it? A second secret key must be provided from an - external source during Management Server startup. This key can be provided in one of two ways: - loaded from a file or provided by the &PRODUCT; administrator. The &PRODUCT; database has a - configuration setting that lets it know which of these methods will be used. If the encryption - type is set to "file," the key must be in a file in a known location. If the encryption type is - set to "web," the administrator runs the utility - com.cloud.utils.crypt.EncryptionSecretKeySender, which relays the key to the Management Server - over a known port. - The encryption type, database secret key, and Management Server secret key are set during - &PRODUCT; installation. They are all parameters to the &PRODUCT; database setup script - (cloudstack-setup-databases). The default values are file, password, and password. It is, of course, - highly recommended that you change these to more secure keys. -
diff --git a/docs/en-US/about-physical-networks.xml b/docs/en-US/about-physical-networks.xml deleted file mode 100644 index b22e48b7779..00000000000 --- a/docs/en-US/about-physical-networks.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- About Physical Networks - Part of adding a zone is setting up the physical network. One or (in an advanced zone) more physical networks can be associated with each zone. The network corresponds to a NIC on the hypervisor host. Each physical network can carry one or more types of network traffic. The choices of traffic type for each network vary depending on whether you are creating a zone with basic networking or advanced networking. - A physical network is the actual network hardware and wiring in a zone. A zone can have multiple physical networks. An administrator can: - - Add/Remove/Update physical networks in a zone - Configure VLANs on the physical network - Configure a name so the network can be recognized by hypervisors - Configure the service providers (firewalls, load balancers, etc.) available on a physical network - Configure the IP addresses trunked to a physical network - Specify what type of traffic is carried on the physical network, as well as other properties like network speed - - - - - - - -
diff --git a/docs/en-US/about-pods.xml b/docs/en-US/about-pods.xml deleted file mode 100644 index 57ae1a319b3..00000000000 --- a/docs/en-US/about-pods.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- About Pods - A pod often represents a single rack. Hosts in the same pod are in the same subnet. - A pod is the second-largest organizational unit within a &PRODUCT; deployment. Pods are contained within zones. Each zone can contain one or more pods. - A pod consists of one or more clusters of hosts and one or more primary storage servers. - Pods are not visible to the end user. - - - - - - pod-overview.png: Nested structure of a simple pod - -
diff --git a/docs/en-US/about-primary-storage.xml b/docs/en-US/about-primary-storage.xml deleted file mode 100644 index 9af9f2dae13..00000000000 --- a/docs/en-US/about-primary-storage.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- About Primary Storage - Primary storage is associated with a cluster and/or a zone. It stores the disk volumes for all of the VMs running on hosts in that cluster. You can add multiple primary storage servers to a cluster or a zone (at least one is required at the cluster level). Primary storage is typically located close to the hosts for increased performance. &PRODUCT; manages the allocation of guest virtual disks to particular primary storage devices. - Primary storage uses the concept of a storage tag. A storage tag is a label that is used to identify the primary storage. Each primary storage can be associated with zero, one, or more storage tags. When a VM is spun up or a data disk attached to a VM for the first time, these tags, if supplied, are used to determine which primary storage can support the VM or data disk (ex. say you need to guarantee a certain number of IOPS to a particular volume). - Primary storage can be either static or dynamic. Static primary storage is what CloudStack has traditionally supported. In this model, the administrator must present CloudStack with a certain amount of preallocated storage (ex. a volume from a SAN) and CloudStack can place many of its volumes on this storage. In the newer, dynamic model, the administrator can present CloudStack with a storage system itself (ex. a SAN). CloudStack, working in concert with a plug-in developed for that storage system, can dynamically create volumes on the storage system. A valuable use for this ability is Quality of Service (QoS). If a volume created in CloudStack can be backed by a dedicated volume on a SAN (i.e. a one-to-one mapping between a SAN volume and a CloudStack volume) and the SAN provides QoS, then CloudStack can provide QoS. 
- &PRODUCT; is designed to work with all standards-compliant iSCSI and NFS servers that are supported by the underlying hypervisor, including, for example: - - SolidFire for iSCSI - Dell EqualLogicâ„¢ for iSCSI - Network Appliances filers for NFS and iSCSI - Scale Computing for NFS - - If you intend to use only local disk for your installation, you can skip to Add Secondary Storage. -
diff --git a/docs/en-US/about-regions.xml b/docs/en-US/about-regions.xml deleted file mode 100644 index a12c183abd3..00000000000 --- a/docs/en-US/about-regions.xml +++ /dev/null @@ -1,50 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- About Regions - To increase reliability of the cloud, you can optionally group resources into multiple geographic regions. - A region is the largest available organizational unit within a &PRODUCT; deployment. - A region is made up of several availability zones, where each zone is roughly equivalent to a datacenter. - Each region is controlled by its own cluster of Management Servers, running in one of the zones. - The zones in a region are typically located in close geographical proximity. - Regions are a useful technique for providing fault tolerance and disaster recovery. - By grouping zones into regions, the cloud can achieve higher availability and scalability. - User accounts can span regions, so that users can deploy VMs in multiple, widely-dispersed regions. - Even if one of the regions becomes unavailable, the services are still available to the end-user through VMs deployed in another region. - And by grouping communities of zones under their own nearby Management Servers, the latency of communications within the cloud is reduced - compared to managing widely-dispersed zones from a single central Management Server. - - - Usage records can also be consolidated and tracked at the region level, creating reports or invoices for each geographic region. - - - - - - region-overview.png: Nested structure of a region. - - Regions are visible to the end user. When a user starts a guest VM on a particular &PRODUCT; Management Server, - the user is implicitly selecting that region for their guest. - Users might also be required to copy their private templates to additional regions to enable creation of guest VMs using their templates in those regions. -
\ No newline at end of file diff --git a/docs/en-US/about-secondary-storage.xml b/docs/en-US/about-secondary-storage.xml deleted file mode 100644 index 516ec0e6b78..00000000000 --- a/docs/en-US/about-secondary-storage.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- About Secondary Storage - Secondary storage stores the following: - - Templates — OS images that can be used to boot VMs and can include additional configuration information, such as installed applications - ISO images — disc images containing data or bootable media for operating systems - Disk volume snapshots — saved copies of VM data which can be used for data recovery or to create new templates - - The items in secondary storage are available to all hosts in the scope of - the secondary storage, which may be defined as per zone or per region. - To make items in secondary storage available to all hosts throughout the cloud, you can - add object storage in addition to the - zone-based NFS Secondary Staging Store. - It is not necessary to - copy templates and snapshots from one zone to another, as would be required when using zone - NFS alone. Everything is available everywhere. - &PRODUCT; provides plugins that enable both - OpenStack Object Storage (Swift, - swift.openstack.org) - and Amazon Simple Storage Service (S3) object storage. - When using one of these storage plugins, you configure Swift or S3 storage for - the entire &PRODUCT;, then set up the NFS Secondary Staging Store for each zone. The NFS - storage in each zone acts as a staging area through which all templates and other secondary - storage data pass before being forwarded to Swift or S3. - The backing object storage acts as a cloud-wide - resource, making templates and other data available to any zone in the cloud. -
diff --git a/docs/en-US/about-security-groups.xml b/docs/en-US/about-security-groups.xml deleted file mode 100644 index 6a31b25ef48..00000000000 --- a/docs/en-US/about-security-groups.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- About Security Groups - Security groups provide a way to isolate traffic to VMs. A security group is a group of - VMs that filter their incoming and outgoing traffic according to a set of rules, called - ingress and egress rules. These rules filter network traffic according to the IP address - that is attempting to communicate with the VM. Security groups are particularly useful in - zones that use basic networking, because there is a single guest network for all guest VMs. - In advanced zones, security groups are supported only on the KVM hypervisor. - In a zone that uses advanced networking, you can instead define multiple guest networks to isolate traffic to VMs. - - - Each &PRODUCT; account comes with a default security group that denies all inbound traffic and allows all outbound traffic. The default security group can be modified so that all new VMs inherit some other desired set of rules. - Any &PRODUCT; user can set up any number of additional security groups. When a new VM is launched, it is assigned to the default security group unless another user-defined security group is specified. A VM can be a member of any number of security groups. Once a VM is assigned to a security group, it remains in that group for its entire lifetime; you can not move a running VM from one security group to another. - You can modify a security group by deleting or adding any number of ingress and egress rules. When you do, the new rules apply to all VMs in the group, whether running or stopped. - If no ingress rules are specified, then no traffic will be allowed in, except for responses to any traffic that has been allowed out through an egress rule. -
diff --git a/docs/en-US/about-virtual-networks.xml b/docs/en-US/about-virtual-networks.xml deleted file mode 100644 index 4dbd2018b27..00000000000 --- a/docs/en-US/about-virtual-networks.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- About Virtual Networks - A virtual network is a logical construct that enables multi-tenancy on a single physical network. In &PRODUCT; a virtual network can be shared or isolated. - - - -
diff --git a/docs/en-US/about-working-with-vms.xml b/docs/en-US/about-working-with-vms.xml deleted file mode 100644 index 90e5abf07f8..00000000000 --- a/docs/en-US/about-working-with-vms.xml +++ /dev/null @@ -1,64 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- About Working with Virtual Machines - &PRODUCT; provides administrators with complete control over the lifecycle of all guest VMs - executing in the cloud. &PRODUCT; provides several guest management operations for end users and - administrators. VMs may be stopped, started, rebooted, and destroyed. - Guest VMs have a name and group. VM names and groups are opaque to &PRODUCT; and are - available for end users to organize their VMs. Each VM can have three names for use in different - contexts. Only two of these names can be controlled by the user: - - - Instance name – a unique, immutable ID that is generated by &PRODUCT; and can not - be modified by the user. This name conforms to the requirements in IETF RFC 1123. - - - Display name – the name displayed in the &PRODUCT; web UI. Can be set by the user. - Defaults to instance name. - - - Name – host name that the DHCP server assigns to the VM. Can be set by the user. - Defaults to instance name - - - - You can append the display name of a guest VM to its internal name. For more information, - see . - - Guest VMs can be configured to be Highly Available (HA). An HA-enabled VM is monitored by - the system. If the system detects that the VM is down, it will attempt to restart the VM, - possibly on a different host. For more information, see HA-Enabled Virtual Machines on - Each new VM is allocated one public IP address. When the VM is started, &PRODUCT; - automatically creates a static NAT between this public IP address and the private IP address of - the VM. - If elastic IP is in use (with the NetScaler load balancer), the IP address initially - allocated to the new VM is not marked as elastic. The user must replace the automatically - configured IP with a specifically acquired elastic IP, and set up the static NAT mapping between - this new IP and the guest VM’s private IP. The VM’s original IP address is then released and - returned to the pool of available public IPs. 
Optionally, you can also decide not to allocate a - public IP to a VM in an EIP-enabled Basic zone. For more information on Elastic IP, see . - &PRODUCT; cannot distinguish a guest VM that was shut down by the user (such as with the - “shutdown†command in Linux) from a VM that shut down unexpectedly. If an HA-enabled VM is shut - down from inside the VM, &PRODUCT; will restart it. To shut down an HA-enabled VM, you must go - through the &PRODUCT; UI or API. -
diff --git a/docs/en-US/about-zones.xml b/docs/en-US/about-zones.xml deleted file mode 100644 index 2a4eeb4659f..00000000000 --- a/docs/en-US/about-zones.xml +++ /dev/null @@ -1,74 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- About Zones - A zone is the second largest organizational unit within a &PRODUCT; deployment. A zone - typically corresponds to a single datacenter, although it is permissible to have multiple - zones in a datacenter. The benefit of organizing infrastructure into zones is to provide - physical isolation and redundancy. For example, each zone can have its own power supply and - network uplink, and the zones can be widely separated geographically (though this is not - required). - A zone consists of: - - One or more pods. Each pod contains one or more clusters of hosts and one or more primary storage servers. - A zone may contain one or more primary storage servers, which are shared by all the pods in the zone. - Secondary storage, which is shared by all the pods in the zone. - - - - - - zone-overview.png: Nested structure of a simple zone. - - Zones are visible to the end user. When a user starts a guest VM, the user must select a zone for their guest. Users might also be required to copy their private templates to additional zones to enable creation of guest VMs using their templates in those zones. - Zones can be public or private. Public zones are visible to all users. This means that any user may create a guest in that zone. Private zones are reserved for a specific domain. Only users in that domain or its subdomains may create guests in that zone. - Hosts in the same zone are directly accessible to each other without having to go through a firewall. Hosts in different zones can access each other through statically configured VPN tunnels. - For each zone, the administrator must decide the following. - - How many pods to place in each zone. - How many clusters to place in each pod. - How many hosts to place in each cluster. - (Optional) How many primary storage servers to place in each zone and total capacity for these storage servers. - How many primary storage servers to place in each cluster and total capacity for these storage servers. 
- How much secondary storage to deploy in a zone. - - When you add a new zone using the &PRODUCT; UI, you will be prompted to configure the zone’s physical network - and add the first pod, cluster, host, primary storage, and secondary storage. - In order to support zone-wide functions for VMware, &PRODUCT; is aware of VMware Datacenters and can map each Datacenter to a - &PRODUCT; zone. To enable features like storage live migration and zone-wide - primary storage for VMware hosts, &PRODUCT; has to make sure that a zone - contains only a single VMware Datacenter. Therefore, when you are creating a new - &PRODUCT; zone, you can select a VMware Datacenter for the zone. If you - are provisioning multiple VMware Datacenters, each one will be set up as a single zone - in &PRODUCT;. - - If you are upgrading from a previous &PRODUCT; version, and your existing - deployment contains a zone with clusters from multiple VMware Datacenters, that zone - will not be forcibly migrated to the new model. It will continue to function as - before. However, any new zone-wide operations, such as zone-wide primary storage - and live storage migration, will - not be available in that zone. - - -
diff --git a/docs/en-US/accept-membership-invite.xml b/docs/en-US/accept-membership-invite.xml deleted file mode 100644 index dc59d00af65..00000000000 --- a/docs/en-US/accept-membership-invite.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Accepting a Membership Invitation - If you have received an invitation to join a &PRODUCT; project, and you want to accept the invitation, follow these steps: - - Log in to the &PRODUCT; UI. - In the left navigation, click Projects. - In Select View, choose Invitations. - If you see the invitation listed onscreen, click the Accept button. Invitations listed on screen were sent to you using your &PRODUCT; account name. - If you received an email invitation, click the Enter Token button, and provide the project ID and unique ID code (token) from the email. - -
- diff --git a/docs/en-US/accessing-system-vms.xml b/docs/en-US/accessing-system-vms.xml deleted file mode 100755 index e1b6090d7af..00000000000 --- a/docs/en-US/accessing-system-vms.xml +++ /dev/null @@ -1,66 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Accessing System VMs - It may sometimes be necessary to access System VMs for diagnostics of certain issues, for example if you are experiencing SSVM (Secondary Storage VM) connection issues. Use the steps below in order to connect to the SSH console of a running System VM. - - Accessing System VMs over the network requires the use of private keys and connecting to System VMs SSH Daemon on port 3922. - XenServer/KVM Hypervisors store this key at /root/.ssh/id_rsa.cloud on each &PRODUCT; agent. - To access System VMs running on ESXi, the key is stored on the management server at /var/lib/cloudstack/management/.ssh/id_rsa. - - - - Find the details of the System VM - - Log in with admin privileges to the &PRODUCT; UI. - Click Infrastructure, then System VMs, and then click the name of a running VM. - Take a note of the 'Host', 'Private IP Address' and 'Link Local IP Address' of the System VM you wish to access. - - - - - XenServer/KVM Hypervisors - - Connect to the Host of which the System VM is running. - SSH the 'Link Local IP Address' of the System VM from the Host on which the VM is running. - Format: ssh -i <path-to-private-key> <link-local-ip> -p 3922 - Example: root@faith:~# ssh -i /root/.ssh/id_rsa.cloud 169.254.3.93 -p 3922 - - - - ESXi Hypervisors - - Connect to your &PRODUCT; Management Server. - ESXi users should SSH to the private IP address of the System VM. - Format: ssh -i <path-to-private-key> <vm-private-ip> -p 3922 - Example: root@management:~# ssh -i /var/lib/cloudstack/management/.ssh/id_rsa 172.16.0.250 -p 3922 - - - - - - - -
diff --git a/docs/en-US/accessing-vms.xml b/docs/en-US/accessing-vms.xml deleted file mode 100644 index 67d9d774172..00000000000 --- a/docs/en-US/accessing-vms.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Accessing VMs - Any user can access their own virtual machines. The administrator can access all VMs running in the cloud. - To access a VM through the &PRODUCT; UI: - - Log in to the &PRODUCT; UI as a user or admin. - Click Instances, then click the name of a running VM. - Click the View Console button . - - To access a VM directly over the network: - - The VM must have some port open to incoming traffic. For example, in a basic zone, a new VM might be assigned to a security group which allows incoming traffic. This depends on what security group you picked when creating the VM. In other cases, you can open a port by setting up a port forwarding policy. See . - If a port is open but you can not access the VM using ssh, it’s possible that ssh is not already enabled on the VM. This will depend on whether ssh is enabled in the template you picked when creating the VM. Access the VM through the &PRODUCT; UI and enable ssh on the machine using the commands for the VM’s operating system. - If the network has an external firewall device, you will need to create a firewall rule to allow access. See . - -
- diff --git a/docs/en-US/accounts-users-domains.xml b/docs/en-US/accounts-users-domains.xml deleted file mode 100644 index 3accbbe9b84..00000000000 --- a/docs/en-US/accounts-users-domains.xml +++ /dev/null @@ -1,133 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Accounts, Users, and Domains - - Accounts - An account typically represents a customer of the service provider or a department in a large organization. Multiple users can exist in an account. - - - Domains - Accounts are grouped by domains. Domains usually contain multiple accounts that have some logical relationship to each other and a set of delegated administrators with some authority over the domain and its subdomains. For example, a service provider with several resellers could create a domain for each reseller. - - For each account created, the Cloud installation creates three different types of user accounts: root administrator, domain administrator, and user. - - Users - Users are like aliases in the account. Users in the same account are not isolated from each other, but they are isolated from users in other accounts. Most installations need not surface the notion of users; they just have one user per account. The same user cannot belong to multiple accounts. - - Username is unique in a domain across accounts in that domain. The same username can exist in other domains, including sub-domains. Domain name can repeat only if the full pathname from root is unique. For example, you can create root/d1, as well as root/foo/d1, and root/sales/d1. - Administrators are accounts with special privileges in the system. There may be multiple administrators in the system. Administrators can create or delete other administrators, and change the password for any user in the system. - - Domain Administrators - Domain administrators can perform administrative operations for users who belong to that domain. Domain administrators do not have visibility into physical servers or other domains. - - - Root Administrator - Root administrators have complete access to the system, including managing templates, service offerings, customer care administrators, and domains - - - Resource Ownership - Resources belong to the account, not individual users in that account. 
For example, - billing, resource limits, and so on are maintained by the account, not the users. A user - can operate on any resource in the account provided the user has privileges for that - operation. The privileges are determined by the role. A root administrator can change - the ownership of any virtual machine from one account to any other account by using the - assignVirtualMachine API. A domain or sub-domain administrator can do the same for VMs - within the domain from one account to any other account in the domain or any of its - sub-domains. - -
- Dedicating Resources to Accounts and Domains - The root administrator can dedicate resources to a specific domain or account - that needs private infrastructure for additional security or performance guarantees. - A zone, pod, cluster, or host can be reserved by the root administrator for a specific domain or account. - Only users in that domain or its subdomain may use the infrastructure. - For example, only users in a given domain can create guests in a zone dedicated to that domain. - There are several types of dedication available: - - - Explicit dedication. A zone, pod, cluster, or host is dedicated to an account or - domain by the root administrator during initial deployment and - configuration. - Strict implicit dedication. A host will not be shared across multiple accounts. For example, - strict implicit dedication is useful for deployment of certain types of - applications, such as desktops, where no host can be shared - between different accounts without violating the desktop software's terms of license. - Preferred implicit dedication. The VM will be deployed in dedicated infrastructure if - possible. Otherwise, the VM can be deployed in shared - infrastructure. - -
- How to Dedicate a Zone, Cluster, Pod, or Host to an Account or Domain - For explicit dedication: When deploying a new zone, pod, cluster, or host, the - root administrator can click the Dedicated checkbox, then choose a domain or account - to own the resource. - To explicitly dedicate an existing zone, pod, cluster, or host: log in as the root admin, - find the resource in the UI, and click the Dedicate button. - - - - - dedicate-resource-button.png: button to dedicate a zone, pod, cluster, or host - - - For implicit dedication: The administrator creates a compute service offering and - in the Deployment Planner field, chooses ImplicitDedicationPlanner. Then in Planner - Mode, the administrator specifies either Strict or Preferred, depending on whether - it is permissible to allow some use of shared resources when dedicated resources are - not available. Whenever a user creates a VM based on this service offering, it is - allocated on one of the dedicated hosts. -
-
- How to Use Dedicated Hosts - To use an explicitly dedicated host, use the explicit-dedicated type of affinity - group (see ). For example, when creating a new VM, - an end user can choose to place it on dedicated infrastructure. This operation will - succeed only if some infrastructure has already been assigned as dedicated to the - user's account or domain. -
-
- Behavior of Dedicated Hosts, Clusters, Pods, and Zones - The administrator can live migrate VMs away from dedicated hosts if desired, whether the destination - is a host reserved for a different account/domain or a host that is shared (not dedicated to any particular account or domain). - &PRODUCT; will generate an alert, but the operation is allowed. - Dedicated hosts can be used in conjunction with host tags. If both a host tag and dedication are requested, - the VM will be placed only on a host that meets both requirements. If there is no dedicated resource available - to that user that also has the host tag requested by the user, then the VM will not deploy. - If you delete an account or domain, any hosts, clusters, pods, and zones that were - dedicated to it are freed up. They will now be available to be shared by any account - or domain, or the administrator may choose to re-dedicate them to a different - account or domain. - System VMs and virtual routers affect the behavior of host dedication. - System VMs and virtual routers are owned by the &PRODUCT; system account, - and they can be deployed on any host. They do not adhere to explicit dedication. - The presence of system VMs and virtual routers on a host makes it unsuitable for strict implicit dedication. - The host cannot be used for strict implicit dedication, - because the host already has VMs of a specific account (the default system account). - However, a host with system VMs or virtual routers can be used - for preferred implicit dedication. - -
-
-
diff --git a/docs/en-US/accounts.xml b/docs/en-US/accounts.xml deleted file mode 100644 index 1c4454c6a3f..00000000000 --- a/docs/en-US/accounts.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Accounts - - - diff --git a/docs/en-US/acquire-new-ip-address.xml b/docs/en-US/acquire-new-ip-address.xml deleted file mode 100644 index 3dbd79e3f2d..00000000000 --- a/docs/en-US/acquire-new-ip-address.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Acquiring a New IP Address - - Log in to the &PRODUCT; UI as an administrator or end user. - - In the left navigation, choose Network. - - Click the name of the network you want to work with. - - Click View IP Addresses. - - Click Acquire New IP. - The Acquire New IP window is displayed. - - Specify whether you want cross-zone IP or not. - If you want Portable IP click Yes in the confirmation dialog. If you want a normal - Public IP click No. - For more information on Portable IP, see . - Within a few moments, the new IP address should appear with the state Allocated. You can - now use the IP address in port forwarding or static NAT rules. - -
diff --git a/docs/en-US/acquire-new-ip-for-vpc.xml b/docs/en-US/acquire-new-ip-for-vpc.xml deleted file mode 100644 index c0cb876d483..00000000000 --- a/docs/en-US/acquire-new-ip-for-vpc.xml +++ /dev/null @@ -1,88 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Acquiring a New IP Address for a VPC - When you acquire an IP address, all IP addresses are allocated to VPC, not to the guest - networks within the VPC. The IPs are associated to the guest network only when the first - port-forwarding, load balancing, or Static NAT rule is created for the IP or the network. IP - can't be associated to more than one network at a time. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC to which you want to deploy the VMs. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - The following options are displayed. - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - Select IP Addresses. - The Public IP Addresses page is displayed. - - - Click Acquire New IP, and click Yes in the confirmation dialog. - You are prompted for confirmation because, typically, IP addresses are a limited - resource. Within a few moments, the new IP address should appear with the state Allocated. - You can now use the IP address in port forwarding, load balancing, and static NAT - rules. - - -
diff --git a/docs/en-US/add-additional-guest-network.xml b/docs/en-US/add-additional-guest-network.xml deleted file mode 100644 index c684da023da..00000000000 --- a/docs/en-US/add-additional-guest-network.xml +++ /dev/null @@ -1,65 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding an Additional Guest Network - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - Click Add guest network. Provide the following information: - - - Name: The name of the network. This will be - user-visible. - - - Display Text: The description of the network. This - will be user-visible. - - - Zone. The name of the zone this network applies to. - Each zone is a broadcast domain, and therefore each zone has a different IP range for - the guest network. The administrator must configure the IP range for each zone. - - - Network offering: If the administrator has - configured multiple network offerings, select the one you want to use for this - network. - - - Guest Gateway: The gateway that the guests should - use. - - - Guest Netmask: The netmask in use on the subnet the - guests will use. - - - - - Click Create. - - -
diff --git a/docs/en-US/add-clusters-kvm-xenserver.xml b/docs/en-US/add-clusters-kvm-xenserver.xml deleted file mode 100644 index ad5737191fd..00000000000 --- a/docs/en-US/add-clusters-kvm-xenserver.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Add Cluster: KVM or XenServer - These steps assume you have already installed the hypervisor on the hosts and logged in to - the &PRODUCT; UI. - - - In the left navigation, choose Infrastructure. In Zones, click View More, then click the - zone in which you want to add the cluster. - - - Click the Compute tab. - - - In the Clusters node of the diagram, click View All. - - - Click Add Cluster. - - - Choose the hypervisor type for this cluster. - - - Choose the pod in which you want to create the cluster. - - - Enter a name for the cluster. This can be text of your choosing and is not used by - &PRODUCT;. - - - Click OK. - - -
diff --git a/docs/en-US/add-clusters-ovm.xml b/docs/en-US/add-clusters-ovm.xml deleted file mode 100644 index d0b0688e6a3..00000000000 --- a/docs/en-US/add-clusters-ovm.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Add Cluster: OVM - To add a Cluster of hosts that run Oracle VM (OVM): - - Add a companion non-OVM cluster to the Pod. This cluster provides an environment where the &PRODUCT; System VMs can run. You should have already installed a non-OVM hypervisor on at least one Host to prepare for this step. Depending on which hypervisor you used: - - For VMWare, follow the steps in . When finished, return here and continue with the next step. - For KVM or XenServer, follow the steps in . When finished, return here and continue with the next step - - - In the left navigation, choose Infrastructure. In Zones, click View All, then click the zone in which you want to add the cluster. - Click the Compute and Storage tab. In the Pods node, click View All. - Click View Clusters, then click Add Cluster. - The Add Cluster dialog is displayed. - In Hypervisor, choose OVM. - In Cluster, enter a name for the cluster. - Click Add. - -
diff --git a/docs/en-US/add-clusters-vsphere.xml b/docs/en-US/add-clusters-vsphere.xml deleted file mode 100644 index c3a0902be8f..00000000000 --- a/docs/en-US/add-clusters-vsphere.xml +++ /dev/null @@ -1,178 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Add Cluster: vSphere - Host management for vSphere is done through a combination of vCenter and the &PRODUCT; admin - UI. &PRODUCT; requires that all hosts be in a &PRODUCT; cluster, but the cluster may consist of - a single host. As an administrator you must decide if you would like to use clusters of one host - or of multiple hosts. Clusters of multiple hosts allow for features like live migration. - Clusters also require shared storage such as NFS or iSCSI. - For vSphere servers, we recommend creating the cluster of hosts in vCenter and then adding - the entire cluster to &PRODUCT;. Follow these requirements: - - - Do not put more than 8 hosts in a vSphere cluster - - - Make sure the hypervisor hosts do not have any VMs already running before you add them - to &PRODUCT;. - - - To add a vSphere cluster to &PRODUCT;: - - - Create the cluster of hosts in vCenter. Follow the vCenter instructions to do this. You - will create a cluster that looks something like this in vCenter. - - - - - - vsphereclient.png: vSphere client - - - - - Log in to the UI. - - - In the left navigation, choose Infrastructure. In Zones, click View More, then click the - zone in which you want to add the cluster. - - - Click the Compute tab, and click View All on Pods. Choose the pod to which you want to - add the cluster. - - - Click View Clusters. - - - Click Add Cluster. - - - In Hypervisor, choose VMware. - - - Provide the following information in the dialog. The fields below make reference to the - values from vCenter. - - - - - - addcluster.png: add a cluster - - - - - Cluster Name: Enter the name of the cluster you - created in vCenter. For example, "cloud.cluster.2.2.1" - - - vCenter Username: Enter the username that &PRODUCT; - should use to connect to vCenter. This user must have all the administrative - privileges. - - - CPU overcommit ratio: Enter the CPU overcommit - ratio for the cluster. The value you enter determines the CPU consumption of each VM in - the selected cluster. 
By increasing the over-provisioning ratio, more resource capacity - will be used. If no value is specified, the value is defaulted to 1, which implies no - over-provisioning is done. - - - RAM overcommit ratio: Enter the RAM overcommit - ratio for the cluster. The value you enter determines the memory consumption of each VM - in the selected cluster. By increasing the over-provisioning ratio, more resource - capacity will be used. If no value is specified, the value is defaulted to 1, which - implies no over-provisioning is done. - - - vCenter Host: Enter the hostname or IP address of - the vCenter server. - - - vCenter Password: Enter the password for the user - named above. - - - vCenter Datacenter: Enter the vCenter datacenter - that the cluster is in. For example, "cloud.dc.VM". - - - Override Public Traffic: Enable this option to - override the zone-wide public traffic for the cluster you are creating. - - - Public Traffic vSwitch Type: This option is - displayed only if you enable the Override Public Traffic option. Select a desirable - switch. If the vmware.use.dvswitch global parameter is true, the default option will be - VMware vNetwork Distributed Virtual Switch. - If you have enabled Nexus dvSwitch in the environment, the following parameters for - dvSwitch configuration are displayed: - - - Nexus dvSwitch IP Address: The IP address of the Nexus VSM appliance. - - - Nexus dvSwitch Username: The username required to access the Nexus VSM - appliance. - - - Nexus dvSwitch Password: The password associated with the username specified - above. - - - - - Override Guest Traffic: Enable this option to - override the zone-wide guest traffic for the cluster you are creating. - - - Guest Traffic vSwitch Type: This option is - displayed only if you enable the Override Guest Traffic option. Select a desirable - switch. - If the vmware.use.dvswitch global parameter is true, the default option will be - VMware vNetwork Distributed Virtual Switch. 
- If you have enabled Nexus dvSwitch in the environment, the following parameters for - dvSwitch configuration are displayed: - - - Nexus dvSwitch IP Address: The IP address of the Nexus VSM appliance. - - - Nexus dvSwitch Username: The username required to access the Nexus VSM - appliance. - - - Nexus dvSwitch Password: The password associated with the username specified - above. - - - - - There might be a slight delay while the cluster is provisioned. It will - automatically display in the UI. - - - - -
diff --git a/docs/en-US/add-gateway-vpc.xml b/docs/en-US/add-gateway-vpc.xml deleted file mode 100644 index 403302df532..00000000000 --- a/docs/en-US/add-gateway-vpc.xml +++ /dev/null @@ -1,227 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding a Private Gateway to a VPC - A private gateway can be added by the root admin only. The VPC private network has 1:1 - relationship with the NIC of the physical network. You can configure multiple private gateways - to a single VPC. No gateways with duplicated VLAN and IP are allowed in the same data - center. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC to which you want to configure load balancing - rules. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - - - Click the Settings icon. - The following options are displayed. - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - Select Private Gateways. - The Gateways page is displayed. - - - Click Add new gateway: - - - - - - add-new-gateway-vpc.png: adding a private gateway for the VPC. - - - - - Specify the following: - - - Physical Network: The physical network you have - created in the zone. - - - IP Address: The IP address associated with the VPC - gateway. - - - Gateway: The gateway through which the traffic is - routed to and from the VPC. - - - Netmask: The netmask associated with the VPC - gateway. - - - VLAN: The VLAN associated with the VPC - gateway. - - - Source NAT: Select this option to enable the source - NAT service on the VPC private gateway. - See . - - - ACL: Controls both ingress and egress traffic on a - VPC private gateway. By default, all the traffic is blocked. - See . - - - The new gateway appears in the list. You can repeat these steps to add more gateway for - this VPC. - - -
- Source NAT on Private Gateway - You might want to deploy multiple VPCs with the same super CIDR and guest tier CIDR. - Therefore, multiple guest VMs from different VPCs can have the same IPs to reach an enterprise - data center through the private gateway. In such cases, a NAT service needs to be configured on - the private gateway to avoid IP conflicts. If Source NAT is enabled, the guest VMs in the VPC - reach the enterprise network via the private gateway IP address by using the NAT service. - The Source NAT service on a private gateway can be enabled while adding the private - gateway. On deletion of a private gateway, source NAT rules specific to the private gateway - are deleted. - To enable source NAT on existing private gateways, delete them and create them afresh with - source NAT. -
-
- ACL on Private Gateway - The traffic on the VPC private gateway is controlled by creating both ingress and egress - network ACL rules. The ACLs contains both allow and deny rules. As per the rule, all the - ingress traffic to the private gateway interface and all the egress traffic out from the - private gateway interface are blocked. - You can change this default behaviour while creating a private gateway. Alternatively, you - can do the following: - - - In a VPC, identify the Private Gateway you want to work with. - - - In the Private Gateway page, do either of the following: - - - Use the Quickview. See . - - - Use the Details tab. See through . - - - - - In the Quickview of the selected Private Gateway, click Replace ACL, select the ACL - rule, then click OK - - - Click the IP address of the Private Gateway you want to work with. - - - In the Detail tab, click the Replace ACL button. - - - - - replace-acl-icon.png: button to replace the default ACL behaviour. - - - The Replace ACL dialog is displayed. - - - select the ACL rule, then click OK. - Wait for few seconds. You can see that the new ACL rule is displayed in the Details - page. - - -
-
- Creating a Static Route - &PRODUCT; enables you to specify routing for the VPN connection you create. You can enter - one or more CIDR addresses to indicate which traffic is to be routed back to the gateway. - - In a VPC, identify the Private Gateway you want to work with. - - In the Private Gateway page, click the IP address of the Private Gateway you want to - work with. - - Select the Static Routes tab. - - Specify the CIDR of the destination network. - - Click Add. - Wait for a few seconds until the new route is created. - -
-
- Blacklisting Routes - &PRODUCT; enables you to block a list of routes so that they are not assigned to any of - the VPC private gateways. Specify the list of routes that you want to blacklist in the - blacklisted.routes global parameter. Note that the parameter update affects - only new static route creations. If you block an existing static route, it remains intact and - continues functioning. You cannot add a static route if the route is blacklisted for the zone. -
-
diff --git a/docs/en-US/add-ingress-egress-rules.xml b/docs/en-US/add-ingress-egress-rules.xml deleted file mode 100644 index 2490cec43cc..00000000000 --- a/docs/en-US/add-ingress-egress-rules.xml +++ /dev/null @@ -1,131 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding Ingress and Egress Rules to a Security Group - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network - - - In Select view, choose Security Groups, then click the security group you want . - - - To add an ingress rule, click the Ingress Rules tab and fill out the following fields to - specify what network traffic is allowed into VM instances in this security group. If no - ingress rules are specified, then no traffic will be allowed in, except for responses to any - traffic that has been allowed out through an egress rule. - - - Add by CIDR/Account. Indicate whether the source of - the traffic will be defined by IP address (CIDR) or an existing security group in a - &PRODUCT; account (Account). Choose Account if you want to allow incoming traffic from - all VMs in another security group - - - Protocol. The networking protocol that sources will - use to send traffic to the security group. TCP and UDP are typically used for data - exchange and end-user communications. ICMP is typically used to send error messages or - network monitoring data. - - - Start Port, End Port. (TCP, UDP only) A range of - listening ports that are the destination for the incoming traffic. If you are opening a - single port, use the same number in both fields. - - - ICMP Type, ICMP Code. (ICMP only) The type of - message and error code that will be accepted. - - - CIDR. (Add by CIDR only) To accept only traffic - from IP addresses within a particular address block, enter a CIDR or a comma-separated - list of CIDRs. The CIDR is the base IP address of the incoming traffic. For example, - 192.168.0.0/22. To allow all CIDRs, set to 0.0.0.0/0. - - - Account, Security Group. (Add by Account only) To - accept only traffic from another security group, enter the &PRODUCT; account and name of - a security group that has already been defined in that account. 
To allow traffic between - VMs within the security group you are editing now, enter the same name you used in step - 7. - - - The following example allows inbound HTTP access from anywhere: - - - - - - httpaccess.png: allows inbound HTTP access from anywhere - - - - - To add an egress rule, click the Egress Rules tab and fill out the following fields to - specify what type of traffic is allowed to be sent out of VM instances in this security - group. If no egress rules are specified, then all traffic will be allowed out. Once egress - rules are specified, the following types of traffic are allowed out: traffic specified in - egress rules; queries to DNS and DHCP servers; and responses to any traffic that has been - allowed in through an ingress rule - - - Add by CIDR/Account. Indicate whether the - destination of the traffic will be defined by IP address (CIDR) or an existing security - group in a &PRODUCT; account (Account). Choose Account if you want to allow outgoing - traffic to all VMs in another security group. - - - Protocol. The networking protocol that VMs will use - to send outgoing traffic. TCP and UDP are typically used for data exchange and end-user - communications. ICMP is typically used to send error messages or network monitoring - data. - - - Start Port, End Port. (TCP, UDP only) A range of - listening ports that are the destination for the outgoing traffic. If you are opening a - single port, use the same number in both fields. - - - ICMP Type, ICMP Code. (ICMP only) The type of - message and error code that will be sent - - - CIDR. (Add by CIDR only) To send traffic only to IP - addresses within a particular address block, enter a CIDR or a comma-separated list of - CIDRs. The CIDR is the base IP address of the destination. For example, 192.168.0.0/22. - To allow all CIDRs, set to 0.0.0.0/0. - - - Account, Security Group. 
(Add by Account only) To - allow traffic to be sent to another security group, enter the &PRODUCT; account and name - of a security group that has already been defined in that account. To allow traffic - between VMs within the security group you are editing now, enter its name. - - - - - Click Add. - - -
diff --git a/docs/en-US/add-ip-range.xml b/docs/en-US/add-ip-range.xml deleted file mode 100644 index 6da0668ec2b..00000000000 --- a/docs/en-US/add-ip-range.xml +++ /dev/null @@ -1,124 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Multiple Subnets in Shared Network - &PRODUCT; provides you with the flexibility to add guest IP ranges from different subnets in - Basic zones and security groups-enabled Advanced zones. For security groups-enabled Advanced - zones, it implies multiple subnets can be added to the same VLAN. With the addition of this - feature, you will be able to add IP address ranges from the same subnet or from a different one - when IP addresses are exhausted. This in turn allows you to employ a higher number of subnets - and thus reduce the address management overhead. You can delete the IP ranges you have - added. -
- Prerequisites and Guidelines - - - This feature can only be implemented: - - - on IPv4 addresses - - - if virtual router is the DHCP provider - - - on KVM, xenServer, and VMware hypervisors - - - - - Manually configure the gateway of the new subnet before adding the IP range. - - - &PRODUCT; supports only one gateway for a subnet; overlapping subnets are not - currently supported - - -
-
- Adding Multiple Subnets to a Shared Network - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Infrastructure. - - - On Zones, click View More, then click the zone to which you want to work with.. - - - Click Physical Network. - - - In the Guest node of the diagram, click Configure. - - - Click Networks. - - - Select the networks you want to work with. - - - Click View IP Ranges. - - - Click Add IP Range. - The Add IP Range dialog is displayed, as follows: - - - - - - add-ip-range.png: adding an IP range to a network. - - - - - Specify the following: - All the fields are mandatory. - - - Gateway: The gateway for the tier you create. - Ensure that the gateway is within the Super CIDR range that you specified while - creating the VPC, and is not overlapped with the CIDR of any existing tier within the - VPC. - - - Netmask: The netmask for the tier you create. - For example, if the VPC CIDR is 10.0.0.0/16 and the network tier CIDR is - 10.0.1.0/24, the gateway of the tier is 10.0.1.1, and the netmask of the tier is - 255.255.255.0. - - - Start IP/ End IP: A range of IP addresses that - are accessible from the Internet and will be allocated to guest VMs. Enter the first - and last IP addresses that define a range that &PRODUCT; can assign to guest VMs - . - - - - - Click OK. - - -
-
diff --git a/docs/en-US/add-iso.xml b/docs/en-US/add-iso.xml deleted file mode 100644 index 25986e02e92..00000000000 --- a/docs/en-US/add-iso.xml +++ /dev/null @@ -1,151 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding an ISO - To make additional operating system or other software available for use with guest VMs, you - can add an ISO. The ISO is typically thought of as an operating system image, but you can also - add ISOs for other types of software, such as desktop applications that you want to be installed - as part of a template. - - Log in to the &PRODUCT; UI as an administrator or end user. - - In the left navigation bar, click Templates. - - In Select View, choose ISOs. - - Click Add ISO. - - In the Add ISO screen, provide the following: - - Name: Short name for the ISO image. For example, - CentOS 6.2 64-bit. - - Description: Display text for the ISO image. For - example, CentOS 6.2 64-bit. - - URL: The URL that hosts the ISO image. The - Management Server must be able to access this location via HTTP. If needed you can place - the ISO image directly on the Management Server - - Zone: Choose the zone where you want the ISO to be - available, or All Zones to make it available throughout &PRODUCT;. - - Bootable: Whether or not a guest could boot off - this ISO image. For example, a CentOS ISO is bootable, a Microsoft Office ISO is not - bootable. - - OS Type: This helps &PRODUCT; and the hypervisor - perform certain operations and make assumptions that improve the performance of the - guest. Select one of the following. - - - If the operating system of your desired ISO image is listed, choose it. - - - If the OS Type of the ISO is not listed or if the ISO is not bootable, choose - Other. - - - (XenServer only) If you want to boot from this ISO in PV mode, choose Other PV - (32-bit) or Other PV (64-bit) - - - (KVM only) If you choose an OS that is PV-enabled, the VMs created from this ISO - will have a SCSI (virtio) root disk. If the OS is not PV-enabled, the VMs will have - an IDE root disk.
The PV-enabled types are: - - - - - Fedora 13 - Fedora 12 - Fedora 11 - - - Fedora 10 - Fedora 9 - Other PV - - - Debian GNU/Linux - CentOS 5.3 - CentOS 5.4 - - - CentOS 5.5 - Red Hat Enterprise Linux 5.3 - Red Hat Enterprise Linux 5.4 - - - Red Hat Enterprise Linux 5.5 - Red Hat Enterprise Linux 6 - - - - - - - - - It is not recommended to choose an older version of the OS than the version in the - image. For example, choosing CentOS 5.4 to support a CentOS 6.2 image will usually not - work. In these cases, choose Other. - - - - Extractable: Choose Yes if the ISO should be - available for extraction. - - - Public: Choose Yes if this ISO should be available - to other users. - - - Featured: Choose Yes if you would like this ISO to - be more prominent for users to select. The ISO will appear in the Featured ISOs list. - Only an administrator can make an ISO Featured. - - - - - Click OK. - The Management Server will download the ISO. Depending on the size of the ISO, this may - take a long time. The ISO status column will display Ready once it has been successfully - downloaded into secondary storage. Clicking Refresh updates the download percentage. - - - Important: Wait for the ISO to finish downloading. If - you move on to the next task and try to use the ISO right away, it will appear to fail. The - entire ISO must be available before &PRODUCT; can work with it. - - -
diff --git a/docs/en-US/add-load-balancer-rule.xml b/docs/en-US/add-load-balancer-rule.xml deleted file mode 100644 index ef3305e98e8..00000000000 --- a/docs/en-US/add-load-balancer-rule.xml +++ /dev/null @@ -1,102 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding a Load Balancer Rule - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - Click the name of the network where you want to load balance the traffic. - - - Click View IP Addresses. - - - Click the IP address for which you want to create the rule, then click the Configuration - tab. - - - In the Load Balancing node of the diagram, click View All. - In a Basic zone, you can also create a load balancing rule without acquiring or - selecting an IP address. &PRODUCT; internally assign an IP when you create the load - balancing rule, which is listed in the IP Addresses page when the rule is created. - To do that, select the name of the network, then click Add Load Balancer tab. Continue - with . - - - Fill in the following: - - - Name: A name for the load balancer rule. - - - Public Port: The port receiving incoming traffic to - be balanced. - - - Private Port: The port that the VMs will use to - receive the traffic. - - - Algorithm: Choose the load balancing algorithm you - want &PRODUCT; to use. &PRODUCT; supports a variety of well-known algorithms. If you are - not familiar with these choices, you will find plenty of information about them on the - Internet. - - - Stickiness: (Optional) Click Configure and choose - the algorithm for the stickiness policy. See . - - - AutoScale: Click Configure and complete the - AutoScale configuration as explained in . - - Health Check: (Optional; NetScaler load balancers only) Click - Configure and fill in the characteristics of the health check policy. See . - - Ping path (Optional): Sequence of destinations to which to send health check queries. - Default: / (all). - Response time (Optional): How long to wait for a response from the health check (2 - 60 seconds). - Default: 5 seconds. - Interval time (Optional): Amount of time between health checks (1 second - 5 minutes). 
- Default value is set in the global configuration parameter lbrule_health check_time_interval. - Healthy threshold (Optional): Number of consecutive health check successes - that are required before declaring an instance healthy. - Default: 2. - Unhealthy threshold (Optional): Number of consecutive health check failures that are required before declaring an instance unhealthy. - Default: 10. - - - - - Click Add VMs, then select two or more VMs that will divide the load of incoming - traffic, and click Apply. - The new load balancer rule appears in the list. You can repeat these steps to add more - load balancer rules for this IP address. - - -
diff --git a/docs/en-US/add-loadbalancer-rule-vpc.xml b/docs/en-US/add-loadbalancer-rule-vpc.xml deleted file mode 100644 index 90247b0a6f9..00000000000 --- a/docs/en-US/add-loadbalancer-rule-vpc.xml +++ /dev/null @@ -1,462 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Adding Load Balancing Rules on a VPC - In a VPC, you can configure two types of load balancing—external LB and internal LB. - External LB is nothing but a LB rule created to redirect the traffic received at a public IP of - the VPC virtual router. The traffic is load balanced within a tier based on your configuration. - Citrix NetScaler and VPC virtual router are supported for external LB. When you use internal LB - service, traffic received at a tier is load balanced across different VMs within that tier. For - example, traffic reached at Web tier is redirected to another VM in that tier. External load - balancing devices are not supported for internal LB. The service is provided by a internal LB VM - configured on the target tier. -
- Load Balancing Within a Tier (External LB) - A &PRODUCT; user or administrator may create load balancing rules that balance traffic - received at a public IP to one or more VMs that belong to a network tier that provides load - balancing service in a VPC. A user creates a rule, specifies an algorithm, and assigns the - rule to a set of VMs within a tier. -
- Enabling NetScaler as the LB Provider on a VPC Tier - - - Add and enable Netscaler VPX in dedicated mode. - Netscaler can be used in a VPC environment only if it is in dedicated mode. - - - Create a network offering, as given in . - - - Create a VPC with Netscaler as the Public LB provider. - For more information, see . - - - For the VPC, acquire an IP. - - - Create an external load balancing rule and apply, as given in . - - -
-
- Creating a Network Offering for External LB - To have external LB support on VPC, create a network offering as follows: - - - Log in to the &PRODUCT; UI as a user or admin. - - - From the Select Offering drop-down, choose Network Offering. - - - Click Add Network Offering. - - - In the dialog, make the following choices: - - - Name: Any desired name for the network - offering. - - - Description: A short description of the - offering that can be displayed to users. - - - Network Rate: Allowed data transfer rate in MB - per second. - - - Traffic Type: The type of network traffic that - will be carried on the network. - - - Guest Type: Choose whether the guest network is - isolated or shared. - - - Persistent: Indicate whether the guest network - is persistent or not. The network that you can provision without having to deploy a - VM on it is termed persistent network. - - - VPC: This option indicate whether the guest - network is Virtual Private Cloud-enabled. A Virtual Private Cloud (VPC) is a - private, isolated part of &PRODUCT;. A VPC can have its own virtual network topology - that resembles a traditional physical network. For more information on VPCs, see - . - - - Specify VLAN: (Isolated guest networks only) - Indicate whether a VLAN should be specified when this offering is used. - - - Supported Services: Select Load Balancer. Use - Netscaler or VpcVirtualRouter. - - - Load Balancer Type: Select Public LB from the - drop-down. - - - LB Isolation: Select Dedicated if Netscaler is - used as the external LB provider. - - - System Offering: Choose the system service - offering that you want virtual routers to use in this network. - - - Conserve mode: Indicate whether to use conserve - mode. In this mode, network resources are allocated only when the first virtual - machine starts in the network. - - - - - Click OK and the network offering is created. - - -
-
- Creating an External LB Rule - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC, for which you want to configure load - balancing rules. - The VPC page is displayed where all the tiers you created listed in a - diagram. - For each tier, the following options are displayed: - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - In the Router node, select Public IP Addresses. - The IP Addresses page is displayed. - - - Click the IP address for which you want to create the rule, then click the - Configuration tab. - - - In the Load Balancing node of the diagram, click View All. - - - Select the tier to which you want to apply the rule. - - - Specify the following: - - - Name: A name for the load balancer rule. - - - Public Port: The port that receives the - incoming traffic to be balanced. - - - Private Port: The port that the VMs will use to - receive the traffic. - - - Algorithm. Choose the load balancing algorithm - you want &PRODUCT; to use. &PRODUCT; supports the following well-known - algorithms: - - - Round-robin - - - Least connections - - - Source - - - - - Stickiness. (Optional) Click Configure and - choose the algorithm for the stickiness policy. See Sticky Session Policies for Load - Balancer Rules. - - - Add VMs: Click Add VMs, then select two or more - VMs that will divide the load of incoming traffic, and click Apply. - - - - - The new load balancing rule appears in the list. You can repeat these steps to add more - load balancing rules for this IP address. -
-
-
- Load Balancing Across Tiers - &PRODUCT; supports sharing workload across different tiers within your VPC. Assume that - multiple tiers are set up in your environment, such as Web tier and Application tier. Traffic - to each tier is balanced on the VPC virtual router on the public side, as explained in . If you want the traffic coming from the Web tier to - the Application tier to be balanced, use the internal load balancing feature offered by - &PRODUCT;. -
- How Does Internal LB Work in VPC? - In this figure, a public LB rule is created for the public IP 72.52.125.10 with public - port 80 and private port 81. The LB rule, created on the VPC virtual router, is applied on - the traffic coming from the Internet to the VMs on the Web tier. On the Application tier two - internal load balancing rules are created. An internal LB rule for the guest IP 10.10.10.4 - with load balancer port 23 and instance port 25 is configured on the VM, InternalLBVM1. - Another internal LB rule for the guest IP 10.10.10.4 with load balancer port 45 and instance - port 46 is configured on the VM, InternalLBVM1. Another internal LB rule for the guest IP - 10.10.10.6, with load balancer port 23 and instance port 25 is configured on the VM, - InternalLBVM2. - - - - - - vpc-lb.png: Configuring internal LB for VPC - - -
-
- Guidelines - - Internal LB and Public LB are mutually exclusive on a tier. If the tier has LB on the public - side, then it can't have the Internal LB. - Internal LB is supported just on VPC networks in &PRODUCT; 4.2 release. - Only Internal LB VM can act as the Internal LB provider in &PRODUCT; 4.2 release. - Network upgrade is not supported from the network offering with Internal LB to the network - offering with Public LB. - Multiple tiers can have internal LB support in a VPC. - Only one tier can have Public LB support in a VPC. - -
-
- Enabling Internal LB on a VPC Tier - - - Create a network offering, as given in . - - - Create an internal load balancing rule and apply, as given in . - - -
-
- Creating a Network Offering for Internal LB - To have internal LB support on VPC, either use the default offering, - DefaultIsolatedNetworkOfferingForVpcNetworksWithInternalLB, or create a network offering as - follows: - - - Log in to the &PRODUCT; UI as a user or admin. - - - From the Select Offering drop-down, choose Network Offering. - - - Click Add Network Offering. - - - In the dialog, make the following choices: - - - Name: Any desired name for the network - offering. - - - Description: A short description of the - offering that can be displayed to users. - - - Network Rate: Allowed data transfer rate in MB - per second. - - - Traffic Type: The type of network traffic that - will be carried on the network. - - - Guest Type: Choose whether the guest network is - isolated or shared. - - - Persistent: Indicate whether the guest network - is persistent or not. The network that you can provision without having to deploy a - VM on it is termed persistent network. - - - VPC: This option indicate whether the guest - network is Virtual Private Cloud-enabled. A Virtual Private Cloud (VPC) is a - private, isolated part of &PRODUCT;. A VPC can have its own virtual network topology - that resembles a traditional physical network. For more information on VPCs, see - . - - - Specify VLAN: (Isolated guest networks only) - Indicate whether a VLAN should be specified when this offering is used. - - - Supported Services: Select Load Balancer. - Select InternalLbVM from the provider list. - - - Load Balancer Type: Select Internal LB from the - drop-down. - - - System Offering: Choose the system service - offering that you want virtual routers to use in this network. - - - Conserve mode: Indicate whether to use conserve - mode. In this mode, network resources are allocated only when the first virtual - machine starts in the network. - - - - - Click OK and the network offering is created. - - -
-
- Creating an Internal LB Rule - When you create the Internal LB rule and applies to a VM, an Internal LB VM, which is - responsible for load balancing, is created. - You can view the created Internal LB VM in the Instances page if you navigate to - Infrastructure > Zones > - <zone_ name> > <physical_network_name> > Network Service - Providers > Internal LB VM. You can manage the - Internal LB VMs as and when required from the location. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Locate the VPC for which you want to configure internal LB, then click - Configure. - The VPC page is displayed where all the tiers you created listed in a - diagram. - - - Locate the Tier for which you want to configure an internal LB rule, click Internal - LB. - In the Internal LB page, click Add Internal LB. - - - In the dialog, specify the following: - - - Name: A name for the load balancer rule. - - - Description: A short description of the rule - that can be displayed to users. - - - Source IP Address: (Optional) The source IP - from which traffic originates. The IP is acquired from the CIDR of that particular - tier on which you want to create the Internal LB rule. If not specified, the IP - address is automatically allocated from the network CIDR. - For every Source IP, a new Internal LB VM is created for load balancing. - - - Source Port: The port associated with the - source IP. Traffic on this port is load balanced. - - - Instance Port: The port of the internal LB - VM. - - - Algorithm. Choose the load balancing algorithm - you want &PRODUCT; to use. &PRODUCT; supports the following well-known - algorithms: - - - Round-robin - - - Least connections - - - Source - - - - - - -
-
-
diff --git a/docs/en-US/add-members-to-projects.xml b/docs/en-US/add-members-to-projects.xml deleted file mode 100644 index 39c3edfb2c3..00000000000 --- a/docs/en-US/add-members-to-projects.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding Members to a Project - New members can be added to a project by the project’s administrator, the domain administrator of the domain where the project resides or any parent domain, or the &PRODUCT; root administrator. There are two ways to add members in &PRODUCT;, but only one way is enabled at a time: - - If invitations have been enabled, you can send invitations to new members. - If invitations are not enabled, you can add members directly through the UI. - - - -
- diff --git a/docs/en-US/add-more-clusters.xml b/docs/en-US/add-more-clusters.xml deleted file mode 100644 index 894b4d80737..00000000000 --- a/docs/en-US/add-more-clusters.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Add More Clusters (Optional) - You need to tell &PRODUCT; about the hosts that it will manage. Hosts exist inside clusters, - so before you begin adding hosts to the cloud, you must add at least one cluster. - - - - -
diff --git a/docs/en-US/add-password-management-to-templates.xml b/docs/en-US/add-password-management-to-templates.xml deleted file mode 100644 index 60de951a1e5..00000000000 --- a/docs/en-US/add-password-management-to-templates.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Adding Password Management to Your Templates - &PRODUCT; provides an optional password reset feature that allows users to set a temporary - admin or root password as well as reset the existing admin or root password from the &PRODUCT; - UI. - To enable the Reset Password feature, you will need to download an additional script to - patch your template. When you later upload the template into &PRODUCT;, you can specify whether - reset admin/root password feature should be enabled for this template. - The password management feature works always resets the account password on instance boot. - The script does an HTTP call to the virtual router to retrieve the account password that should - be set. As long as the virtual router is accessible the guest will have access to the account - password that should be used. When the user requests a password reset the management server - generates and sends a new password to the virtual router for the account. Thus an instance - reboot is necessary to effect any password changes. - If the script is unable to contact the virtual router during instance boot it will not set - the password but boot will continue normally. - - -
diff --git a/docs/en-US/add-portforward-rule-vpc.xml b/docs/en-US/add-portforward-rule-vpc.xml deleted file mode 100644 index 5b1bb49a0a3..00000000000 --- a/docs/en-US/add-portforward-rule-vpc.xml +++ /dev/null @@ -1,117 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding a Port Forwarding Rule on a VPC - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC to which you want to deploy the VMs. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - For each tier, the following options are displayed: - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - In the Router node, select Public IP Addresses. - The IP Addresses page is displayed. - - - Click the IP address for which you want to create the rule, then click the Configuration - tab. - - - In the Port Forwarding node of the diagram, click View All. - - - Select the tier to which you want to apply the rule. - - - Specify the following: - - - Public Port: The port to which public traffic will - be addressed on the IP address you acquired in the previous step. - - - Private Port: The port on which the instance is - listening for forwarded public traffic. - - - Protocol: The communication protocol in use between - the two ports. - - - TCP - - - UDP - - - - - Add VM: Click Add VM. Select the name of the - instance to which this rule applies, and click Apply. - You can test the rule by opening an SSH session to the instance. - - - - -
diff --git a/docs/en-US/add-primary-storage.xml b/docs/en-US/add-primary-storage.xml deleted file mode 100644 index a43567f5562..00000000000 --- a/docs/en-US/add-primary-storage.xml +++ /dev/null @@ -1,108 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding Primary Storage - - Ensure that nothing stored on the server. Adding the server to CloudStack will destroy any - existing data. - - When you create a new zone, the first primary storage is added as part of that procedure. - You can add primary storage servers at any time, such as when adding a new cluster or adding - more servers to an existing cluster. - - - Log in to the &PRODUCT; UI. - - - In the left navigation, choose Infrastructure. In Zones, click View More, then click the - zone in which you want to add the primary storage. - - - Click the Compute tab. - - - In the Primary Storage node of the diagram, click View All. - - - Click Add Primary Storage. - - - Provide the following information in the dialog. The information required varies - depending on your choice in Protocol. - - - Pod. The pod for the storage device. - - - Cluster. The cluster for the storage device. - - - Name. The name of the storage device - - - Protocol. For XenServer, choose either NFS, iSCSI, or PreSetup. For KVM, choose NFS - or SharedMountPoint. For vSphere choose either VMFS (iSCSI or FiberChannel) or - NFS - - - Server (for NFS, iSCSI, or PreSetup). The IP address or DNS name of the storage - device - - - Server (for VMFS). The IP address or DNS name of the vCenter server. - - - Path (for NFS). In NFS this is the exported path from the server. - - - Path (for VMFS). In vSphere this is a combination of the datacenter name and the - datastore name. The format is "/" datacenter name "/" datastore name. For example, - "/cloud.dc.VM/cluster1datastore". - - - Path (for SharedMountPoint). With KVM this is the path on each host that is where - this primary storage is mounted. For example, "/mnt/primary". - - - SR Name-Label (for PreSetup). Enter the name-label of the SR that has been set up - outside &PRODUCT;. - - - Target IQN (for iSCSI). In iSCSI this is the IQN of the target. For example, - iqn.1986-03.com.sun:02:01ec9bb549-1271378984 - - - Lun # (for iSCSI). 
In iSCSI this is the LUN number. For example, 3. - - - Tags (optional). The comma-separated list of tags for this storage device. It should - be an equivalent set or superset of the tags on your disk offerings. - - - The tag sets on primary storage across clusters in a Zone must be identical. For - example, if cluster A provides primary storage that has tags T1 and T2, all other clusters - in the Zone must also provide primary storage that has tags T1 and T2. - - - Click OK. - - -
diff --git a/docs/en-US/add-projects-members-from-ui.xml b/docs/en-US/add-projects-members-from-ui.xml deleted file mode 100644 index 670a0ec75ab..00000000000 --- a/docs/en-US/add-projects-members-from-ui.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding Project Members From the UI - The steps below tell how to add a new member to a project if the invitations feature is not enabled in the cloud. If the invitations feature is enabled cloud,as described in , use the procedure in . - - Log in to the &PRODUCT; UI. - In the left navigation, click Projects. - In Select View, choose Projects. - Click the name of the project you want to work with. - Click the Accounts tab. The current members of the project are listed. - Type the account name of the new member you want to add, and click Add Account. You can add only people who have an account in this cloud and within the same domain as the project. - -
- diff --git a/docs/en-US/add-remove-nic-ui.xml b/docs/en-US/add-remove-nic-ui.xml deleted file mode 100644 index a671329eb00..00000000000 --- a/docs/en-US/add-remove-nic-ui.xml +++ /dev/null @@ -1,152 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Reconfiguring Networks in VMs - &PRODUCT; provides you the ability to move VMs between networks and reconfigure a VM's - network. You can remove a VM from a network and add to a new network. You can also change the - default network of a virtual machine. With this functionality, hybrid or traditional server - loads can be accommodated with ease. - This feature is supported on XenServer, VMware, and KVM hypervisors. -
- Prerequisites - Ensure that vm-tools are running on guest VMs for adding or removing networks to work on - VMware hypervisor. -
-
- Adding a Network - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, click Instances. - - - Choose the VM that you want to work with. - - - Click the NICs tab. - - - Click Add network to VM. - The Add network to VM dialog is displayed. - - - In the drop-down list, select the network that you would like to add this VM - to. - A new NIC is added for this network. You can view the following details in the NICs - page: - - - ID - - - Network Name - - - Type - - - IP Address - - - Gateway - - - Netmask - - - Is default - - - CIDR (for IPv6) - - - - -
-
- Removing a Network - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, click Instances. - - - Choose the VM that you want to work with. - - - Click the NICs tab. - - - Locate the NIC you want to remove. - - - Click Remove NIC button. - - - - - remove-nic.png: button to remove a NIC - - - - - Click Yes to confirm. - - -
-
- Selecting the Default Network - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, click Instances. - - - Choose the VM that you want to work with. - - - Click the NICs tab. - - - Locate the NIC you want to work with. - - - Click the Set default NIC button. - - - - - set-default-nic.png: button to set a NIC as default one. - - - - - Click Yes to confirm. - - -
-
diff --git a/docs/en-US/add-remove-nic.xml b/docs/en-US/add-remove-nic.xml deleted file mode 100644 index fb23390b31b..00000000000 --- a/docs/en-US/add-remove-nic.xml +++ /dev/null @@ -1,133 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Reconfiguring Networks in VMs - &PRODUCT; provides you the ability to move VMs between networks and reconfigure a VM's - network. You can remove a VM from a network and add to a new network. You can - also change the default network of a virtual machine. With this functionality, hybrid - or traditional server loads can be accommodated with ease. - This feature is supported on XenServer and KVM hypervisors. - The following APIs have been added to support this feature. These API calls can function - only while the VM is in running or stopped state. -
- Prerequisites - Ensure that vm-tools are running on guest VMs for adding or removing networks to work on VMware hypervisor. -
-
- addNicToVirtualMachine - The addNicToVirtualMachine API adds a new NIC to the specified VM on a selected - network. - - - - - parameter - description - Value - - - - - virtualmachineid - The unique ID of the VM to which the NIC is to be added. - true - - - networkid - The unique ID of the network the NIC that you add should apply - to. - true - - - ipaddress - The IP address of the VM on the network. - false - - - - - The network and VM must reside in the same zone. Two VMs with the same name cannot reside - in the same network. Therefore, adding a second VM that duplicates a name on a network will - fail. -
-
- removeNicFromVirtualMachine - The removeNicFromVirtualMachine API removes a NIC from the specified VM on a selected - network. - - - - - parameter - description - Value - - - - - virtualmachineid - The unique ID of the VM from which the NIC is to be removed. - - true - - - nicid - The unique ID of the NIC that you want to remove. - true - - - - - Removing the default NIC is not allowed. -
-
- updateDefaultNicForVirtualMachine - The updateDefaultNicForVirtualMachine API updates the specified NIC to be the default one - for a selected VM. - The NIC is only updated in the database. You must manually update the default NIC on the - VM. You get an alert to manually update the NIC. - - - - - parameter - description - Value - - - - - virtualmachineid - The unique ID of the VM for which you want to specify the default NIC. - - true - - - nicid - The unique ID of the NIC that you want to set as the default - one. - true - - - - -
-
diff --git a/docs/en-US/add-secondary-storage.xml b/docs/en-US/add-secondary-storage.xml deleted file mode 100644 index 318a6ea79b6..00000000000 --- a/docs/en-US/add-secondary-storage.xml +++ /dev/null @@ -1,48 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding Secondary Storage - - Be sure there is nothing stored on the server. Adding the server to CloudStack will - destroy any existing data. - - When you create a new zone, the first secondary storage is added as part of that procedure. - You can add secondary storage servers at any time to add more servers to an existing - zone. - - - If you are going to use Swift for cloud-wide secondary storage, you must add the Swift - storage to &PRODUCT; before you add the local zone secondary storage servers. - - - To prepare for local zone secondary storage, you should have created and mounted an NFS - share during Management Server installation. - - - Make sure you prepared the system VM template during Management Server - installation. - - - 4. Now that the secondary storage server for per-zone storage is prepared, add it to - &PRODUCT;. Secondary storage is added as part of the procedure for adding a new zone. - - -
diff --git a/docs/en-US/add-security-group.xml b/docs/en-US/add-security-group.xml deleted file mode 100644 index 85a6ba0b38a..00000000000 --- a/docs/en-US/add-security-group.xml +++ /dev/null @@ -1,49 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding a Security Group - A user or administrator can define a new security group. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network - - - In Select view, choose Security Groups. - - - Click Add Security Group. - - - Provide a name and description. - - - Click OK. - The new security group appears in the Security Groups Details tab. - - - To make the security group useful, continue to Adding Ingress and Egress Rules to a - Security Group. - - -
diff --git a/docs/en-US/add-tier.xml b/docs/en-US/add-tier.xml deleted file mode 100644 index 94a8237c066..00000000000 --- a/docs/en-US/add-tier.xml +++ /dev/null @@ -1,102 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding Tiers - Tiers are distinct locations within a VPC that act as isolated networks, which do not have - access to other tiers by default. Tiers are set up on different VLANs that can communicate with - each other by using a virtual router. Tiers provide inexpensive, low latency network - connectivity to other tiers within the VPC. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPC that you have created for the account is listed in the page. - - The end users can see their own VPCs, while root and domain admin can see any VPC they - are authorized to see. - - - - Click the Configure button of the VPC for which you want to set up tiers. - - - Click Create network. - The Add new tier dialog is displayed, as follows: - - - - - - add-tier.png: adding a tier to a vpc. - - - If you have already created tiers, the VPC diagram is displayed. Click Create Tier to - add a new tier. - - - Specify the following: - All the fields are mandatory. - - - Name: A unique name for the tier you create. - - - Network Offering: The following default network - offerings are listed: Internal LB, DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, - DefaultIsolatedNetworkOfferingForVpcNetworks - In a VPC, only one tier can be created by using LB-enabled network offering. - - - Gateway: The gateway for the tier you create. - Ensure that the gateway is within the Super CIDR range that you specified while creating - the VPC, and is not overlapped with the CIDR of any existing tier within the VPC. - - - VLAN: The VLAN ID for the tier that the root admin - creates. - This option is only visible if the network offering you selected is - VLAN-enabled. - For more information, see the Assigning VLANs to Isolated - Networks section in the &PRODUCT; Administration Guide. - For more information, see . - - - Netmask: The netmask for the tier you create. 
- For example, if the VPC CIDR is 10.0.0.0/16 and the network tier CIDR is - 10.0.1.0/24, the gateway of the tier is 10.0.1.1, and the netmask of the tier is - 255.255.255.0. - - - - - Click OK. - - - Continue with configuring access control list for the tier. - - -
diff --git a/docs/en-US/add-vm-tier-sharednw.xml b/docs/en-US/add-vm-tier-sharednw.xml deleted file mode 100644 index a68860419eb..00000000000 --- a/docs/en-US/add-vm-tier-sharednw.xml +++ /dev/null @@ -1,62 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Deploying VMs to VPC Tier and Shared Networks - &PRODUCT; allows you deploy VMs on a VPC tier and one or more shared networks. With this - feature, VMs deployed in a multi-tier application can receive monitoring services via a shared - network provided by a service provider. - - - Log in to the &PRODUCT; UI as an administrator. - - - In the left navigation, choose Instances. - - - Click Add Instance. - - - Select a zone. - - - Select a template or ISO, then follow the steps in the wizard. - - - Ensure that the hardware you have allows starting the selected service offering. - - - Under Networks, select the desired networks for the VM you are launching. - You can deploy a VM to a VPC tier and multiple shared networks. - - - - - - addvm-tier-sharednw.png: adding a VM to a VPC tier and shared network. - - - - - Click Next, review the configuration and click Launch. - Your VM will be deployed to the selected VPC tier and shared network. - - -
diff --git a/docs/en-US/add-vm-to-tier.xml b/docs/en-US/add-vm-to-tier.xml deleted file mode 100644 index c7d769d9d11..00000000000 --- a/docs/en-US/add-vm-to-tier.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Deploying VMs to the Tier - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC to which you want to deploy the VMs. - The VPC page is displayed where all the tiers you have created are listed. - - - Click Virtual Machines tab of the tier to which you want to add a VM. - - - - - - add-vm-vpc.png: adding a VM to a vpc. - - - The Add Instance page is displayed. - Follow the on-screen instruction to add an instance. For information on adding an - instance, see the Installation Guide. - - -
diff --git a/docs/en-US/add-vpc.xml b/docs/en-US/add-vpc.xml deleted file mode 100644 index b8034c4b4c8..00000000000 --- a/docs/en-US/add-vpc.xml +++ /dev/null @@ -1,80 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Adding a Virtual Private Cloud - When creating the VPC, you simply provide the zone and a set of IP addresses for the VPC - network address space. You specify this set of addresses in the form of a Classless Inter-Domain - Routing (CIDR) block. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - - - Click Add VPC. The Add VPC page is displayed as follows: - - - - - - add-vpc.png: adding a vpc. - - - Provide the following information: - - - Name: A short name for the VPC that you are - creating. - - - Description: A brief description of the VPC. - - - Zone: Choose the zone where you want the VPC to be - available. - - - Super CIDR for Guest Networks: Defines the CIDR - range for all the tiers (guest networks) within a VPC. When you create a tier, ensure - that its CIDR is within the Super CIDR value you enter. The CIDR must be RFC1918 - compliant. - - - DNS domain for Guest Networks: If you want to - assign a special domain name, specify the DNS suffix. This parameter is applied to all - the tiers within the VPC. That implies, all the tiers you create in the VPC belong to - the same DNS domain. If the parameter is not specified, a DNS domain name is generated - automatically. - - - Public Load Balancer Provider: You have two - options: VPC Virtual Router and Netscaler. - - - - Click OK. - -
diff --git a/docs/en-US/added-API-commands-4-0.xml b/docs/en-US/added-API-commands-4-0.xml deleted file mode 100644 index 2d86ba4d6dc..00000000000 --- a/docs/en-US/added-API-commands-4-0.xml +++ /dev/null @@ -1,164 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Added API Commands in 4.0.0-incubating - - - createCounter (Adds metric counter) - - - deleteCounter (Deletes a counter) - - - listCounters (List the counters) - - - createCondition (Creates a condition) - - - deleteCondition (Removes a condition) - - - listConditions (List Conditions for the specific user) - - - createTags. Add tags to one or more resources. Example: - command=createTags -&resourceIds=1,10,12 -&resourceType=userVm -&tags[0].key=region -&tags[0].value=canada -&tags[1].key=city -&tags[1].value=Toronto - - - deleteTags. Remove tags from one or more resources. Example: - command=deleteTags -&resourceIds=1,12 -&resourceType=Snapshot -&tags[0].key=city - - - listTags (Show currently defined resource tags) - - - createVPC (Creates a VPC) - - - listVPCs (Lists VPCs) - - - deleteVPC (Deletes a VPC) - - - updateVPC (Updates a VPC) - - - restartVPC (Restarts a VPC) - - - createVPCOffering (Creates VPC offering) - - - updateVPCOffering (Updates VPC offering) - - - deleteVPCOffering (Deletes VPC offering) - - - listVPCOfferings (Lists VPC offerings) - - - createPrivateGateway (Creates a private gateway) - - - listPrivateGateways (List private gateways) - - - deletePrivateGateway (Deletes a Private gateway) - - - createNetworkACL (Creates a ACL rule the given network (the network has to belong to - VPC)) - - - deleteNetworkACL (Deletes a Network ACL) - - - listNetworkACLs (Lists all network ACLs) - - - createStaticRoute (Creates a static route) - - - deleteStaticRoute (Deletes a static route) - - - listStaticRoutes (Lists all static routes) - - - createVpnCustomerGateway (Creates site to site vpn customer gateway) - - - createVpnGateway (Creates site to site vpn local gateway) - - - createVpnConnection (Create site to site vpn connection) - - - deleteVpnCustomerGateway (Delete site to site vpn customer gateway) - - - deleteVpnGateway (Delete site to site vpn gateway) - - - deleteVpnConnection (Delete site to site vpn connection) - - - updateVpnCustomerGateway 
(Update site to site vpn customer gateway) - - - resetVpnConnection (Reset site to site vpn connection) - - - listVpnCustomerGateways (Lists site to site vpn customer gateways) - - - listVpnGateways (Lists site 2 site vpn gateways) - - - listVpnConnections (Lists site to site vpn connection gateways) - - - enableCiscoNexusVSM (Enables Nexus 1000v dvSwitch in &PRODUCT;.) - - - disableCiscoNexusVSM (Disables Nexus 1000v dvSwitch in &PRODUCT;.) - - - deleteCiscoNexusVSM (Deletes Nexus 1000v dvSwitch in &PRODUCT;.) - - - listCiscoNexusVSMs (Lists the control VLAN ID, packet VLAN ID, and data VLAN ID, as well - as the IP address of the Nexus 1000v dvSwitch.) - - -
diff --git a/docs/en-US/added-API-commands-4-1.xml b/docs/en-US/added-API-commands-4-1.xml deleted file mode 100644 index 006c65a5616..00000000000 --- a/docs/en-US/added-API-commands-4-1.xml +++ /dev/null @@ -1,73 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Added API Commands in 4.1 - - - createEgressFirewallRules (creates an egress firewall rule on the guest network.) - - - deleteEgressFirewallRules (deletes a egress firewall rule on the guest network.) - - - listEgressFirewallRules (lists the egress firewall rules configured for a guest - network.) - - - resetSSHKeyForVirtualMachine (Resets the SSHkey for virtual machine.) - - - addBaremetalHost (Adds a new host.) - - - addNicToVirtualMachine (Adds a new NIC to the specified VM on a selected - network.) - - - removeNicFromVirtualMachine (Removes the specified NIC from a selected VM.) - - - updateDefaultNicForVirtualMachine (Updates the specified NIC to be the default one for a - selected VM.) - - - addRegion (Registers a Region into another Region.) - - - updateRegion (Updates Region details: ID, Name, Endpoint, User API Key, and User Secret - Key.) - - - removeRegion (Removes a Region from current Region.) - - - listRegions (Get all the Regions. They can be filtered by using the ID or Name.) - - - getUser (This API can only be used by the Admin. Get user details by using the API Key.) - - - addRegion (Add a region) - removeRegion (Delete a region) - updateRegion (Modify attributes of a region) - listRegions (List regions) - -
diff --git a/docs/en-US/added-API-commands-4.2.xml b/docs/en-US/added-API-commands-4.2.xml deleted file mode 100644 index 14a5f64b8ee..00000000000 --- a/docs/en-US/added-API-commands-4.2.xml +++ /dev/null @@ -1,554 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Added API Commands in 4.2 - - - addImageStore - Adds all types of secondary storage providers, S3/Swift/NFS. - - - createSecondaryStagingStore - Adds a staging secondary storage in each zone. - - - listImageStores - Lists all secondary storages, S3/Swift/NFS. - - - listSecondaryStagingStores - Lists all staging secondary storages. - - - addIpToNic - Adds an IP address to the NIC from the guest subnet. The request parameters are: nicid, - ipaddress. - The response parameters are: nicid, ipaddress, networkid - - - removeIpFromNic - Removes the reserved IP for the NIC. The request parameters is: id. - The response parameters are: true, false - - - listNics - Lists the NIC details of the user VM; the API response also contains the Secondary IP - addresses of the NIC. The request parameters are: nicid, virtualmachineid. - The response parameters are: id, ipaddress, secondaryips, gateway, netmask, macaddr, - broadcasturi, isolationuri, isdefault, - - - deleteAlerts - Deletes the specified alerts. The request parameters are: ids (allowed to pass one or - more IDs separated by comma); type (string); olderthan (yyyy-mm-dd format). - The response parameters are: true, false - - - archiveAlerts - Archives the specified alerts. The request parameters are: ids (allowed to pass one or - more IDs separated by comma); type (string); olderthan (yyyy-mm-dd format). - The response parameters are: true, false - - - deleteEvents - Deletes the specified events. The request parameters are: ids (allowed to pass one or - more IDs separated by comma); type (string); olderthan (yyyy-mm-dd format). - The response parameters are: true, false - - - archiveEvents - Archives the specified events. The request parameters are: ids (allowed to pass one or - more IDs separated by comma); type (string); olderthan (yyyy-mm-dd format). - The response parameters are: true, false - - - createGlobalLoadBalancerRule - Creates a GSLB rule. 
The request parameters are name (the name of the global load - balancer rule); domain name ( the preferred domain name for the service); lb algorithm (the - algorithm used to load balance the traffic across the zones); session persistence (source IP - and HTTP cookie); account name; and domain Id. - - - assignToGlobalLoadBalancerRule - Assigns a load balancing rule or list of load balancing rules to GSLB. The request - parameters are: id (the UUID of global load balancer rule); loadbalancerrulelist (the list - load balancer rules that will be assigned to global load balancer rule. These are second - tier load balancing rules created with createLoadBalancerRule API. Weight is optional, the - default is 1). - - - removeFromGlobalLoadBalancerRule - Removes a load balancer rule association with global load balancer rule. The request - parameters are id (the UUID of global load balancer rule); loadbalancerrulelist (the list - load balancer rules that will be assigned to global load balancer rule). - - - deleteGlobalLoadBalancerRule - Deletes a global load balancer rule. The request parameters is: id (the unique ID of the - global load balancer rule). - - - listGlobalLoadBalancerRule - Lists load balancer rules. - The request parameters are: account (lists resources by account. Use with the domainid - parameter); domainid (lists only resources belonging to the domain specified); id (the - unique ID of the global load balancer rule); isrecursive (defaults to false; but if true, - lists all the resources from the parent specified by the domainid); keyword (lists by - keyword); listall (if set to false, lists only resources belonging to the command's caller; - if set to true, lists resources that the caller is authorized to see. Default value is - false); page; pagesize; projectid (lists objects by project); regionid ; tags (lists - resources by tags: key/value pairs). - - - updateGlobalLoadBalancerRule - Updates global load balancer rules. 
- The request parameters are: id (the unique ID of the global load balancer rule); account - (lists resources by account. Use with the domainid parameter); description (the description - of the load balancer rule); domainid (lists only resources belonging to the domain - specified); gslblbmethod (the load balancer algorithm that is used to distributed traffic - across the zones participating in global server load balancing, if not specified defaults to - round robin); gslbstickysessionmethodname (the session sticky method; if not specified - defaults to sourceip); isrecursive (defaults to false, but if true, lists all resources from - the parent specified by the domainid till leaves); keyword (lists by keyword); listall (if - set to false, list only those resources belonging to the command's caller; if set to true, - lists resources that the caller is authorized to see. Default value is false); page; - pagesize; projectid (lists objects by project); regionid; tags (lists resources by tags: - key/value pairs) - - - createPortableIpRange - Creates portable IP addresses in the portable public IP address pool. - The request parameters are region id, start ip, end ip, netmask, gateway, and - vlan. - - - deletePortableIpRange - Deletes portable IP addresses from the portable public IP address pool. - The request parameters is portable ip address range id. - - - listPortableIpRange - Lists portable IP addresses in the portable public IP address pool associated with a - Region. - The request parameters are elastic ip id and region id. - - - createVMSnapshot - Creates a virtual machine snapshot. - - - deleteVMSnapshot - Deletes a virtual machine snapshot. - - - listVMSnapshot - Shows a virtual machine snapshot. - - - revertToVMSnapshot - Returns a virtual machine to the state and data saved in a given snapshot. - - - createLBHealthCheckPolicy - Creates a new health check policy for a load balancer rule. 
- - - deleteLBHealthCheckPolicy - Deletes an existing health check policy from a load balancer rule. - - - listLBHealthCheckPolicies - Displays the health check policy for a load balancer rule. - - - createEgressFirewallRules - Creates an egress firewall rule on the guest network. - - - deleteEgressFirewallRules - Deletes a egress firewall rule on the guest network. - - - listEgressFirewallRules - Lists the egress firewall rules configured for a guest network. - - - resetSSHKeyForVirtualMachine - Resets the SSHkey for virtual machine. - - - addBaremetalHost - Adds a new host. Technically, this API command was present in v3.0.6, but its - functionality was disabled. - - - addBaremetalDhcp - Adds a DHCP server for bare metal hosts. - - - addBaremetalPxePingServer - Adds a PXE PING server for bare metal hosts. - - - addBaremetalPxeKickStartServer (Adds a PXE server for bare metal hosts) - - - listBaremetalDhcp - Shows the DHCP servers currently defined for bare metal hosts. - - - listBaremetalPxePingServer - Shows the PXE PING servers currently defined for bare metal hosts. - - - addNicToVirtualMachine - Adds a new NIC to the specified VM on a selected network. - - - removeNicFromVirtualMachine - Removes the specified NIC from a selected VM. - - - updateDefaultNicForVirtualMachine - Updates the specified NIC to be the default one for a selected VM. - - - addRegion - Registers a Region into another Region. - - - updateRegion - Updates Region details: ID, Name, Endpoint, User API Key, and User Secret Key. - - - removeRegion - Removes a Region from current Region. - - - listRegions - Get all the Regions. They can be filtered by using the ID or Name. - - - getUser - This API can only be used by the Admin. Get user account details by using the API - Key. - - - getApiLimit - Shows number of remaining APIs for the invoking user in current window. 
- - - resetApiLimit - For root admin, if account ID parameter is passed, it will reset count for that - particular account, otherwise it will reset all counters. - - - lockAccount - Locks an account. - - - lockUser - Locks a user account. - - - scaleVirtualMachine - Scales the virtual machine to a new service offering. - - - migrateVirtualMachineWithVolume - Attempts migrating VM with its volumes to a different host. - - - dedicatePublicIpRange - Dedicates a Public IP range to an account. - - - releasePublicIpRange - Releases a Public IP range back to the system pool. - - - dedicateGuestVlanRange - Dedicates a guest VLAN range to an account. - - - releaseDedicatedGuestVlanRange - Releases a dedicated guest VLAN range to the system. - - - listDedicatedGuestVlanRanges - Lists dedicated guest VLAN ranges. - - - updatePortForwardingRule - Updates a port forwarding rule. Only the private port and the VM can be updated. - - - scaleSystemVm - Scales the service offering for a systemVM, console proxy, or secondary storage. - - - listDeploymentPlanners - Lists all the deployment planners available. - - - addS3 - Adds a Amazon Simple Storage Service instance. - - - listS3s - Lists all the Amazon Simple Storage Service instances. - - - findHostsForMigration - Finds hosts suitable for migrating a VM to. - - - releaseHostReservation - Releases host reservation. - - - resizeVolume - Resizes a volume. - - - updateVolume - Updates the volume. - - - listStorageProviders - Lists storage providers. - - - findStoragePoolsForMigration - Lists storage pools available for migrating a volume. - - - createEgressFirewallRule - Creates a egress firewall rule for a given network. - - - deleteEgressFirewallRule - Deletes an egress firewall rule. - - - listEgressFirewallRules - Lists all egress firewall rules for network. - - - updateNetworkACLItem - Updates ACL item with specified ID. - - - createNetworkACLList - Creates a Network ACL for the given VPC. 
- - - deleteNetworkACLList - Deletes a Network ACL. - - - replaceNetworkACLList - Replaces ACL associated with a Network or private gateway. - - - listNetworkACLLists - Lists all network ACLs. - - - addResourceDetail - Adds detail for the Resource. - - - removeResourceDetail - Removes details of the resource. - - - listResourceDetails - Lists resource details. - - - addNiciraNvpDevice - Adds a Nicira NVP device. - - - deleteNiciraNvpDevice - Deletes a Nicira NVP device. - - - listNiciraNvpDevices - Lists Nicira NVP devices. - - - listNiciraNvpDeviceNetworks - Lists network that are using a Nicira NVP device. - - - addBigSwitchVnsDevice - Adds a BigSwitch VNS device. - - - deleteBigSwitchVnsDevice - Deletes a BigSwitch VNS device. - - - listBigSwitchVnsDevices - Lists BigSwitch VNS devices. - - - configureSimulator - Configures a simulator. - - - listApis - Lists all the available APIs on the server, provided by the API Discovery plugin. - - - getApiLimit - Gets the API limit count for the caller. - - - resetApiLimit - Resets the API count. - - - assignToGlobalLoadBalancerRule - Assigns load balancer rule or list of load balancer rules to a global load balancer - rules. - - - removeFromGlobalLoadBalancerRule - Removes a load balancer rule association with global load balancer rule. - - - listVMSnapshot - Lists virtual machine snapshot by conditions. - - - createLoadBalancer - Creates a load balancer. - - - listLoadBalancers - Lists load balancers. - - - deleteLoadBalancer - Deletes a load balancer. - - - configureInternalLoadBalancerElement - Configures an Internal Load Balancer element. - - - createInternalLoadBalancerElement - Creates an Internal Load Balancer element. - - - listInternalLoadBalancerElements - Lists all available Internal Load Balancer elements. - - - createAffinityGroup - Creates an affinity or anti-affinity group. - - - deleteAffinityGroup - Deletes an affinity group. - - - listAffinityGroups - Lists all the affinity groups. 
- - - updateVMAffinityGroup - Updates the affinity or anti-affinity group associations of a VM. The VM has to be - stopped and restarted for the new properties to take effect. - - - listAffinityGroupTypes - Lists affinity group types available. - - - stopInternalLoadBalancerVM - Stops an Internal LB VM. - - - startInternalLoadBalancerVM - Starts an existing Internal LB VM. - - - listInternalLoadBalancerVMs - Lists internal LB VMs. - - - listNetworkIsolationMethods - Lists supported methods of network isolation. - - - dedicateZone - Dedicates a zone. - - - dedicatePod - Dedicates a pod. - - - dedicateCluster - Dedicates an existing cluster. - - - dedicateHost - Dedicates a host. - - - releaseDedicatedZone - Releases dedication of zone. - - - releaseDedicatedPod - Releases dedication for the pod. - - - releaseDedicatedCluster - Releases dedication for cluster. - - - releaseDedicatedHost - Releases dedication for host. - - - listDedicatedZones - Lists dedicated zones. - - - listDedicatedPods - Lists dedicated pods. - - - listDedicatedClusters - Lists dedicated clusters. - - - listDedicatedHosts - Lists dedicated hosts. - - -
diff --git a/docs/en-US/added-API-commands.xml b/docs/en-US/added-API-commands.xml deleted file mode 100644 index 99635de4697..00000000000 --- a/docs/en-US/added-API-commands.xml +++ /dev/null @@ -1,195 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Added API commands in 3.0 -
- Added in 3.0.2 - - - changeServiceForSystemVm - Changes the service offering for a system VM (console proxy or secondary storage). The - system VM must be in a "Stopped" state for this command to take effect. - - -
-
- Added in 3.0.1 - - - changeServiceForSystemVm - Changes the service offering for a system VM (console proxy or secondary storage). The - system VM must be in a "Stopped" state for this command to take effect. - - -
-
- Added in 3.0.0 - - - - - - - - assignVirtualMachine (Move a user VM to another user under same - domain.) - restoreVirtualMachine (Restore a VM to original template or specific - snapshot) - createLBStickinessPolicy (Creates a Load Balancer stickiness policy - ) - - - deleteLBStickinessPolicy (Deletes a LB stickiness policy.) - listLBStickinessPolicies (Lists LBStickiness policies.) - ldapConfig (Configure the LDAP context for this site.) - - - addSwift (Adds Swift.) - listSwifts (List Swift.) - migrateVolume (Migrate volume) - - - updateStoragePool (Updates a storage pool.) - authorizeSecurityGroupEgress (Authorizes a particular egress rule for this - security group) - revokeSecurityGroupEgress (Deletes a particular egress rule from this - security group) - - - createNetworkOffering (Creates a network offering.) - deleteNetworkOffering (Deletes a network offering.) - createProject (Creates a project) - - - deleteProject (Deletes a project) - updateProject (Updates a project) - activateProject (Activates a project) - - - suspendProject (Suspends a project) - listProjects (Lists projects and provides detailed information for listed - projects) - addAccountToProject (Adds account to a project) - - - deleteAccountFromProject (Deletes account from the project) - listProjectAccounts (Lists project's accounts) - listProjectInvitations (Lists an account's invitations to join - projects) - - - updateProjectInvitation (Accepts or declines project - invitation) - deleteProjectInvitation (Deletes a project invitation) - updateHypervisorCapabilities (Updates a hypervisor - capabilities.) - - - listHypervisorCapabilities (Lists all hypervisor - capabilities.) - createPhysicalNetwork (Creates a physical network) - deletePhysicalNetwork (Deletes a Physical Network.) 
- - - listPhysicalNetworks (Lists physical networks) - updatePhysicalNetwork (Updates a physical network) - listSupportedNetworkServices (Lists all network services provided by - &PRODUCT; or for the given Provider.) - - - addNetworkServiceProvider (Adds a network serviceProvider to a physical - network) - deleteNetworkServiceProvider (Deletes a Network Service - Provider.) - listNetworkServiceProviders (Lists network serviceproviders for a given - physical network.) - - - updateNetworkServiceProvider (Updates a network serviceProvider of a physical - network) - addTrafficType (Adds traffic type to a physical network) - deleteTrafficType (Deletes traffic type of a physical network) - - - listTrafficTypes (Lists traffic types of a given physical - network.) - updateTrafficType (Updates traffic type of a physical network) - listTrafficTypeImplementors (Lists implementors of implementor of a network - traffic type or implementors of all network traffic types) - - - createStorageNetworkIpRange (Creates a Storage network IP - range.) - deleteStorageNetworkIpRange (Deletes a storage network IP - Range.) - listStorageNetworkIpRange (List a storage network IP range.) - - - updateStorageNetworkIpRange (Update a Storage network IP range, only allowed - when no IPs in this range have been allocated.) 
- listUsageTypes (List Usage Types) - addF5LoadBalancer (Adds a F5 BigIP load balancer device) - - - configureF5LoadBalancer (configures a F5 load balancer device) - deleteF5LoadBalancer ( delete a F5 load balancer device) - listF5LoadBalancers (lists F5 load balancer devices) - - - listF5LoadBalancerNetworks (lists network that are using a F5 load balancer - device) - addSrxFirewall (Adds a SRX firewall device) - deleteSrxFirewall ( delete a SRX firewall device) - - - listSrxFirewalls (lists SRX firewall devices in a physical - network) - listSrxFirewallNetworks (lists network that are using SRX firewall - device) - addNetscalerLoadBalancer (Adds a netscaler load balancer - device) - - - deleteNetscalerLoadBalancer ( delete a netscaler load balancer - device) - configureNetscalerLoadBalancer (configures a netscaler load balancer - device) - listNetscalerLoadBalancers (lists netscaler load balancer - devices) - - - listNetscalerLoadBalancerNetworks (lists network that are using a netscaler - load balancer device) - createVirtualRouterElement (Create a virtual router element.) - configureVirtualRouterElement (Configures a virtual router - element.) - - - listVirtualRouterElements (Lists all available virtual router - elements.) - - - - - - -
-
diff --git a/docs/en-US/added-error-codes.xml b/docs/en-US/added-error-codes.xml deleted file mode 100644 index ae7389122f9..00000000000 --- a/docs/en-US/added-error-codes.xml +++ /dev/null @@ -1,138 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Added &PRODUCT; Error Codes - You can now find the &PRODUCT;-specific error code in the exception response for each type of exception. The following list of error codes is added to the new class named CSExceptionErrorCode. - - - - - - - - 4250 : "com.cloud.utils.exception.CloudRuntimeException" - 4255 : "com.cloud.utils.exception.ExceptionUtil" - 4260 : "com.cloud.utils.exception.ExecutionException" - - - 4265 : "com.cloud.utils.exception.HypervisorVersionChangedException" - 4270 : "com.cloud.utils.exception.RuntimeCloudException" - 4275 : "com.cloud.exception.CloudException" - - - 4280 : "com.cloud.exception.AccountLimitException" - 4285 : "com.cloud.exception.AgentUnavailableException" - 4290 : "com.cloud.exception.CloudAuthenticationException" - - - 4295 : "com.cloud.exception.CloudExecutionException" - 4300 : "com.cloud.exception.ConcurrentOperationException" - 4305 : "com.cloud.exception.ConflictingNetworkSettingsException" - - - 4310 : "com.cloud.exception.DiscoveredWithErrorException" - 4315 : "com.cloud.exception.HAStateException" - 4320 : "com.cloud.exception.InsufficientAddressCapacityException" - - - 4325 : "com.cloud.exception.InsufficientCapacityException" - 4330 : "com.cloud.exception.InsufficientNetworkCapacityException" - 4335 : "com.cloud.exception.InsufficientServerCapacityException" - - - 4340 : "com.cloud.exception.InsufficientStorageCapacityException" - 4345 : "com.cloud.exception.InternalErrorException" - 4350 : "com.cloud.exception.InvalidParameterValueException" - - - 4355 : "com.cloud.exception.ManagementServerException" - 4360 : "com.cloud.exception.NetworkRuleConflictException" - 4365 : "com.cloud.exception.PermissionDeniedException" - - - 4370 : "com.cloud.exception.ResourceAllocationException" - 4375 : "com.cloud.exception.ResourceInUseException" - 4380 : "com.cloud.exception.ResourceUnavailableException" - - - 4385 : "com.cloud.exception.StorageUnavailableException" - 4390 : "com.cloud.exception.UnsupportedServiceException" - 4395 : 
"com.cloud.exception.VirtualMachineMigrationException" - - - 4400 : "com.cloud.exception.AccountLimitException" - 4405 : "com.cloud.exception.AgentUnavailableException" - 4410 : "com.cloud.exception.CloudAuthenticationException" - - - 4415 : "com.cloud.exception.CloudException" - 4420 : "com.cloud.exception.CloudExecutionException" - 4425 : "com.cloud.exception.ConcurrentOperationException" - - - 4430 : "com.cloud.exception.ConflictingNetworkSettingsException" - 4435 : "com.cloud.exception.ConnectionException" - 4440 : "com.cloud.exception.DiscoveredWithErrorException" - - - 4445 : "com.cloud.exception.DiscoveryException" - 4450 : "com.cloud.exception.HAStateException" - 4455 : "com.cloud.exception.InsufficientAddressCapacityException" - - - 4460 : "com.cloud.exception.InsufficientCapacityException" - 4465 : "com.cloud.exception.InsufficientNetworkCapacityException" - 4470 : "com.cloud.exception.InsufficientServerCapacityException" - - - 4475 : "com.cloud.exception.InsufficientStorageCapacityException" - 4480 : "com.cloud.exception.InsufficientVirtualNetworkCapcityException" - 4485 : "com.cloud.exception.InternalErrorException" - - - 4490 : "com.cloud.exception.InvalidParameterValueException" - 4495 : "com.cloud.exception.ManagementServerException" - 4500 : "com.cloud.exception.NetworkRuleConflictException" - - - 4505 : "com.cloud.exception.PermissionDeniedException" - 4510 : "com.cloud.exception.ResourceAllocationException" - 4515 : "com.cloud.exception.ResourceInUseException" - - - 4520 : "com.cloud.exception.ResourceUnavailableException" - 4525 : "com.cloud.exception.StorageUnavailableException" - 4530 : "com.cloud.exception.UnsupportedServiceException" - - - 4535 : "com.cloud.exception.VirtualMachineMigrationException" - 9999 : "org.apache.cloudstack.api.ServerApiException" - - - - - -
- diff --git a/docs/en-US/adding-IP-addresses-for-the-public-network.xml b/docs/en-US/adding-IP-addresses-for-the-public-network.xml deleted file mode 100644 index abf4d0233cc..00000000000 --- a/docs/en-US/adding-IP-addresses-for-the-public-network.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding IP Addresses for the Public Network - These instructions assume you have already logged in to the &PRODUCT; UI. - - In the left navigation, choose Infrastructure. In Zones, click View More, then click the desired zone . - Click the Network tab. - In the Public node of the diagram, click Configure. - Click the IP Ranges tab. - Provide the following information: - - Gateway. The gateway in use for these IP addresses - Netmask. The netmask associated with this IP range - VLAN. The VLAN that will be used for public traffic - Start IP/End IP. A range of IP addresses that are assumed to be accessible from the Internet and will be allocated for access to guest networks. - - - Click Add. - - - -
diff --git a/docs/en-US/additional-installation-options.xml b/docs/en-US/additional-installation-options.xml deleted file mode 100644 index 622ef03d07e..00000000000 --- a/docs/en-US/additional-installation-options.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Additional Installation Options - The next few sections describe &PRODUCT; features above and beyond the basic deployment options. - - - - diff --git a/docs/en-US/admin-alerts.xml b/docs/en-US/admin-alerts.xml deleted file mode 100644 index e98f79de06f..00000000000 --- a/docs/en-US/admin-alerts.xml +++ /dev/null @@ -1,128 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Administrator Alerts - The system provides alerts and events to help with the management of the cloud. Alerts are notices to an administrator, generally delivered by e-mail, notifying the administrator that an error has occurred in the cloud. Alert behavior is configurable. - Events track all of the user and administrator actions in the cloud. For example, every guest VM start creates an associated event. Events are stored in the Management Server’s database. - Emails will be sent to administrators under the following circumstances: - - The Management Server cluster runs low on CPU, memory, or storage resources - The Management Server loses heartbeat from a Host for more than 3 minutes - The Host cluster runs low on CPU, memory, or storage resources - -
- - Sending Alerts to External SNMP and Syslog Managers - In addition to showing administrator alerts on the Dashboard in the &PRODUCT; UI and - sending them in email, &PRODUCT; can also send the same alerts to external SNMP or - Syslog management software. This is useful if you prefer to use an SNMP or Syslog - manager to monitor your cloud. - The alerts which can be sent are listed in . You can also - display the most up to date list by calling the API command listAlerts. -
- SNMP Alert Details - The supported protocol is SNMP version 2. - Each SNMP trap contains the following information: message, podId, dataCenterId, clusterId, and generationTime. -
-
- Syslog Alert Details - &PRODUCT; generates a syslog message for every alert. Each syslog message incudes - the fields alertType, message, podId, dataCenterId, and clusterId, in the following - format. If any field does not have a valid value, it will not be included. - Date severity_level Management_Server_IP_Address/Name alertType:: value dataCenterId:: value podId:: value clusterId:: value message:: value - For example: - Mar 4 10:13:47 WARN localhost alertType:: managementNode message:: Management server node 127.0.0.1 is up -
-
- Configuring SNMP and Syslog Managers - To configure one or more SNMP managers or Syslog managers to receive alerts from - &PRODUCT;: - - For an SNMP manager, install the &PRODUCT; MIB file on your SNMP manager system. - This maps the SNMP OIDs to trap types that can be more easily read by users. - The file must be publicly available. - For more information on how to install this file, consult the documentation provided with the SNMP manager. - - Edit the file /etc/cloudstack/management/log4j-cloud.xml. - # vi /etc/cloudstack/management/log4j-cloud.xml - - - Add an entry using the syntax shown below. Follow the appropriate example - depending on whether you are adding an SNMP manager or a Syslog manager. To specify - multiple external managers, separate the IP addresses and other configuration values - with commas (,). - - The recommended maximum number of SNMP or Syslog managers is 20 for - each. - - - The following example shows how to configure two SNMP managers at IP addresses - 10.1.1.1 and 10.1.1.2. Substitute your own IP addresses, ports, and communities. Do - not change the other values (name, threshold, class, and layout values). - <appender name="SNMP" class="org.apache.cloudstack.alert.snmp.SnmpTrapAppender"> - <param name="Threshold" value="WARN"/> <!-- Do not edit. The alert feature assumes WARN. --> - <param name="SnmpManagerIpAddresses" value="10.1.1.1,10.1.1.2"/> - <param name="SnmpManagerPorts" value="162,162"/> - <param name="SnmpManagerCommunities" value="public,public"/> - <layout class="org.apache.cloudstack.alert.snmp.SnmpEnhancedPatternLayout"> <!-- Do not edit --> - <param name="PairDelimeter" value="//"/> - <param name="KeyValueDelimeter" value="::"/> - </layout> -</appender> - The following example shows how to configure two Syslog managers at IP - addresses 10.1.1.1 and 10.1.1.2. Substitute your own IP addresses. You can - set Facility to any syslog-defined value, such as LOCAL0 - LOCAL7. Do not - change the other values. 
- <appender name="ALERTSYSLOG"> - <param name="Threshold" value="WARN"/> - <param name="SyslogHosts" value="10.1.1.1,10.1.1.2"/> - <param name="Facility" value="LOCAL6"/> - <layout> - <param name="ConversionPattern" value=""/> - </layout> -</appender> - - - If your cloud has multiple Management Server nodes, repeat these steps to edit - log4j-cloud.xml on every instance. - - - If you have made these changes while the Management Server is running, wait a - few minutes for the change to take effect. - - - Troubleshooting: If no alerts appear at the - configured SNMP or Syslog manager after a reasonable amount of time, it is likely that - there is an error in the syntax of the <appender> entry in log4j-cloud.xml. Check - to be sure that the format and settings are correct. -
-
- Deleting an SNMP or Syslog Manager - To remove an external SNMP manager or Syslog manager so that it no longer receives - alerts from &PRODUCT;, remove the corresponding entry from the file - /etc/cloudstack/management/log4j-cloud.xml. -
-
-
diff --git a/docs/en-US/admin-guide.xml b/docs/en-US/admin-guide.xml deleted file mode 100644 index f1b0327e9d1..00000000000 --- a/docs/en-US/admin-guide.xml +++ /dev/null @@ -1,92 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Administrator Guide - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en-US/adv-zone-topology-req.xml b/docs/en-US/adv-zone-topology-req.xml deleted file mode 100644 index 3764e926ebe..00000000000 --- a/docs/en-US/adv-zone-topology-req.xml +++ /dev/null @@ -1,25 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Advanced Zone Topology Requirements - With Advanced Networking, separate subnets must be used for private and public - networks. -
diff --git a/docs/en-US/advanced-zone-configuration.xml b/docs/en-US/advanced-zone-configuration.xml deleted file mode 100644 index 451b5454eb2..00000000000 --- a/docs/en-US/advanced-zone-configuration.xml +++ /dev/null @@ -1,385 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Advanced Zone Configuration - - - After you select Advanced in the Add Zone wizard and click Next, you will be asked to - enter the following details. Then click Next. - - - Name. A name for the zone. - - - DNS 1 and 2. These are DNS servers for use by guest - VMs in the zone. These DNS servers will be accessed via the public network you will add - later. The public IP addresses for the zone must have a route to the DNS server named - here. - - - Internal DNS 1 and Internal DNS 2. These are DNS - servers for use by system VMs in the zone(these are VMs used by &PRODUCT; itself, such - as virtual routers, console proxies,and Secondary Storage VMs.) These DNS servers will - be accessed via the management traffic network interface of the System VMs. The private - IP address you provide for the pods must have a route to the internal DNS server named - here. - - - Network Domain. (Optional) If you want to assign a - special domain name to the guest VM network, specify the DNS suffix. - - - Guest CIDR. This is the CIDR that describes the IP - addresses in use in the guest virtual networks in this zone. For example, 10.1.1.0/24. - As a matter of good practice you should set different CIDRs for different zones. This - will make it easier to set up VPNs between networks in different zones. - - - Hypervisor. (Introduced in version 3.0.1) Choose - the hypervisor for the first cluster in the zone. You can add clusters with different - hypervisors later, after you finish adding the zone. - - - Public. A public zone is available to all users. A - zone that is not public will be assigned to a particular domain. Only users in that - domain will be allowed to create guest VMs in this zone. - - - - - Choose which traffic types will be carried by the physical network. - The traffic types are management, public, guest, and storage traffic. For more - information about the types, roll over the icons to display their tool tips, or see . 
This screen starts out with one network - already configured. If you have multiple physical networks, you need to add more. Drag and - drop traffic types onto a greyed-out network and it will become active. You can move the - traffic icons from one network to another; for example, if the default traffic types shown - for Network 1 do not match your actual setup, you can move them down. You can also change - the network names if desired. - - - (Introduced in version 3.0.1) Assign a network traffic label to each traffic type on - each physical network. These labels must match the labels you have already defined on the - hypervisor host. To assign each label, click the Edit button under the traffic type icon - within each physical network. A popup dialog appears where you can type the label, then - click OK. - These traffic labels will be defined only for the hypervisor selected for the first - cluster. For all other hypervisors, the labels can be configured after the zone is - created. - (VMware only) If you have enabled Nexus dvSwitch in the environment, you must specify - the corresponding Ethernet port profile names as network traffic label for each traffic type - on the physical network. For more information on Nexus dvSwitch, see Configuring a vSphere - Cluster with Nexus 1000v Virtual Switch in the Installation Guide. If you have enabled - VMware dvSwitch in the environment, you must specify the corresponding Switch name as - network traffic label for each traffic type on the physical network. For more information, - see Configuring a VMware Datacenter with VMware Distributed Virtual Switch in the - Installation Guide. - - - Click Next. - - - Configure the IP range for public Internet traffic. Enter the following details, then - click Add. If desired, you can repeat this step to add more public Internet IP ranges. When - done, click Next. - - - Gateway. The gateway in use for these IP - addresses. - - - Netmask. The netmask associated with this IP - range. 
- - - VLAN. The VLAN that will be used for public - traffic. - - - Start IP/End IP. A range of IP addresses that are - assumed to be accessible from the Internet and will be allocated for access to guest - networks. - - - - - In a new zone, &PRODUCT; adds the first pod for you. You can always add more pods later. - For an overview of what a pod is, see . - To configure the first pod, enter the following, then click Next: - - - Pod Name. A name for the pod. - - - Reserved system gateway. The gateway for the hosts - in that pod. - - - Reserved system netmask. The network prefix that - defines the pod's subnet. Use CIDR notation. - - - Start/End Reserved System IP. The IP range in the - management network that &PRODUCT; uses to manage various system VMs, such as Secondary - Storage VMs, Console Proxy VMs, and DHCP. For more information, see . - - - - - Specify a range of VLAN IDs to carry guest traffic for each physical network (see VLAN - Allocation Example ), then click Next. - - - In a new pod, &PRODUCT; adds the first cluster for you. You can always add more clusters - later. For an overview of what a cluster is, see . - To configure the first cluster, enter the following, then click Next: - - - Hypervisor. (Version 3.0.0 only; in 3.0.1, this - field is read only) Choose the type of hypervisor software that all hosts in this - cluster will run. If you choose VMware, additional fields appear so you can give - information about a vSphere cluster. For vSphere servers, we recommend creating the - cluster of hosts in vCenter and then adding the entire cluster to &PRODUCT;. See Add - Cluster: vSphere . - - - Cluster name. Enter a name for the cluster. This - can be text of your choosing and is not used by &PRODUCT;. - - - - - In a new cluster, &PRODUCT; adds the first host for you. You can always add more hosts - later. For an overview of what a host is, see . - - When you deploy &PRODUCT;, the hypervisor host must not have any VMs already - running. 
- - Before you can configure the host, you need to install the hypervisor software on the - host. You will need to know which version of the hypervisor software version is supported by - &PRODUCT; and what additional configuration is required to ensure the host will work with - &PRODUCT;. To find these installation details, see: - - - Citrix XenServer Installation for &PRODUCT; - - - VMware vSphere Installation and Configuration - - - KVM Installation and Configuration - - - - To configure the first host, enter the following, then click Next: - - - Host Name. The DNS name or IP address of the - host. - - - Username. Usually root. - - - Password. This is the password for the user named - above (from your XenServer or KVM install). - - - Host Tags. (Optional) Any labels that you use to - categorize hosts for ease of maintenance. For example, you can set to the cloud's HA tag - (set in the ha.tag global configuration parameter) if you want this host to be used only - for VMs with the "high availability" feature enabled. For more information, see - HA-Enabled Virtual Machines as well as HA for Hosts, both in the Administration - Guide. - - - - - In a new cluster, &PRODUCT; adds the first primary storage server for you. You can - always add more servers later. For an overview of what primary storage is, see . - To configure the first primary storage server, enter the following, then click - Next: - - - Name. The name of the storage device. - - - Protocol. For XenServer, choose either NFS, iSCSI, - or PreSetup. For KVM, choose NFS, SharedMountPoint, CLVM, and RBD. For vSphere choose - either VMFS (iSCSI or FiberChannel) or NFS. The remaining fields in the screen vary - depending on what you choose here. - - - - - - - NFS - - - - Server. The IP address or DNS name of - the storage device. - - - Path. The exported path from the - server. - - - Tags (optional). The comma-separated - list of tags for this storage device. 
It should be an equivalent set or - superset of the tags on your disk offerings. - - - The tag sets on primary storage across clusters in a Zone must be - identical. For example, if cluster A provides primary storage that has tags T1 - and T2, all other clusters in the Zone must also provide primary storage that - has tags T1 and T2. - - - - iSCSI - - - - Server. The IP address or DNS name of - the storage device. - - - Target IQN. The IQN of the target. - For example, iqn.1986-03.com.sun:02:01ec9bb549-1271378984. - - - Lun. The LUN number. For example, - 3. - - - Tags (optional). The comma-separated - list of tags for this storage device. It should be an equivalent set or - superset of the tags on your disk offerings. - - - The tag sets on primary storage across clusters in a Zone must be - identical. For example, if cluster A provides primary storage that has tags T1 - and T2, all other clusters in the Zone must also provide primary storage that - has tags T1 and T2. - - - - preSetup - - - - Server. The IP address or DNS name of - the storage device. - - - SR Name-Label. Enter the name-label - of the SR that has been set up outside &PRODUCT;. - - - Tags (optional). The comma-separated - list of tags for this storage device. It should be an equivalent set or - superset of the tags on your disk offerings. - - - The tag sets on primary storage across clusters in a Zone must be - identical. For example, if cluster A provides primary storage that has tags T1 - and T2, all other clusters in the Zone must also provide primary storage that - has tags T1 and T2. - - - - SharedMountPoint - - - - Path. The path on each host that is - where this primary storage is mounted. For example, "/mnt/primary". - - - Tags (optional). The comma-separated - list of tags for this storage device. It should be an equivalent set or - superset of the tags on your disk offerings. - - - The tag sets on primary storage across clusters in a Zone must be - identical. 
For example, if cluster A provides primary storage that has tags T1 - and T2, all other clusters in the Zone must also provide primary storage that - has tags T1 and T2. - - - - VMFS - - - - Server. The IP address or DNS name of - the vCenter server. - - - Path. A combination of the datacenter - name and the datastore name. The format is "/" datacenter name "/" - datastore name. For example, "/cloud.dc.VM/cluster1datastore". - - - Tags (optional). The comma-separated - list of tags for this storage device. It should be an equivalent set or - superset of the tags on your disk offerings. - - - The tag sets on primary storage across clusters in a Zone must be - identical. For example, if cluster A provides primary storage that has tags T1 - and T2, all other clusters in the Zone must also provide primary storage that - has tags T1 and T2. - - - - - - - - - - In a new zone, &PRODUCT; adds the first secondary storage server for you. For an - overview of what secondary storage is, see . - Before you can fill out this screen, you need to prepare the secondary storage by - setting up NFS shares and installing the latest &PRODUCT; System VM template. See Adding - Secondary Storage : - - - NFS Server. The IP address of the server or fully - qualified domain name of the server. - - - Path. The exported path from the server. - - - - - Click Launch. - - -
diff --git a/docs/en-US/advanced-zone-guest-ip-addresses.xml b/docs/en-US/advanced-zone-guest-ip-addresses.xml deleted file mode 100644 index 66bc0826683..00000000000 --- a/docs/en-US/advanced-zone-guest-ip-addresses.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Advanced Zone Guest IP Addresses - When advanced networking is used, the administrator can create additional networks for use - by the guests. These networks can span the zone and be available to all accounts, or they can be - scoped to a single account, in which case only the named account may create guests that attach - to these networks. The networks are defined by a VLAN ID, IP range, and gateway. The - administrator may provision thousands of these networks if desired. Additionally, the - administrator can reserve a part of the IP address space for non-&PRODUCT; VMs and - servers. -
diff --git a/docs/en-US/advanced-zone-network-traffic-types.xml b/docs/en-US/advanced-zone-network-traffic-types.xml deleted file mode 100644 index 4d1f46592e0..00000000000 --- a/docs/en-US/advanced-zone-network-traffic-types.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Advanced Zone Network Traffic Types - When advanced networking is used, there can be multiple physical networks in the zone. Each physical network can carry one or more traffic types, and you need to let &PRODUCT; know which type of network traffic you want each network to carry. The traffic types in an advanced zone are: - - Guest. When end users run VMs, they generate guest traffic. The guest VMs communicate with each other over a network that can be referred to as the guest network. This network can be isolated or shared. In an isolated guest network, the administrator needs to reserve VLAN ranges to provide isolation for each &PRODUCT; account’s network (potentially a large number of VLANs). In a shared guest network, all guest VMs share a single network. - Management. When &PRODUCT;’s internal resources communicate with each other, they generate management traffic. This includes communication between hosts, system VMs (VMs used by &PRODUCT; to perform various tasks in the cloud), and any other component that communicates directly with the &PRODUCT; Management Server. You must configure the IP range for the system VMs to use. - Public. Public traffic is generated when VMs in the cloud access the Internet. Publicly accessible IPs must be allocated for this purpose. End users can use the &PRODUCT; UI to acquire these IPs to implement NAT between their guest network and the public network, as described in “Acquiring a New IP Address†in the Administration Guide. - Storage. While labeled "storage" this is specifically about secondary storage, and doesn't affect traffic for primary storage. This includes traffic such as VM templates and snapshots, which is sent between the secondary storage VM and secondary storage servers. &PRODUCT; uses a separate Network Interface Controller (NIC) named storage NIC for storage network traffic. Use of a storage NIC that always operates on a high bandwidth network allows fast template and snapshot copying. 
You must configure the IP range to use for the storage network. - - These traffic types can each be on a separate physical network, or they can be combined with certain restrictions. When you use the Add Zone wizard in the UI to create a new zone, you are guided into making only valid choices. -
diff --git a/docs/en-US/advanced-zone-physical-network-configuration.xml b/docs/en-US/advanced-zone-physical-network-configuration.xml deleted file mode 100644 index cfc6184c000..00000000000 --- a/docs/en-US/advanced-zone-physical-network-configuration.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Advanced Zone Physical Network Configuration - Within a zone that uses advanced networking, you need to tell the Management Server how the - physical network is set up to carry different kinds of traffic in isolation. - - - -
diff --git a/docs/en-US/advanced-zone-public-ip-addresses.xml b/docs/en-US/advanced-zone-public-ip-addresses.xml deleted file mode 100644 index 82b71d1f23a..00000000000 --- a/docs/en-US/advanced-zone-public-ip-addresses.xml +++ /dev/null @@ -1,27 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Advanced Zone Public IP Addresses - When advanced networking is used, the administrator can create additional networks for use by the guests. These networks can span the zone and be available to all accounts, or they can be scoped to a single account, in which case only the named account may create guests that attach to these networks. The networks are defined by a VLAN ID, IP range, and gateway. The administrator may provision thousands of these networks if desired. -
diff --git a/docs/en-US/alerts.xml b/docs/en-US/alerts.xml deleted file mode 100644 index ebea4b808a4..00000000000 --- a/docs/en-US/alerts.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Alerts - The following is the list of alert type numbers. The current alerts can be found by calling listAlerts. - MEMORY = 0 - CPU = 1 - STORAGE =2 - STORAGE_ALLOCATED = 3 - PUBLIC_IP = 4 - PRIVATE_IP = 5 - HOST = 6 - USERVM = 7 - DOMAIN_ROUTER = 8 - CONSOLE_PROXY = 9 - ROUTING = 10// lost connection to default route (to the gateway) - STORAGE_MISC = 11 // lost connection to default route (to the gateway) - USAGE_SERVER = 12 // lost connection to default route (to the gateway) - MANAGMENT_NODE = 13 // lost connection to default route (to the gateway) - DOMAIN_ROUTER_MIGRATE = 14 - CONSOLE_PROXY_MIGRATE = 15 - USERVM_MIGRATE = 16 - VLAN = 17 - SSVM = 18 - USAGE_SERVER_RESULT = 19 - STORAGE_DELETE = 20; - UPDATE_RESOURCE_COUNT = 21; //Generated when we fail to update the resource count - USAGE_SANITY_RESULT = 22; - DIRECT_ATTACHED_PUBLIC_IP = 23; - LOCAL_STORAGE = 24; - RESOURCE_LIMIT_EXCEEDED = 25; //Generated when the resource limit exceeds the limit. Currently used for recurring snapshots only - diff --git a/docs/en-US/allocators.xml b/docs/en-US/allocators.xml deleted file mode 100644 index d8ce2b8612b..00000000000 --- a/docs/en-US/allocators.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Allocators - &PRODUCT; enables administrators to write custom allocators that will choose the Host to place a new guest and the storage host from which to allocate guest virtual disk images. -
diff --git a/docs/en-US/api-calls.xml b/docs/en-US/api-calls.xml deleted file mode 100644 index af4073ac60b..00000000000 --- a/docs/en-US/api-calls.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Calling the &PRODUCT; API - - - - - - - diff --git a/docs/en-US/api-overview.xml b/docs/en-US/api-overview.xml deleted file mode 100644 index a541049e116..00000000000 --- a/docs/en-US/api-overview.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - &PRODUCT; API - The &PRODUCT; API is a low level API that has been used to implement the &PRODUCT; web UIs. - It is also a good basis for implementing other popular APIs such as EC2/S3 and emerging DMTF - standards. - Many &PRODUCT; API calls are asynchronous. These will return a Job ID immediately when - called. This Job ID can be used to query the status of the job later. Also, status calls on - impacted resources will provide some indication of their state. - The API has a REST-like query basis and returns results in XML or JSON. - See the - Developer’s Guide and the API - Reference. - - - - diff --git a/docs/en-US/api-reference.xml b/docs/en-US/api-reference.xml deleted file mode 100644 index 9a1acc145bd..00000000000 --- a/docs/en-US/api-reference.xml +++ /dev/null @@ -1,24 +0,0 @@ - - -
- API Reference Documentation - You can find all the API reference documentation at the below site: - http://cloudstack.apache.org/docs/api/ -
- diff --git a/docs/en-US/api-throttling.xml b/docs/en-US/api-throttling.xml deleted file mode 100644 index 908e22389a8..00000000000 --- a/docs/en-US/api-throttling.xml +++ /dev/null @@ -1,67 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Limiting the Rate of API Requests - You can limit the rate at which API requests can be placed for each - account. This is useful to avoid malicious attacks on the Management Server, prevent - performance degradation, and provide fairness to all accounts. - If the number of API calls exceeds the threshold, an error message is returned for any additional API calls. - The caller will have to retry these API calls at another time. -
- Configuring the API Request Rate - To control the API request rate, use the following global configuration - settings: - - api.throttling.enabled - Enable/Disable API throttling. By default, this setting is false, so - API throttling is not enabled. - api.throttling.interval (in seconds) - Time interval during which the number of API requests is to be counted. - When the interval has passed, the API count is reset to 0. - api.throttling.max - Maximum number of APIs that can be placed within the api.throttling.interval period. - api.throttling.cachesize - Cache size for storing API counters. - Use a value higher than the total number of accounts managed by the cloud. - One cache entry is needed for each account, to store the running API total for that account. - - -
-
- Limitations on API Throttling - The following limitations exist in the current implementation of this feature. - Even with these limitations, &PRODUCT; is still able to effectively use API throttling to - avoid malicious attacks causing denial of service. - - - In a deployment with multiple Management Servers, - the cache is not synchronized across them. - In this case, &PRODUCT; might not be able to - ensure that only the exact desired number of API requests are allowed. - In the worst case, the number of API calls that might be allowed is - (number of Management Servers) * (api.throttling.max). - - The API commands resetApiLimit and getApiLimit are limited to the - Management Server where the API is invoked. - - -
-
\ No newline at end of file diff --git a/docs/en-US/append-displayname-vms.xml b/docs/en-US/append-displayname-vms.xml deleted file mode 100644 index 592a6e863e8..00000000000 --- a/docs/en-US/append-displayname-vms.xml +++ /dev/null @@ -1,84 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Appending a Display Name to the Guest VM’s Internal Name - Every guest VM has an internal name. The host uses the internal name to identify the guest - VMs. &PRODUCT; gives you an option to provide a guest VM with a display name. You can set this - display name as the internal name so that the vCenter can use it to identify the guest VM. A new - global parameter, vm.instancename.flag, has now been added to achieve this functionality. - The default format of the internal name is - i-<user_id>-<vm_id>-<instance.name>, where instance.name is a global - parameter. However, If vm.instancename.flag is set to true, and if a display name is provided - during the creation of a guest VM, the display name is appended to the internal name of the - guest VM on the host. This makes the internal name format as - i-<user_id>-<vm_id>-<displayName>. The default value of vm.instancename.flag - is set to false. This feature is intended to make the correlation between instance names and - internal names easier in large data center deployments. - The following table explains how a VM name is displayed in different scenarios. - - - - - - - - - - User-Provided Display Name - vm.instancename.flag - Hostname on the VM - Name on vCenter - Internal Name - - - - - Yes - True - Display name - i-<user_id>-<vm_id>-displayName - i-<user_id>-<vm_id>-displayName - - - No - True - UUID - i-<user_id>-<vm_id>-<instance.name> - i-<user_id>-<vm_id>-<instance.name> - - - Yes - False - Display name - i-<user_id>-<vm_id>-<instance.name> - i-<user_id>-<vm_id>-<instance.name> - - - No - False - UUID - i-<user_id>-<vm_id>-<instance.name> - i-<user_id>-<vm_id>-<instance.name> - - - - -
diff --git a/docs/en-US/asynchronous-commands-example.xml b/docs/en-US/asynchronous-commands-example.xml deleted file mode 100644 index 330f1255679..00000000000 --- a/docs/en-US/asynchronous-commands-example.xml +++ /dev/null @@ -1,106 +0,0 @@ - - -
- Example - - The following shows an example of using an asynchronous command. Assume the API command: - command=deployVirtualMachine&zoneId=1&serviceOfferingId=1&diskOfferingId=1&templateId=1 - - CloudStack will immediately return a job ID and any other additional data. - - <deployvirtualmachineresponse> - <jobid>1</jobid> - <id>100</id> - </deployvirtualmachineresponse> - - Using the job ID, you can periodically poll for the results by using the queryAsyncJobResult command. - command=queryAsyncJobResult&jobId=1 - Three possible results could come from this query. - Job is still pending: - - <queryasyncjobresult> - <jobid>1</jobid> - <jobstatus>0</jobstatus> - <jobprocstatus>1</jobprocstatus> - </queryasyncjobresult> - - Job has succeeded: - - <queryasyncjobresultresponse cloud-stack-version="3.0.1.6"> - <jobid>1</jobid> - <jobstatus>1</jobstatus> - <jobprocstatus>0</jobprocstatus> - <jobresultcode>0</jobresultcode> - <jobresulttype>object</jobresulttype> - <jobresult> - <virtualmachine> - <id>450</id> - <name>i-2-450-VM</name> - <displayname>i-2-450-VM</displayname> - <account>admin</account> - <domainid>1</domainid> - <domain>ROOT</domain> - <created>2011-03-10T18:20:25-0800</created> - <state>Running</state> - <haenable>false</haenable> - <zoneid>1</zoneid> - <zonename>San Jose 1</zonename> - <hostid>2</hostid> - <hostname>905-13.sjc.lab.vmops.com</hostname> - <templateid>1</templateid> - <templatename>CentOS 5.3 64bit LAMP</templatename> - <templatedisplaytext>CentOS 5.3 64bit LAMP</templatedisplaytext> - <passwordenabled>false</passwordenabled> - <serviceofferingid>1</serviceofferingid> - <serviceofferingname>Small Instance</serviceofferingname> - <cpunumber>1</cpunumber> - <cpuspeed>500</cpuspeed> - <memory>512</memory> - <guestosid>12</guestosid> - <rootdeviceid>0</rootdeviceid> - <rootdevicetype>NetworkFilesystem</rootdevicetype> - <nic> - <id>561</id> - <networkid>205</networkid> - <netmask>255.255.255.0</netmask> - <gateway>10.1.1.1</gateway> - 
<ipaddress>10.1.1.225</ipaddress> - <isolationuri>vlan://295</isolationuri> - <broadcasturi>vlan://295</broadcasturi> - <traffictype>Guest</traffictype> - <type>Virtual</type> - <isdefault>true</isdefault> - </nic> - <hypervisor>XenServer</hypervisor> - </virtualmachine> - </jobresult> - </queryasyncjobresultresponse> - - Job has failed: - - <queryasyncjobresult> - <jobid>1</jobid> - <jobstatus>2</jobstatus> - <jobprocstatus>0</jobprocstatus> - <jobresultcode>551</jobresultcode> - <jobresulttype>text</jobresulttype> - <jobresult>Unable to deploy virtual machine id = 100 due to not enough capacity</jobresult> - </queryasyncjobresult> - -
diff --git a/docs/en-US/asynchronous-commands.xml b/docs/en-US/asynchronous-commands.xml deleted file mode 100644 index 4c9b59cbc43..00000000000 --- a/docs/en-US/asynchronous-commands.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Asynchronous Commands - Asynchronous commands were introduced in &PRODUCT; 2.x. Commands are designated as asynchronous when they can potentially take a long period of time to complete such as creating a snapshot or disk volume. They differ from synchronous commands by the following: - - - They are identified in the API Reference by an (A). - They will immediately return a job ID to refer to the job that will be responsible in processing the command. - If executed as a "create" resource command, it will return the resource ID as well as the job ID. - You can periodically check the status of the job by making a simple API call to the command, queryAsyncJobResult and passing in the job ID. - - - - -
diff --git a/docs/en-US/attach-iso-to-vm.xml b/docs/en-US/attach-iso-to-vm.xml deleted file mode 100644 index 8e0d4247f9b..00000000000 --- a/docs/en-US/attach-iso-to-vm.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Attaching an ISO to a VM - - In the left navigation, click Instances. - Choose the virtual machine you want to work with. - Click the Attach ISO button. - - - - - iso.png: depicts adding an iso image - - - In the Attach ISO dialog box, select the desired ISO. - Click OK. - -
diff --git a/docs/en-US/attaching-volume.xml b/docs/en-US/attaching-volume.xml deleted file mode 100644 index bb9196a93bb..00000000000 --- a/docs/en-US/attaching-volume.xml +++ /dev/null @@ -1,61 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Attaching a Volume - You can attach a volume to a guest VM to provide extra disk storage. Attach a volume when - you first create a new volume, when you are moving an existing volume from one VM to another, or - after you have migrated a volume from one storage pool to another. - - - Log in to the &PRODUCT; UI as a user or admin. - - - In the left navigation, click Storage. - - - In Select View, choose Volumes. - - - Click the volume name in the Volumes list, then click the Attach Disk button - - - - - AttachDiskButton.png: button to attach a volume - - - - - - In the Instance popup, choose the VM to which you want to attach the volume. You will - only see instances to which you are allowed to attach volumes; for example, a user will see - only instances created by that user, but the administrator will have more choices. - - - - When the volume has been attached, you should be able to see it by clicking Instances, - the instance name, and View Volumes. - - -
diff --git a/docs/en-US/automatic-snapshot-creation-retention.xml b/docs/en-US/automatic-snapshot-creation-retention.xml deleted file mode 100644 index 54fbe68e5bb..00000000000 --- a/docs/en-US/automatic-snapshot-creation-retention.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Automatic Snapshot Creation and Retention - (Supported for the following hypervisors: XenServer, - VMware vSphere, and KVM) - Users can set up a recurring snapshot policy to automatically create multiple snapshots of a - disk at regular intervals. Snapshots can be created on an hourly, daily, weekly, or monthly - interval. One snapshot policy can be set up per disk volume. For example, a user can set up a - daily snapshot at 02:30. - With each snapshot schedule, users can also specify the number of scheduled snapshots to be - retained. Older snapshots that exceed the retention limit are automatically deleted. This - user-defined limit must be equal to or lower than the global limit set by the &PRODUCT; - administrator. See . The limit applies only to those - snapshots that are taken as part of an automatic recurring snapshot policy. Additional manual - snapshots can be created and retained. -
\ No newline at end of file diff --git a/docs/en-US/autoscale.xml b/docs/en-US/autoscale.xml deleted file mode 100644 index 26e795b7bf5..00000000000 --- a/docs/en-US/autoscale.xml +++ /dev/null @@ -1,286 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Configuring AutoScale - AutoScaling allows you to scale your back-end services or application VMs up or down - seamlessly and automatically according to the conditions you define. With AutoScaling enabled, - you can ensure that the number of VMs you are using seamlessly scale up when demand increases, - and automatically decreases when demand subsides. Thus it helps you save compute costs by - terminating underused VMs automatically and launching new VMs when you need them, without the - need for manual intervention. - NetScaler AutoScaling is designed to seamlessly launch or terminate VMs based on - user-defined conditions. Conditions for triggering a scaleup or scaledown action can vary from a - simple use case like monitoring the CPU usage of a server to a complex use case of monitoring a - combination of server's responsiveness and its CPU usage. For example, you can configure - AutoScaling to launch an additional VM whenever CPU usage exceeds 80 percent for 15 minutes, or - to remove a VM whenever CPU usage is less than 20 percent for 30 minutes. - &PRODUCT; uses the NetScaler load balancer to monitor all aspects of a system's health and - work in unison with &PRODUCT; to initiate scale-up or scale-down actions. - - AutoScale is supported on NetScaler Release 10 Build 73.e and beyond. - - - Prerequisites - Before you configure an AutoScale rule, consider the following: - - - - Ensure that the necessary template is prepared before configuring AutoScale. When a VM - is deployed by using a template and when it comes up, the application should be up and - running. - - If the application is not running, the NetScaler device considers the VM as - ineffective and continues provisioning the VMs unconditionally until the resource limit is - exhausted. - - - - Deploy the templates you prepared. Ensure that the applications come up on the first - boot and is ready to take the traffic. Observe the time requires to deploy the template. 
- Consider this time when you specify the quiet time while configuring AutoScale. - - - The AutoScale feature supports the SNMP counters that can be used to define conditions - for taking scale up or scale down actions. To monitor the SNMP-based counter, ensure that - the SNMP agent is installed in the template used for creating the AutoScale VMs, and the - SNMP operations work with the configured SNMP community and port by using standard SNMP - managers. For example, see to configure SNMP on a RHEL - machine. - - - Ensure that the endpointe.url parameter present in the Global Settings is set to the - Management Server API URL. For example, http://10.102.102.22:8080/client/api. In a - multi-node Management Server deployment, use the virtual IP address configured in the load - balancer for the management server’s cluster. Additionally, ensure that the NetScaler device - has access to this IP address to provide AutoScale support. - If you update the endpointe.url, disable the AutoScale functionality of the load - balancer rules in the system, then enable them back to reflect the changes. For more - information see - - - If the API Key and Secret Key are regenerated for an AutoScale user, ensure that the - AutoScale functionality of the load balancers that the user participates in are disabled and - then enabled to reflect the configuration changes in the NetScaler. - - - In an advanced Zone, ensure that at least one VM should be present before configuring a - load balancer rule with AutoScale. Having one VM in the network ensures that the network is - in implemented state for configuring AutoScale. - - - - Configuration - Specify the following: - - - - - - - autoscaleateconfig.png: Configuring AutoScale - - - - - Template: A template consists of a base OS image and - application. A template is used to provision the new instance of an application on a scaleup - action. 
When a VM is deployed from a template, the VM can start taking the traffic from the - load balancer without any admin intervention. For example, if the VM is deployed for a Web - service, it should have the Web server running, the database connected, and so on. - - - Compute offering: A predefined set of virtual hardware - attributes, including CPU speed, number of CPUs, and RAM size, that the user can select when - creating a new virtual machine instance. Choose one of the compute offerings to be used - while provisioning a VM instance as part of scaleup action. - - - Min Instance: The minimum number of active VM instances - that is assigned to a load balancing rule. The active VM instances are the application - instances that are up and serving the traffic, and are being load balanced. This parameter - ensures that a load balancing rule has at least the configured number of active VM instances - are available to serve the traffic. - - If an application, such as SAP, running on a VM instance is down for some reason, the - VM is then not counted as part of Min Instance parameter, and the AutoScale feature - initiates a scaleup action if the number of active VM instances is below the configured - value. Similarly, when an application instance comes up from its earlier down state, this - application instance is counted as part of the active instance count and the AutoScale - process initiates a scaledown action when the active instance count breaches the Max - instance value. - - - - Max Instance: Maximum number of active VM instances - that should be assigned to a load balancing rule. This - parameter defines the upper limit of active VM instances that can be assigned to a load - balancing rule. - Specifying a large value for the maximum instance parameter might result in provisioning - large number of VM instances, which in turn leads to a single load balancing rule exhausting - the VM instances limit specified at the account or domain level. 
- - If an application, such as SAP, running on a VM instance is down for some reason, the - VM is not counted as part of Max Instance parameter. So there may be scenarios where the - number of VMs provisioned for a scaleup action might be more than the configured Max - Instance value. Once the application instances in the VMs are up from an earlier down - state, the AutoScale feature starts aligning to the configured Max Instance value. - - - - Specify the following scale-up and scale-down policies: - - - Duration: The duration, in seconds, for which the - conditions you specify must be true to trigger a scaleup action. The conditions defined - should hold true for the entire duration you specify for an AutoScale action to be invoked. - - - - Counter: The performance counters expose the state of - the monitored instances. By default, &PRODUCT; offers four performance counters: Three SNMP - counters and one NetScaler counter. The SNMP counters are Linux User CPU, Linux System CPU, - and Linux CPU Idle. The NetScaler counter is ResponseTime. The root administrator can add - additional counters into &PRODUCT; by using the &PRODUCT; API. - - - Operator: The following five relational operators are - supported in AutoScale feature: Greater than, Less than, Less than or equal to, Greater than - or equal to, and Equal to. - - - Threshold: Threshold value to be used for the counter. - Once the counter defined above breaches the threshold value, the AutoScale feature initiates - a scaleup or scaledown action. - - - Add: Click Add to add the condition. - - - Additionally, if you want to configure the advanced settings, click Show advanced settings, - and specify the following: - - - Polling interval: Frequency in which the conditions, - combination of counter, operator and threshold, are to be evaluated before taking a scale up - or down action. The default polling interval is 30 seconds. - - - Quiet Time: This is the cool down period after an - AutoScale action is initiated. 
The time includes the time taken to complete provisioning a - VM instance from its template and the time taken by an application to be ready to serve - traffic. This quiet time allows the fleet to come up to a stable state before any action can - take place. The default is 300 seconds. - - - Destroy VM Grace Period: The duration in seconds, after - a scaledown action is initiated, to wait before the VM is destroyed as part of scaledown - action. This is to ensure graceful close of any pending sessions or transactions being - served by the VM marked for destroy. The default is 120 seconds. - - - Security Groups: Security groups provide a way to - isolate traffic to the VM instances. A security group is a group of VMs that filter their - incoming and outgoing traffic according to a set of rules, called ingress and egress rules. - These rules filter network traffic according to the IP address that is attempting to - communicate with the VM. - - - Disk Offerings: A predefined set of disk size for - primary data storage. - - - SNMP Community: The SNMP community string to be used by - the NetScaler device to query the configured counter value from the provisioned VM - instances. Default is public. - - - SNMP Port: The port number on which the SNMP agent that - run on the provisioned VMs is listening. Default port is 161. - - - User: This is the user that the NetScaler device use to - invoke scaleup and scaledown API calls to the cloud. If no option is specified, the user who - configures AutoScaling is applied. Specify another user name to override. - - - Apply: Click Apply to create the AutoScale - configuration. - - - - Disabling and Enabling an AutoScale Configuration - If you want to perform any maintenance operation on the AutoScale VM instances, disable - the AutoScale configuration. When the AutoScale configuration is disabled, no scaleup or - scaledown action is performed. You can use this downtime for the maintenance activities. 
To - disable the AutoScale configuration, click the Disable AutoScale - - - - - EnableDisable.png: button to enable or disable AutoScale. - - button. - - The button toggles between enable and disable, depending on whether AutoScale is currently - enabled or not. After the maintenance operations are done, you can enable the AutoScale - configuration back. To enable, open the AutoScale configuration page again, then click the - Enable AutoScale - - - - - EnableDisable.png: button to enable or disable AutoScale. - - button. - - Updating an AutoScale Configuration - You can update the various parameters and add or delete the conditions in a scaleup or - scaledown rule. Before you update an AutoScale configuration, ensure that you disable the - AutoScale load balancer rule by clicking the Disable AutoScale button. - - After you modify the required AutoScale parameters, click Apply. To apply the new AutoScale - policies, open the AutoScale configuration page again, then click the Enable AutoScale - button. - - Runtime Considerations - - - - - An administrator should not assign a VM to a load balancing rule which is configured for - AutoScale. - - - Before a VM provisioning is completed if NetScaler is shutdown or restarted, the - provisioned VM cannot be a part of the load balancing rule though the intent was to assign - it to a load balancing rule. To workaround, rename the AutoScale provisioned VMs based on - the rule name or ID so at any point of time the VMs can be reconciled to its load balancing - rule. - - - Making API calls outside the context of AutoScale, such as destroyVM, on an autoscaled - VM leaves the load balancing configuration in an inconsistent state. Though VM is destroyed - from the load balancer rule, NetScaler continues to show the VM as a service assigned to a - rule. - - -
diff --git a/docs/en-US/aws-api-examples.xml b/docs/en-US/aws-api-examples.xml deleted file mode 100644 index ee3b44a5bde..00000000000 --- a/docs/en-US/aws-api-examples.xml +++ /dev/null @@ -1,145 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Examples - There are many tools available to interface with an AWS compatible API. In this section we provide - a few examples that users of &PRODUCT; can build upon. - 
- Boto Examples - Boto is one of them. It is a Python package available at https://github.com/boto/boto. - In this section we provide two examples of Python scripts that use Boto and have been tested with the - &PRODUCT; AWS API Interface. - First is an EC2 example. Replace the Access and Secret Keys with your own and - update the endpoint. - - - An EC2 Boto example - #!/usr/bin/env python - -import sys -import os -import boto -import boto.ec2 - -region = boto.ec2.regioninfo.RegionInfo(name="ROOT",endpoint="localhost") -apikey='GwNnpUPrO6KgIdZu01z_ZhhZnKjtSdRwuYd4DvpzvFpyxGMvrzno2q05MB0ViBoFYtdqKd' -secretkey='t4eXLEYWw7chBhDlaKf38adCMSHx_wlds6JfSx3z9fSpSOm0AbP9Moj0oGIzy2LSC8iw' - -def main(): - '''Establish connection to EC2 cloud''' - conn =boto.connect_ec2(aws_access_key_id=apikey, - aws_secret_access_key=secretkey, - is_secure=False, - region=region, - port=7080, - path="/awsapi", - api_version="2010-11-15") - - '''Get list of images that I own''' - images = conn.get_all_images() - print images - myimage = images[0] - '''Pick an instance type''' - vm_type='m1.small' - reservation = myimage.run(instance_type=vm_type,security_groups=['default']) - -if __name__ == '__main__': - main() - - - - Second is an S3 example. Replace the Access and Secret keys with your own, - as well as the endpoint of the service. Be sure to also update the file paths to something - that exists on your machine. 
- - - An S3 Boto Example - #!/usr/bin/env python - -import sys -import os -from boto.s3.key import Key -from boto.s3.connection import S3Connection -from boto.s3.connection import OrdinaryCallingFormat - -apikey='ChOw-pwdcCFy6fpeyv6kUaR0NnhzmG3tE7HLN2z3OB_s-ogF5HjZtN4rnzKnq2UjtnHeg_yLA5gOw' -secretkey='IMY8R7CJQiSGFk4cHwfXXN3DUFXz07cCiU80eM3MCmfLs7kusgyOfm0g9qzXRXhoAPCH-IRxXc3w' - -cf=OrdinaryCallingFormat() - -def main(): - '''Establish connection to S3 service''' - conn =S3Connection(aws_access_key_id=apikey,aws_secret_access_key=secretkey, \ - is_secure=False, \ - host='localhost', \ - port=7080, \ - calling_format=cf, \ - path="/awsapi/rest/AmazonS3") - - try: - bucket=conn.create_bucket('cloudstack') - k = Key(bucket) - k.key = 'test' - try: - k.set_contents_from_filename('/Users/runseb/Desktop/s3cs.py') - except: - print 'could not write file' - pass - except: - bucket = conn.get_bucket('cloudstack') - k = Key(bucket) - k.key = 'test' - try: - k.get_contents_to_filename('/Users/runseb/Desktop/foobar') - except: - print 'Could not get file' - pass - - try: - bucket1=conn.create_bucket('teststring') - k=Key(bucket1) - k.key('foobar') - k.set_contents_from_string('This is my silly test') - except: - bucket1=conn.get_bucket('teststring') - k = Key(bucket1) - k.key='foobar' - k.get_contents_as_string() - -if __name__ == '__main__': - main() - - - - -
- -
- JClouds Examples - -
- -
diff --git a/docs/en-US/aws-ec2-configuration.xml b/docs/en-US/aws-ec2-configuration.xml deleted file mode 100644 index f0f2d0f6cdc..00000000000 --- a/docs/en-US/aws-ec2-configuration.xml +++ /dev/null @@ -1,109 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Enabling the EC2 and S3 Compatible Interface - - The software that provides AWS API compatibility is installed along with &PRODUCT;. You must enable the services and perform some setup steps prior to using it. - - - Set the global configuration parameters for each service to true. - See . - Create a set of &PRODUCT; service offerings with names that match the Amazon service offerings. - You can do this through the &PRODUCT; UI as described in the Administration Guide. - Be sure you have included the Amazon default service offering, m1.small, as well as any EC2 instance types that you will use. - - If you did not already do so when you set the configuration parameter in step , restart the Management Server. - # service cloudstack-management restart - - - The following sections provide details to perform these steps - 
- Enabling the Services - To enable the EC2 and S3 compatible services you need to set the configuration variables enable.ec2.api - and enable.s3.api to true. You do not have to enable both at the same time. Enable the ones you need. - This can be done via the &PRODUCT; GUI by going in Global Settings or via the API. - The snapshot below shows you how to use the GUI to enable these services - - - - - - - - Use the GUI to set the configuration variable to true - - - - - Using the &PRODUCT; API, the easiest is to use the so-called integration port on which you can make - unauthenticated calls. In Global Settings set the port to 8096 and subsequently call the updateConfiguration method. - The following URLs show you how: - - - - http://localhost:8096/client/api?command=updateConfiguration&name=enable.ec2.api&value=true - http://localhost:8096/client/api?command=updateConfiguration&name=enable.s3.api&value=true - - - - Once you have enabled the services, restart the server. -
- -
- Creating EC2 Compatible Service Offerings - You will also need to define compute service offerings with names compatible with the - Amazon EC2 instance types API names (e.g m1.small,m1.large). This can be done via the &PRODUCT; GUI. - Go under Service Offerings select Compute offering and either create - a new compute offering or modify an existing one, ensuring that the name matches an EC2 instance type API name. The snapshot below shows you how: - - - - - - - Use the GUI to set the name of a compute service offering to an EC2 instance - type API name. - - - -
-
- Modifying the AWS API Port - - (Optional) The AWS API listens for requests on port 7080. If you prefer AWS API to listen on another port, you can change it as follows: - - Edit the files /etc/cloudstack/management/server.xml, /etc/cloudstack/management/server-nonssl.xml, - and /etc/cloudstack/management/server-ssl.xml. - In each file, find the tag <Service name="Catalina7080">. Under this tag, - locate <Connector executor="tomcatThreadPool-internal" port= ....<. - Change the port to whatever port you want to use, then save the files. - Restart the Management Server. - - If you re-install &PRODUCT;, you will have to re-enable the services and if need be update the port. - -
- -
diff --git a/docs/en-US/aws-ec2-introduction.xml b/docs/en-US/aws-ec2-introduction.xml deleted file mode 100644 index 4cf071bcbb2..00000000000 --- a/docs/en-US/aws-ec2-introduction.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Amazon Web Services Compatible Interface - &PRODUCT; can translate Amazon Web Services (AWS) API calls to native &PRODUCT; API calls - so that users can continue using existing AWS-compatible tools. This translation service runs as - a separate web application in the same tomcat server as the management server of &PRODUCT;, - listening on a different port. The Amazon Web Services (AWS) compatible interface provides the - EC2 SOAP and Query APIs as well as the S3 REST API. - - This service was previously enabled by separate software called CloudBridge. It is now - fully integrated with the &PRODUCT; management server. - - - The compatible interface for the EC2 Query API and the S3 API are Work In Progress. The S3 compatible API offers a way to store data on the management server file system, it is not an implementation of the S3 backend. - - Limitations - - - Supported only in zones that use basic networking. - - - Available in fresh installations of &PRODUCT;. Not available through upgrade of previous versions. - - - Features such as Elastic IP (EIP) and Elastic Load Balancing (ELB) are only available in an infrastructure - with a Citrix NetScaler device. Users accessing a Zone with a NetScaler device will need to use a - NetScaler-enabled network offering (DefaultSharedNetscalerEIP and ELBNetworkOffering). - - -
diff --git a/docs/en-US/aws-ec2-requirements.xml b/docs/en-US/aws-ec2-requirements.xml deleted file mode 100644 index 62e94b1ac9f..00000000000 --- a/docs/en-US/aws-ec2-requirements.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Supported API Version - - The EC2 interface complies with Amazon's WSDL version dated November 15, 2010, available at - http://ec2.amazonaws.com/doc/2010-11-15/. - The interface is compatible with the EC2 command-line - tools EC2 tools v. 1.3-62308, which can be downloaded at http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip. - - - Work is underway to support a more recent version of the EC2 API
diff --git a/docs/en-US/aws-ec2-supported-commands.xml b/docs/en-US/aws-ec2-supported-commands.xml deleted file mode 100644 index 7cdbcad8095..00000000000 --- a/docs/en-US/aws-ec2-supported-commands.xml +++ /dev/null @@ -1,396 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Supported AWS API Calls - The following Amazon EC2 commands are supported by &PRODUCT; when the AWS API compatible interface is enabled. - For a few commands, there are differences between the &PRODUCT; and Amazon EC2 versions, and these differences are noted. The underlying SOAP call for each command is also given, for those who have built tools using those calls. - - - Elastic IP API mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-allocate-address - AllocateAddress - associateIpAddress - - - ec2-associate-address - AssociateAddress - enableStaticNat - - - ec2-describe-addresses - DescribeAddresses - listPublicIpAddresses - - - ec2-diassociate-address - DisassociateAddress - disableStaticNat - - - ec2-release-address - ReleaseAddress - disassociateIpAddress - - - -
- - Availability Zone API mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-describe-availability-zones - DescribeAvailabilityZones - listZones - - - -
- - Images API mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-create-image - CreateImage - createTemplate - - - ec2-deregister - DeregisterImage - DeleteTemplate - - - ec2-describe-images - DescribeImages - listTemplates - - - ec2-register - RegisterImage - registerTemplate - - - -
- - Image Attributes API mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-describe-image-attribute - DescribeImageAttribute - listTemplatePermissions - - - ec2-modify-image-attribute - ModifyImageAttribute - updateTemplatePermissions - - - ec2-reset-image-attribute - ResetImageAttribute - updateTemplatePermissions - - - -
- - Instances API mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-describe-instances - DescribeInstances - listVirtualMachines - - - ec2-run-instances - RunInstances - deployVirtualMachine - - - ec2-reboot-instances - RebootInstances - rebootVirtualMachine - - - ec2-start-instances - StartInstances - startVirtualMachine - - - ec2-stop-instances - StopInstances - stopVirtualMachine - - - ec2-terminate-instances - TerminateInstances - destroyVirtualMachine - - - -
- - Instance Attributes Mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-describe-instance-attribute - DescribeInstanceAttribute - listVirtualMachines - - - -
- - Keys Pairs Mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-add-keypair - CreateKeyPair - createSSHKeyPair - - - ec2-delete-keypair - DeleteKeyPair - deleteSSHKeyPair - - - ec2-describe-keypairs - DescribeKeyPairs - listSSHKeyPairs - - - ec2-import-keypair - ImportKeyPair - registerSSHKeyPair - - - -
- - Passwords API Mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-get-password - GetPasswordData - getVMPassword - - - -
- - Security Groups API Mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-authorize - AuthorizeSecurityGroupIngress - authorizeSecurityGroupIngress - - - ec2-add-group - CreateSecurityGroup - createSecurityGroup - - - ec2-delete-group - DeleteSecurityGroup - deleteSecurityGroup - - - ec2-describe-group - DescribeSecurityGroups - listSecurityGroups - - - ec2-revoke - RevokeSecurityGroupIngress - revokeSecurityGroupIngress - - - -
- - Snapshots API Mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-create-snapshot - CreateSnapshot - createSnapshot - - - ec2-delete-snapshot - DeleteSnapshot - deleteSnapshot - - - ec2-describe-snapshots - DescribeSnapshots - listSnapshots - - - -
- - Volumes API Mapping - - - - - EC2 command - SOAP call - &PRODUCT; API call - - - - - ec2-attach-volume - AttachVolume - attachVolume - - - ec2-create-volume - CreateVolume - createVolume - - - ec2-delete-volume - DeleteVolume - deleteVolume - - - ec2-describe-volume - DescribeVolume - listVolumes - - - ec2-detach-volume - DetachVolume - detachVolume - - - -
-
diff --git a/docs/en-US/aws-ec2-timeouts.xml b/docs/en-US/aws-ec2-timeouts.xml deleted file mode 100644 index 73d0c16c4df..00000000000 --- a/docs/en-US/aws-ec2-timeouts.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using Timeouts to Ensure AWS API Command Completion - The Amazon EC2 command-line tools have a default connection timeout. When used with &PRODUCT;, a longer timeout might be needed for some commands. If you find that commands are not completing due to timeouts, you can specify custom timeouts. You can add the following optional command-line parameters to any &PRODUCT;-supported EC2 command: - - - - - - - --connection-timeout TIMEOUT - Specifies a connection timeout (in seconds). - Example: --connection-timeout 30 - - - - --request-timeout TIMEOUT - Specifies a request timeout (in seconds). - Example: --request-timeout 45 - - - - - - Example: - ec2-run-instances 2 -z us-test1 -n 1-3 --connection-timeout 120 --request-timeout 120 - These optional timeout arguments are not specific to &PRODUCT;. -
diff --git a/docs/en-US/aws-ec2-user-setup.xml b/docs/en-US/aws-ec2-user-setup.xml deleted file mode 100644 index a2d89187feb..00000000000 --- a/docs/en-US/aws-ec2-user-setup.xml +++ /dev/null @@ -1,105 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- AWS API User Setup - In general, users need not be aware that they are using a translation service provided by &PRODUCT;. - They only need to send AWS API calls to &PRODUCT;'s endpoint, and it will translate the calls to the native &PRODUCT; API. Users of the Amazon EC2 compatible interface will be able to keep their existing EC2 tools - and scripts and use them with their &PRODUCT; deployment, by specifying the endpoint of the - management server and using the proper user credentials. In order to do this, each user must - perform the following configuration steps: - - - - Generate user credentials. - - - Register with the service. - - - For convenience, set up environment variables for the EC2 SOAP command-line tools. - - - -
- AWS API User Registration - Each user must perform a one-time registration. The user follows these steps: - - - Obtain the following by looking in the &PRODUCT; UI, using the API, or asking the cloud administrator: - - - The &PRODUCT; server's publicly available DNS name or IP address - The user account's Access key and Secret key - - - - Generate a private key and a self-signed X.509 certificate. The user substitutes their own desired storage location for /path/to/… below. - - - $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /path/to/private_key.pem -out /path/to/cert.pem - - - - Register the user X.509 certificate and Access/Secret keys with the AWS compatible service. - If you have the source code of &PRODUCT; go to the awsapi-setup/setup directory and use the Python script - cloudstack-aws-api-register. If you do not have the source then download the script using the following command. - - - wget -O cloudstack-aws-api-register "https://git-wip-us.apache.org/repos/asf?p=cloudstack.git;a=blob_plain;f=awsapi-setup/setup/cloudstack-aws-api-register;hb=4.1" - - - Then execute it, using the access and secret keys that were obtained in step . An example is shown below. - - $ cloudstack-aws-api-register --apikey=User’s &PRODUCT; API key --secretkey=User’s &PRODUCT; Secret key --cert=/path/to/cert.pem --url=http://&PRODUCT;.server:7080/awsapi - - - - - - A user with an existing AWS certificate could choose to use the same certificate with &PRODUCT;, but note that the certificate would be uploaded to the &PRODUCT; management server database. - - -
-
- AWS API Command-Line Tools Setup - To use the EC2 command-line tools, the user must perform these steps: - - - Be sure you have the right version of EC2 Tools. - The supported version is available at http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip. - - - - Set up the EC2 environment variables. This can be done every time you use the service or you can set them up in the proper shell profile. Replace the endpoint (i.e EC2_URL) with the proper address of your &PRODUCT; management server and port. In a bash shell do the following. - - - $ export EC2_CERT=/path/to/cert.pem - $ export EC2_PRIVATE_KEY=/path/to/private_key.pem - $ export EC2_URL=http://localhost:7080/awsapi - $ export EC2_HOME=/path/to/EC2_tools_directory - - - -
-
diff --git a/docs/en-US/aws-interface-compatibility.xml b/docs/en-US/aws-interface-compatibility.xml deleted file mode 100644 index 2c85c24b36a..00000000000 --- a/docs/en-US/aws-interface-compatibility.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Amazon Web Services Compatible Interface - - - - - - - - diff --git a/docs/en-US/basic-adv-networking.xml b/docs/en-US/basic-adv-networking.xml deleted file mode 100644 index 46f0650e69f..00000000000 --- a/docs/en-US/basic-adv-networking.xml +++ /dev/null @@ -1,113 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Basic and Advanced Networking - &PRODUCT; provides two styles of networking:. - - Basic - For AWS-style networking. Provides a single network where guest isolation can be provided - through layer-3 means such as security groups (IP address source filtering). - - - Advanced - For more sophisticated network topologies. This network model provides the most - flexibility in defining guest networks, but requires more configuration steps than basic - networking. - - Each zone has either basic or advanced networking. Once the choice of networking model for a - zone has been made and configured in &PRODUCT;, it can not be changed. A zone is either - basic or advanced for its entire lifetime. - The following table compares the networking features in the two networking models. - - - - - Networking Feature - Basic Network - Advanced Network - - - - - Number of networks - Single network - Multiple networks - - - Firewall type - Physical - Physical and Virtual - - - Load balancer - Physical - Physical and Virtual - - - Isolation type - Layer 3 - Layer 2 and Layer 3 - - - VPN support - No - Yes - - - Port forwarding - Physical - Physical and Virtual - - - 1:1 NAT - Physical - Physical and Virtual - - - Source NAT - No - Physical and Virtual - - - Userdata - Yes - Yes - - - Network usage monitoring - sFlow / netFlow at physical router - Hypervisor and Virtual Router - - - DNS and DHCP - Yes - Yes - - - - - The two types of networking may be in use in the same cloud. However, a given zone must use - either Basic Networking or Advanced Networking. - Different types of network traffic can be segmented on the same physical network. Guest - traffic can also be segmented by account. To isolate traffic, you can use separate VLANs. If you - are using separate VLANs on a single physical network, make sure the VLAN tags are in separate - numerical ranges. -
diff --git a/docs/en-US/basic-zone-configuration.xml b/docs/en-US/basic-zone-configuration.xml deleted file mode 100644 index 79d4ab8ce1b..00000000000 --- a/docs/en-US/basic-zone-configuration.xml +++ /dev/null @@ -1,319 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Basic Zone Configuration - - - After you select Basic in the Add Zone wizard and click Next, you will be asked to enter - the following details. Then click Next. - - - Name. A name for the zone. - - - DNS 1 and 2. These are DNS servers for use by guest - VMs in the zone. These DNS servers will be accessed via the public network you will add - later. The public IP addresses for the zone must have a route to the DNS server named - here. - - - Internal DNS 1 and Internal DNS 2. These are DNS - servers for use by system VMs in the zone (these are VMs used by &PRODUCT; itself, such - as virtual routers, console proxies, and Secondary Storage VMs.) These DNS servers will - be accessed via the management traffic network interface of the System VMs. The private - IP address you provide for the pods must have a route to the internal DNS server named - here. - - - Hypervisor. (Introduced in version 3.0.1) Choose - the hypervisor for the first cluster in the zone. You can add clusters with different - hypervisors later, after you finish adding the zone. - - - Network Offering. Your choice here determines what - network services will be available on the network for guest VMs. - - - - - - - Network Offering - Description - - - - - DefaultSharedNetworkOfferingWithSGService - If you want to enable security groups for guest traffic isolation, - choose this. (See Using Security Groups to Control Traffic to - VMs.) - - - DefaultSharedNetworkOffering - If you do not need security groups, choose this. - - - DefaultSharedNetscalerEIPandELBNetworkOffering - If you have installed a Citrix NetScaler appliance as part of your - zone network, and you will be using its Elastic IP and Elastic Load Balancing - features, choose this. With the EIP and ELB features, a basic zone with - security groups enabled can offer 1:1 static NAT and load - balancing. - - - - - - - Network Domain. (Optional) If you want to assign a - special domain name to the guest VM network, specify the DNS suffix. 
- - - Public. A public zone is available to all users. A - zone that is not public will be assigned to a particular domain. Only users in that - domain will be allowed to create guest VMs in this zone. - - - - - Choose which traffic types will be carried by the physical network. - The traffic types are management, public, guest, and storage traffic. For more - information about the types, roll over the icons to display their tool tips, or see Basic - Zone Network Traffic Types. This screen starts out with some traffic types already assigned. - To add more, drag and drop traffic types onto the network. You can also change the network - name if desired. - - - Assign a network traffic label to each traffic type on the physical network. These - labels must match the labels you have already defined on the hypervisor host. To assign each - label, click the Edit button under the traffic type icon. A popup dialog appears where you - can type the label, then click OK. - These traffic labels will be defined only for the hypervisor selected for the first - cluster. For all other hypervisors, the labels can be configured after the zone is - created. - - - Click Next. - - - (NetScaler only) If you chose the network offering for NetScaler, you have an additional - screen to fill out. Provide the requested details to set up the NetScaler, then click - Next. - - - IP address. The NSIP (NetScaler IP) address of the - NetScaler device. - - - Username/Password. The authentication credentials - to access the device. &PRODUCT; uses these credentials to access the device. - - - Type. NetScaler device type that is being added. It - could be NetScaler VPX, NetScaler MPX, or NetScaler SDX. For a comparison of the types, - see About Using a NetScaler Load Balancer. - - - Public interface. Interface of NetScaler that is - configured to be part of the public network. - - - Private interface. Interface of NetScaler that is - configured to be part of the private network. 
- - - Number of retries. Number of times to attempt a - command on the device before considering the operation failed. Default is 2. - - - Capacity. Number of guest networks/accounts that - will share this NetScaler device. - - - Dedicated. When marked as dedicated, this device - will be dedicated to a single account. When Dedicated is checked, the value in the - Capacity field has no significance – implicitly, its value is 1. - - - - - (NetScaler only) Configure the IP range for public traffic. The IPs in this range will - be used for the static NAT capability which you enabled by selecting the network offering - for NetScaler with EIP and ELB. Enter the following details, then click Add. If desired, you - can repeat this step to add more IP ranges. When done, click Next. - - - Gateway. The gateway in use for these IP - addresses. - - - Netmask. The netmask associated with this IP - range. - - - VLAN. The VLAN that will be used for public - traffic. - - - Start IP/End IP. A range of IP addresses that are - assumed to be accessible from the Internet and will be allocated for access to guest - VMs. - - - - - In a new zone, &PRODUCT; adds the first pod for you. You can always add more pods later. - For an overview of what a pod is, see . - To configure the first pod, enter the following, then click Next: - - - Pod Name. A name for the pod. - - - Reserved system gateway. The gateway for the hosts - in that pod. - - - Reserved system netmask. The network prefix that - defines the pod's subnet. Use CIDR notation. - - - Start/End Reserved System IP. The IP range in the - management network that &PRODUCT; uses to manage various system VMs, such as Secondary - Storage VMs, Console Proxy VMs, and DHCP. For more information, see System Reserved IP - Addresses. - - - - - Configure the network for guest traffic. Provide the following, then click Next: - - - Guest gateway. The gateway that the guests should - use. - - - Guest netmask. 
The netmask in use on the subnet the - guests will use. - - - Guest start IP/End IP. Enter the first and last IP - addresses that define a range that &PRODUCT; can assign to guests. - - - We strongly recommend the use of multiple NICs. If multiple NICs are used, they - may be in a different subnet. - - - If one NIC is used, these IPs should be in the same CIDR as the pod CIDR. - - - - - - - In a new pod, &PRODUCT; adds the first cluster for you. You can always add more clusters - later. For an overview of what a cluster is, see About Clusters. - To configure the first cluster, enter the following, then click Next: - - - Hypervisor. (Version 3.0.0 only; in 3.0.1, this - field is read only) Choose the type of hypervisor software that all hosts in this - cluster will run. If you choose VMware, additional fields appear so you can give - information about a vSphere cluster. For vSphere servers, we recommend creating the - cluster of hosts in vCenter and then adding the entire cluster to &PRODUCT;. See Add - Cluster: vSphere. - - - Cluster name. Enter a name for the cluster. This - can be text of your choosing and is not used by &PRODUCT;. - - - - - In a new cluster, &PRODUCT; adds the first host for you. You can always add more hosts - later. For an overview of what a host is, see About Hosts. - - When you add a hypervisor host to &PRODUCT;, the host must not have any VMs already - running. - - Before you can configure the host, you need to install the hypervisor software on the - host. You will need to know which version of the hypervisor software version is supported by - &PRODUCT; and what additional configuration is required to ensure the host will work with - &PRODUCT;. To find these installation details, see: - - - Citrix XenServer Installation and Configuration - - - VMware vSphere Installation and Configuration - - - KVM vSphere Installation and Configuration - - - - To configure the first host, enter the following, then click Next: - - - Host Name. 
The DNS name or IP address of the - host. - - - Username. The username is root. - - - Password. This is the password for the user named - above (from your XenServer or KVM install). - - - Host Tags. (Optional) Any labels that you use to - categorize hosts for ease of maintenance. For example, you can set this to the cloud's - HA tag (set in the ha.tag global configuration parameter) if you want this host to be - used only for VMs with the "high availability" feature enabled. For more information, - see HA-Enabled Virtual Machines as well as HA for Hosts. - - - - - In a new cluster, &PRODUCT; adds the first primary storage server for you. You can - always add more servers later. For an overview of what primary storage is, see About Primary - Storage. - To configure the first primary storage server, enter the following, then click - Next: - - - Name. The name of the storage device. - - - Protocol. For XenServer, choose either NFS, iSCSI, - or PreSetup. For KVM, choose NFS, SharedMountPoint,CLVM, or RBD. For vSphere choose - either VMFS (iSCSI or FiberChannel) or NFS. The remaining fields in the screen vary - depending on what you choose here. - - - - -
diff --git a/docs/en-US/basic-zone-guest-ip-addresses.xml b/docs/en-US/basic-zone-guest-ip-addresses.xml deleted file mode 100644 index 5143f71f17e..00000000000 --- a/docs/en-US/basic-zone-guest-ip-addresses.xml +++ /dev/null @@ -1,27 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Basic Zone Guest IP Addresses - When basic networking is used, &PRODUCT; will assign IP addresses in the CIDR of the pod to the guests in that pod. The administrator must add a Direct IP range on the pod for this purpose. These IPs are in the same VLAN as the hosts. -
diff --git a/docs/en-US/basic-zone-network-traffic-types.xml b/docs/en-US/basic-zone-network-traffic-types.xml deleted file mode 100644 index 850373658b4..00000000000 --- a/docs/en-US/basic-zone-network-traffic-types.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Basic Zone Network Traffic Types - When basic networking is used, there can be only one physical network in the zone. That physical network carries the following traffic types: - - Guest. When end users run VMs, they generate guest traffic. The guest VMs communicate with each other over a network that can be referred to as the guest network. Each pod in a basic zone is a broadcast domain, and therefore each pod has a different IP range for the guest network. The administrator must configure the IP range for each pod. - Management. When &PRODUCT;'s internal resources communicate with each other, they generate management traffic. This includes communication between hosts, system VMs (VMs used by &PRODUCT; to perform various tasks in the cloud), and any other component that communicates directly with the &PRODUCT; Management Server. You must configure the IP range for the system VMs to use. - We strongly recommend the use of separate NICs for management traffic and guest traffic. - Public. Public traffic is generated when VMs in the cloud access the Internet. Publicly accessible IPs must be allocated for this purpose. End users can use the &PRODUCT; UI to acquire these IPs to implement NAT between their guest network and the public network, as described in Acquiring a New IP Address. - Storage. While labeled "storage" this is specifically about secondary storage, and doesn't affect traffic for primary storage. This includes traffic such as VM templates and snapshots, which is sent between the secondary storage VM and secondary storage servers. &PRODUCT; uses a separate Network Interface Controller (NIC) named storage NIC for storage network traffic. Use of a storage NIC that always operates on a high bandwidth network allows fast template and snapshot copying. You must configure the IP range to use for the storage network. - - In a basic network, configuring the physical network is fairly straightforward. 
In most cases, you only need to configure one guest network to carry traffic that is generated by guest VMs. If you use a NetScaler load balancer and enable its elastic IP and elastic load balancing (EIP and ELB) features, you must also configure a network to carry public traffic. &PRODUCT; takes care of presenting the necessary network configuration steps to you in the UI when you add a new zone. -
diff --git a/docs/en-US/basic-zone-physical-network-configuration.xml b/docs/en-US/basic-zone-physical-network-configuration.xml deleted file mode 100644 index 4b1d24f2657..00000000000 --- a/docs/en-US/basic-zone-physical-network-configuration.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Basic Zone Physical Network Configuration - In a basic network, configuring the physical network is fairly straightforward. You only need to configure one guest network to carry traffic that is generated by guest VMs. When you first add a zone to &PRODUCT;, you set up the guest network through the Add Zone screens. - -
diff --git a/docs/en-US/best-practices-for-vms.xml b/docs/en-US/best-practices-for-vms.xml deleted file mode 100644 index 164932ac79a..00000000000 --- a/docs/en-US/best-practices-for-vms.xml +++ /dev/null @@ -1,67 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Best Practices for Virtual Machines - For VMs to work as expected and provide excellent service, follow these guidelines. -
- Monitor VMs for Max Capacity - The &PRODUCT; administrator should monitor the total number of VM instances in each - cluster, and disable allocation to the cluster if the total is approaching the maximum that - the hypervisor can handle. Be sure to leave a safety margin to allow for the possibility of - one or more hosts failing, which would increase the VM load on the other hosts as the VMs - are automatically redeployed. Consult the documentation for your chosen hypervisor to find - the maximum permitted number of VMs per host, then use &PRODUCT; global configuration - settings to set this as the default limit. Monitor the VM activity in each cluster at all - times. Keep the total number of VMs below a safe level that allows for the occasional host - failure. For example, if there are N hosts in the cluster, and you want to allow for one - host in the cluster to be down at any given time, the total number of VM instances you can - permit in the cluster is at most (N-1) * (per-host-limit). Once a cluster reaches this - number of VMs, use the &PRODUCT; UI to disable allocation of more VMs to the - cluster. -
-
- Install Required Tools and Drivers - Be sure the following are installed on each VM: - - For XenServer, install PV drivers and Xen tools on each VM. - This will enable live migration and clean guest shutdown. - Xen tools are required in order for dynamic CPU and RAM scaling to work. - For vSphere, install VMware Tools on each VM. - This will enable console view to work properly. - VMware Tools are required in order for dynamic CPU and RAM scaling to work. - - To be sure that Xen tools or VMware Tools is installed, use one of the following techniques: - - Create each VM from a template that already has the tools installed; or, - When registering a new template, the administrator or user can indicate whether tools are - installed on the template. This can be done through the UI - or using the updateTemplate API; or, - If a user deploys a virtual machine with a template that does not have - Xen tools or VMware Tools, and later installs the tools on the VM, - then the user can inform &PRODUCT; using the updateVirtualMachine API. - After installing the tools and updating the virtual machine, stop - and start the VM. - -
-
diff --git a/docs/en-US/best-practices-primary-storage.xml b/docs/en-US/best-practices-primary-storage.xml deleted file mode 100644 index 279b95c0de1..00000000000 --- a/docs/en-US/best-practices-primary-storage.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Best Practices for Primary Storage - - The speed of primary storage will impact guest performance. If possible, choose smaller, higher RPM drives or SSDs for primary storage. - There are two ways CloudStack can leverage primary storage: - Static: This is CloudStack's traditional way of handling storage. In this model, a preallocated amount of storage (ex. a volume from a SAN) is given to CloudStack. CloudStack then permits many of its volumes to be created on this storage (can be root and/or data disks). If using this technique, ensure that nothing is stored on the storage. Adding the storage to &PRODUCT; will destroy any existing data. - Dynamic: This is a newer way for CloudStack to manage storage. In this model, a storage system (rather than a preallocated amount of storage) is given to CloudStack. CloudStack, working in concert with a storage plug-in, dynamically creates volumes on the storage system and each volume on the storage system maps to a single CloudStack volume. This is highly useful for features such as storage Quality of Service. Currently this feature is supported for data disks (Disk Offerings). - -
diff --git a/docs/en-US/best-practices-secondary-storage.xml b/docs/en-US/best-practices-secondary-storage.xml deleted file mode 100644 index 3d535c326e9..00000000000 --- a/docs/en-US/best-practices-secondary-storage.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Best Practices for Secondary Storage - - Each Zone can have one or more secondary storage servers. Multiple secondary storage servers provide increased scalability to the system. - Secondary storage has a high read:write ratio and is expected to consist of larger drives with lower IOPS than primary storage. - Ensure that nothing is stored on the server. Adding the server to &PRODUCT; will destroy any existing data. - -
diff --git a/docs/en-US/best-practices-templates.xml b/docs/en-US/best-practices-templates.xml deleted file mode 100644 index 4e2992c021d..00000000000 --- a/docs/en-US/best-practices-templates.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Best Practices for Templates - If you plan to use large templates (100 GB or larger), be sure you have a 10-gigabit network to support the large templates. A slower network can lead to timeouts and other errors when large templates are used. -
diff --git a/docs/en-US/best-practices-virtual-router.xml b/docs/en-US/best-practices-virtual-router.xml deleted file mode 100644 index 060d8680992..00000000000 --- a/docs/en-US/best-practices-virtual-router.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Best Practices for Virtual Routers - - WARNING: Restarting a virtual router from a hypervisor console deletes all the iptables rules. To work around this issue, stop the virtual router and start it from the &PRODUCT; UI. - WARNING: Do not use the destroyRouter API when only one router is available in the network, because restartNetwork API with the cleanup=false parameter can't recreate it later. If you want to destroy and recreate the single router available in the network, use the restartNetwork API with the cleanup=true parameter. - - - - -
diff --git a/docs/en-US/best-practices.xml b/docs/en-US/best-practices.xml deleted file mode 100644 index 41d7cde9036..00000000000 --- a/docs/en-US/best-practices.xml +++ /dev/null @@ -1,82 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Best Practices - Deploying a cloud is challenging. There are many different technology choices to make, and &PRODUCT; is flexible enough in its configuration that there are many possible ways to combine and configure the chosen technology. This section contains suggestions and requirements about cloud deployments. - These should be treated as suggestions and not absolutes. However, we do encourage anyone planning to build a cloud outside of these guidelines to seek guidance and advice on the project mailing lists. -
- Process Best Practices - - - A staging system that models the production environment is strongly advised. It is critical if customizations have been applied to &PRODUCT;. - - - Allow adequate time for installation, a beta, and learning the system. Installs with basic networking can be done in hours. Installs with advanced networking usually take several days for the first attempt, with complicated installations taking longer. For a full production system, allow at least 4-8 weeks for a beta to work through all of the integration issues. You can get help from fellow users on the cloudstack-users mailing list. - - -
-
- Setup Best Practices - - - Each host should be configured to accept connections only from well-known entities such as the &PRODUCT; Management Server or your network monitoring software. - - - Use multiple clusters per pod if you need to achieve a certain switch density. - - - Primary storage mountpoints or LUNs should not exceed 6 TB in size. It is better to have multiple smaller primary storage elements per cluster than one large one. - - - When exporting shares on primary storage, avoid data loss by restricting the range of IP addresses that can access the storage. See "Linux NFS on Local Disks and DAS" or "Linux NFS on iSCSI". - - - NIC bonding is straightforward to implement and provides increased reliability. - - - 10G networks are generally recommended for storage access when larger servers that can support relatively more VMs are used. - - - Host capacity should generally be modeled in terms of RAM for the guests. Storage and CPU may be overprovisioned. RAM may not. RAM is usually the limiting factor in capacity designs. - - - (XenServer) Configure the XenServer dom0 settings to allocate more memory to dom0. This can enable XenServer to handle larger numbers of virtual machines. We recommend 2940 MB of RAM for XenServer dom0. For instructions on how to do this, see http://support.citrix.com/article/CTX126531. The article refers to XenServer 5.6, but the same information applies to XenServer 6.0. - - -
-
- Maintenance Best Practices - - - Monitor host disk space. Many host failures occur because the host's root disk fills up from logs that were not rotated adequately. - - - Monitor the total number of VM instances in each cluster, and disable allocation to the cluster if the total is approaching the maximum that the hypervisor can handle. Be sure to leave a safety margin to allow for the possibility of one or more hosts failing, which would increase the VM load on the other hosts as the VMs are redeployed. Consult the documentation for your chosen hypervisor to find the maximum permitted number of VMs per host, then use &PRODUCT; global configuration settings to set this as the default limit. Monitor the VM activity in each cluster and keep the total number of VMs below a safe level that allows for the occasional host failure. For example, if there are N hosts in the cluster, and you want to allow for one host in the cluster to be down at any given time, the total number of VM instances you can permit in the cluster is at most (N-1) * (per-host-limit). Once a cluster reaches this number of VMs, use the &PRODUCT; UI to disable allocation to the cluster. - - - The lack of up-to-date hotfixes can lead to data corruption and lost VMs. - Be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor’s support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches.
-
diff --git a/docs/en-US/build-deb.xml b/docs/en-US/build-deb.xml deleted file mode 100644 index dca31d23a28..00000000000 --- a/docs/en-US/build-deb.xml +++ /dev/null @@ -1,123 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Building DEB packages - - In addition to the bootstrap dependencies, you'll also need to install - several other dependencies. Note that we recommend using Maven 3, which - is not currently available in 12.04.1 LTS. So, you'll also need to add a - PPA repository that includes Maven 3. After running the command - add-apt-repository, you will be prompted to continue and - a GPG key will be added. - - -$ sudo apt-get update -$ sudo apt-get install python-software-properties -$ sudo add-apt-repository ppa:natecarlson/maven3 -$ sudo apt-get update -$ sudo apt-get install ant debhelper openjdk-6-jdk tomcat6 libws-commons-util-java genisoimage python-mysqldb libcommons-codec-java libcommons-httpclient-java liblog4j1.2-java maven3 - - - While we have defined, and you have presumably already installed the - bootstrap prerequisites, there are a number of build time prerequisites - that need to be resolved. &PRODUCT; uses maven for dependency resolution. - You can resolve the buildtime dependencies for CloudStack by running: - -$ mvn3 -P deps - - Now that we have resolved the dependencies we can move on to building &PRODUCT; - and packaging them into DEBs by issuing the following command. - - -$ dpkg-buildpackage -uc -us - - - - This command will build 16 Debian packages. You should have all of the following: - - -cloud-agent_4.0.0-incubating_amd64.deb -cloud-agent-deps_4.0.0-incubating_amd64.deb -cloud-agent-libs_4.0.0-incubating_amd64.deb -cloud-awsapi_4.0.0-incubating_amd64.deb -cloud-cli_4.0.0-incubating_amd64.deb -cloud-client_4.0.0-incubating_amd64.deb -cloud-client-ui_4.0.0-incubating_amd64.deb -cloud-core_4.0.0-incubating_amd64.deb -cloud-deps_4.0.0-incubating_amd64.deb -cloud-python_4.0.0-incubating_amd64.deb -cloud-scripts_4.0.0-incubating_amd64.deb -cloud-server_4.0.0-incubating_amd64.deb -cloud-setup_4.0.0-incubating_amd64.deb -cloud-system-iso_4.0.0-incubating_amd64.deb -cloud-usage_4.0.0-incubating_amd64.deb -cloud-utils_4.0.0-incubating_amd64.deb - - 
- Setting up an APT repo - - After you've created the packages, you'll want to copy them to a system where you can serve the packages over HTTP. You'll create a directory for the packages and then use dpkg-scanpackages to create Packages.gz, which holds information about the archive structure. Finally, you'll add the repository to your system(s) so you can install the packages using APT. - - The first step is to make sure that you have the dpkg-dev package installed. This should have been installed when you pulled in the debhelper application previously, but if you're generating Packages.gz on a different system, be sure that it's installed there as well. - -$ sudo apt-get install dpkg-dev - -The next step is to copy the DEBs to the directory where they can be served over HTTP. We'll use /var/www/cloudstack/repo in the examples, but change the directory to whatever works for you. - - -sudo mkdir -p /var/www/cloudstack/repo/binary -sudo cp *.deb /var/www/cloudstack/repo/binary -sudo cd /var/www/cloudstack/repo/binary -sudo dpkg-scanpackages . /dev/null | tee Packages | gzip -9 > Packages.gz - - -Note: Override Files - You can safely ignore the warning about a missing override file. - - -Now you should have all of the DEB packages and Packages.gz in the binary directory and available over HTTP. (You may want to use wget or curl to test this before moving on to the next step.) - -
-
- Configuring your machines to use the APT repository - - Now that we have created the repository, you need to configure your machine - to make use of the APT repository. You can do this by adding a repository file - under /etc/apt/sources.list.d. Use your preferred editor to - create /etc/apt/sources.list.d/cloudstack.list with this - line: - - deb http://server.url/cloudstack/repo binary ./ - - Now that you have the repository info in place, you'll want to run another - update so that APT knows where to find the &PRODUCT; packages. - -$ sudo apt-get update - -You can now move on to the instructions under Install on Ubuntu. - -
-
diff --git a/docs/en-US/build-nonoss.xml b/docs/en-US/build-nonoss.xml deleted file mode 100644 index dbcab99e9bb..00000000000 --- a/docs/en-US/build-nonoss.xml +++ /dev/null @@ -1,49 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Building Non-OSS - If you need support for the VMware, NetApp, F5, NetScaler, SRX, or any other non-Open Source Software (nonoss) plugins, you'll need to download a few components on your own and follow a slightly different procedure to build from source. - Why Non-OSS? - Some of the plugins supported by &PRODUCT; cannot be distributed with &PRODUCT; for licensing reasons. In some cases, some of the required libraries/JARs are under a proprietary license. In other cases, the required libraries may be under a license that's not compatible with Apache's licensing guidelines for third-party products. - - - - To build the Non-OSS plugins, you'll need to have the requisite JARs installed under the deps directory. - Because these modules require dependencies that can't be distributed with &PRODUCT; you'll need to download them yourself. Links to the most recent dependencies are listed on the How to build CloudStack page on the wiki. - - You may also need to download vhd-util when using XenServer hypervisors, which was removed due to licensing issues. You'll copy vhd-util to the scripts/vm/hypervisor/xenserver/ directory. - - - Once you have all the dependencies copied over, you'll be able to build &PRODUCT; with the nonoss option: - - $ mvn clean - $ mvn install -Dnonoss - - - - Once you've built &PRODUCT; with the nonoss profile, you can package it using the or instructions. - - -
diff --git a/docs/en-US/build-rpm.xml b/docs/en-US/build-rpm.xml deleted file mode 100644 index c15074293a6..00000000000 --- a/docs/en-US/build-rpm.xml +++ /dev/null @@ -1,96 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Building RPMs from Source - As mentioned previously in , you will need to install several prerequisites before you can build packages for &PRODUCT;. Here we'll assume you're working with a 64-bit build of CentOS or Red Hat Enterprise Linux. - # yum groupinstall "Development Tools" - # yum install java-1.6.0-openjdk-devel.x86_64 genisoimage mysql mysql-server ws-commons-util MySQL-python tomcat6 createrepo - Next, you'll need to install build-time dependencies for CloudStack with - Maven. We're using Maven 3, so you'll want to - grab a Maven 3 tarball - and uncompress it in your home directory (or whatever location you prefer): - $ tar zxvf apache-maven-3.0.4-bin.tar.gz - $ export PATH=/usr/local/apache-maven-3.0.4//bin:$PATH - Maven also needs to know where Java is, and expects the JAVA_HOME environment - variable to be set: - $ export JAVA_HOME=/usr/lib/jvm/jre-1.6.0-openjdk.x86_64/ - Verify that Maven is installed correctly: - $ mvn --version - You probably want to ensure that your environment variables will survive a logout/reboot. - Be sure to update ~/.bashrc with the PATH and JAVA_HOME variables. - - Building RPMs for &PRODUCT; is fairly simple. Assuming you already have the source downloaded and have uncompressed the tarball into a local directory, you're going to be able to generate packages in just a few minutes. - Packaging has Changed - If you've created packages for &PRODUCT; previously, you should be aware that the process has changed considerably since the project has moved to using Apache Maven. Please be sure to follow the steps in this section closely. - -
- Generating RPMS - Now that we have the prerequisites and source, you will cd to the packaging/centos63/ directory. - $ cd packaging/centos63 - Generating RPMs is done using the package.sh script: - $./package.sh - - That will run for a bit and then place the finished packages in dist/rpmbuild/RPMS/x86_64/. - You should see seven RPMs in that directory: - - cloudstack-agent-4.1.0-SNAPSHOT.el6.x86_64.rpm - cloudstack-awsapi-4.1.0-SNAPSHOT.el6.x86_64.rpm - cloudstack-cli-4.1.0-SNAPSHOT.el6.x86_64.rpm - cloudstack-common-4.1.0-SNAPSHOT.el6.x86_64.rpm - cloudstack-docs-4.1.0-SNAPSHOT.el6.x86_64.rpm - cloudstack-management-4.1.0-SNAPSHOT.el6.x86_64.rpm - cloudstack-usage-4.1.0-SNAPSHOT.el6.x86_64.rpm - -
- Creating a yum repo - - While RPMs are a useful packaging format - it's most easily consumed from Yum repositories over a network. The next step is to create a Yum Repo with the finished packages: - $ mkdir -p ~/tmp/repo - $ cp dist/rpmbuild/RPMS/x86_64/*rpm ~/tmp/repo/ - $ createrepo ~/tmp/repo - - - The files and directories within ~/tmp/repo can now be uploaded to a web server and serve as a yum repository. - 
-
- Configuring your systems to use your new yum repository - - Now that your yum repository is populated with RPMs and metadata - we need to configure the machines that need to install &PRODUCT;. - Create a file named /etc/yum.repos.d/cloudstack.repo with this information: - - [apache-cloudstack] - name=Apache CloudStack - baseurl=http://webserver.tld/path/to/repo - enabled=1 - gpgcheck=0 - - - Completing this step will allow you to easily install &PRODUCT; on a number of machines across the network. - -
-
-
diff --git a/docs/en-US/building-devcloud.xml b/docs/en-US/building-devcloud.xml deleted file mode 100644 index f3c4d19a5d9..00000000000 --- a/docs/en-US/building-devcloud.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Building DevCloud - The DevCloud appliance can be downloaded from the wiki at . It can also be built from scratch. Code is being developed to provide this alternative build. It is based on veewee, Vagrant and Puppet. - The goal is to automate the DevCloud build and make this automation capability available to all within the source release of &PRODUCT; - This is under heavy development. The code is located in the source tree under tools/devcloud - A preliminary wiki page describes the build at https://cwiki.apache.org/confluence/display/CLOUDSTACK/Building+DevCloud - -
diff --git a/docs/en-US/building-documentation.xml b/docs/en-US/building-documentation.xml deleted file mode 100644 index 8ee63b06ec0..00000000000 --- a/docs/en-US/building-documentation.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Building &PRODUCT; Documentation - To build a specific guide, go to the source tree of the documentation in /docs and identify the guide you want to build. - Currently there are four guides plus the release notes, all defined in publican configuration files: - - publican-adminguide.cfg - publican-devguide.cfg - publican-installation.cfg - publican-plugin-niciranvp.cfg - publican-release-notes.cfg - - To build the Developer guide for example, do the following: - publican build --config=publican-devguide.cfg --formats=pdf --langs=en-US - A pdf file will be created in tmp/en-US/pdf, you may choose to build the guide in a different format like html. In that case just replace the format value. - -
diff --git a/docs/en-US/building-marvin.xml b/docs/en-US/building-marvin.xml deleted file mode 100644 index e33c4cb2248..00000000000 --- a/docs/en-US/building-marvin.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Building and Installing Marvin - Marvin is built with Maven and is dependent on APIdoc. To build it do the following in the root tree of &PRODUCT;: - mvn -P developer -pl :cloud-apidoc - mvn -P developer -pl :cloud-marvin - If successful the build will have created the cloudstackAPI Python package under tools/marvin/marvin/cloudstackAPI as well as a gziped Marvin package under tools/marvin dist. To install the Python Marvin module do the following in tools/marvin: - sudo python ./setup.py install - The dependencies will be downloaded the Python module installed and you should be able to use Marvin in Python. Check that you can import the module before starting to use it. - $ python -Python 2.7.3 (default, Nov 17 2012, 19:54:34) -[GCC 4.2.1 Compatible Apple Clang 4.1 ((tags/Apple/clang-421.11.66))] on darwin -Type "help", "copyright", "credits" or "license" for more information. ->>> import marvin ->>> from marvin.cloudstackAPI import * ->>> - - You could also install it using pip or easy_install using the local distribution package in tools/marvin/dist : - pip install tools/marvin/dist/Marvin-0.1.0.tar.gz - Or: - easy_install tools/marvin/dist/Marvin-0.1.0.tar.gz - -
diff --git a/docs/en-US/building-prerequisites.xml b/docs/en-US/building-prerequisites.xml deleted file mode 100644 index d97ca40f2a3..00000000000 --- a/docs/en-US/building-prerequisites.xml +++ /dev/null @@ -1,66 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - -
- Build Procedure Prerequisites - In this section we will assume that you are using the Ubuntu Linux distribution with the Advanced Packaging Tool (APT). If you are using a different distribution or OS and a different packaging tool, adapt the following instructions to your environment. To build &PRODUCT; you will need: - - - git, http://git-scm.com - sudo apt-get install git-core - - - maven, http://maven.apache.org - sudo apt-get install maven - Make sure that you installed maven 3 - $ mvn --version -Apache Maven 3.0.4 -Maven home: /usr/share/maven -Java version: 1.6.0_24, vendor: Sun Microsystems Inc. -Java home: /usr/lib/jvm/java-6-openjdk-amd64/jre -Default locale: en_US, platform encoding: UTF-8 -OS name: "linux", version: "3.2.0-33-generic", arch: "amd64", family: "unix" - - - java - set the JAVA_HOME environment variable - $ export JAVA_HOME=/usr/lib/jvm/java-6-openjdk - - - - In addition, to deploy and run &PRODUCT; in a development environment you will need: - - - Mysql - sudo apt-get install mysql-server-5.5 - Start the mysqld service and create a cloud user with cloud as a password - - - Tomcat 6 - sudo apt-get install tomcat6 - - - -
diff --git a/docs/en-US/building-translation.xml b/docs/en-US/building-translation.xml deleted file mode 100644 index dd66365cd9d..00000000000 --- a/docs/en-US/building-translation.xml +++ /dev/null @@ -1,75 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Translating &PRODUCT; Documentation - Now that you know how to build the documentation with Publican, let's move on to building it in different languages. Publican helps us - build the documentation in various languages by using Portable Object Template (POT) files and Portable Objects (PO) files for each language. - - The POT files are generated by parsing all the DocBook files in the language of origin, en-US for us, and creating a long list of strings - for each file that needs to be translated. The translation can be done by hand directly in the PO files of each target language or via the - transifex service. - - - Transifex is a free service to help translate documents and organize distributed teams - of translators. Anyone interested in helping with the translation should get an account on Transifex - - - Three &PRODUCT; projects exist on Transifex. It is recommended to tour those projects to become familiar with Transifex: - - https://www.transifex.com/projects/p/ACS_DOCS/ - https://www.transifex.com/projects/p/ACS_Runbook/ - https://www.transifex.com/projects/p/CloudStackUI/ - - - - - The pot directory should already exist in the source tree. If you want to build an up to date translation, you might have to update it to include any pot file that was not previously generated. - To register new resources on transifex, you will need to be an admin of the transifex &PRODUCT; site. Send an email to the developer list if you want access. - - First we need to generate the .pot files for all the DocBook xml files needed for a particular guide. This is well explained at the publican website in a section on - how to prepare a document for translation. - The basic command to execute to build the pot files for the developer guide is: - publican update_pot --config=publican-devguide.cfg - This will create a pot directory with pot files in it, one for each corresponding xml files needed to build the guide. 
Once generated, all pots files need to be configured for translation using transifex this is best done by using the transifex client that you can install with the following command (For RHEL and its derivatives): - yum install transifex-client - The transifex client is also available via PyPi and you can install it like this: - easy_install transifex-client - Once you have installed the transifex client you can run the settx.sh script in the docs directory. This will create the .tx/config file used by transifex to push and pull all translation strings. - All the resource files need to be uploaded to transifex, this is done with the transifex client like so: - tx push -s - Once the translators have completed translation of the documentation, the translated strings can be pulled from transifex like so: - tx pull -a - If you wish to push specific resource files or pull specific languages translation strings, you can do so with the transifex client. A complete documentation of - the client is available on the client website - When you pull new translation strings a directory will be created corresponding to the language of the translation. This directory will contain PO files that will be used by Publican to create the documentation in that specific language. For example assuming that you pull the French translation whose language code is fr-FR, you will build the documentation with publican: - publican build --config=publican-devguide.cfg --formats=html --langs=fr-FR - - - Some languages like Chinese or Japanese will not render well in pdf format and html should be used. - - - - -
diff --git a/docs/en-US/building-with-maven-deploy.xml b/docs/en-US/building-with-maven-deploy.xml deleted file mode 100644 index e4b9801aa30..00000000000 --- a/docs/en-US/building-with-maven-deploy.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Deployment and Testing Steps - Deploying the &PRODUCT; code that you compiled is a two step process: - - If you have not configured the database or modified its properties do: - mvn -P developer -pl developer -Ddeploydb - - Then you need to run the &PRODUCT; management server. To attach a debugger to it, do: - export MAVEN_OPTS="-Xmx1024 -Xdebug -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n" - mvn -pl :cloud-client-ui jetty:run - - - When dealing with the database, remember that you may wipe it entirely and lose any data center configuration that you may have set previously. -
- diff --git a/docs/en-US/building-with-maven-steps.xml b/docs/en-US/building-with-maven-steps.xml deleted file mode 100644 index 1c15bfa96e1..00000000000 --- a/docs/en-US/building-with-maven-steps.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Building Steps - &PRODUCT; uses git for source version control, first make sure you have the source code by pulling it: - git clone https://git-wip-us.apache.org/repos/asf/cloudstack.git - Several Project Object Models (POM) are defined to deal with the various build targets of &PRODUCT;. Certain features require some packages that are not compatible with the Apache license and therefore need to be downloaded on your own. Check the wiki for additional information https://cwiki.apache.org/CLOUDSTACK/building-with-maven.html. In order to build all the open source targets of &PRODUCT; do: - mvn clean install - The resulting jar files will be in the target directory of the subdirectory of the compiled module. -
- diff --git a/docs/en-US/building-with-maven.xml b/docs/en-US/building-with-maven.xml deleted file mode 100644 index 5363b1d754a..00000000000 --- a/docs/en-US/building-with-maven.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Using Maven to Build &PRODUCT; - - - - - - diff --git a/docs/en-US/castor-with-cs.xml b/docs/en-US/castor-with-cs.xml deleted file mode 100644 index 7bf676b9c62..00000000000 --- a/docs/en-US/castor-with-cs.xml +++ /dev/null @@ -1,86 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Using the CAStor Back-end Storage with &PRODUCT; - This section describes how to use a CAStor cluster as the back-end storage system for a - &PRODUCT; S3 front-end. The CAStor back-end storage for &PRODUCT; extends the existing storage - classes and allows the storage configuration attribute to point to a CAStor cluster. - This feature makes use of the &PRODUCT; server's local disk to spool files before writing - them to CAStor when handling the PUT operations. However, a file must be successfully written - into the CAStor cluster prior to the return of a success code to the S3 client to ensure that - the transaction outcome is correctly reported. - - The S3 multipart file upload is not supported in this release. You are prompted with - proper error message if a multipart upload is attempted. - - To configure CAStor: - - - Install &PRODUCT; by following the instructions given in the INSTALL.txt file. - - You can use the S3 storage system in &PRODUCT; without setting up and installing the - compute components. - - - - Enable the S3 API by setting "enable.s3.api = true" in the Global parameter section in - the UI and register a user. - For more information, see S3 API in - &PRODUCT;. - - - Edit the cloud-bridge.properties file and modify the "storage.root" parameter. - - - Set "storage.root" to the key word "castor". - - - Specify a CAStor tenant domain to which content is written. If the domain is not - specified, the CAStor default domain, specified by the "cluster" parameter in CAStor's - node.cfg file, will be used. - - - Specify a list of node IP addresses, or set "zeroconf" and the cluster - name. When using a static IP list with a large cluster, it is not necessary to include - every node, only a few is required to initialize the client software. 
- For example: - storage.root=castor domain=cloudstack 10.1.1.51 10.1.1.52 10.1.1.53 - In this example, the configuration file directs &PRODUCT; to write the S3 files to - CAStor instead of to a file system, where the CAStor domain name is cloudstack, and the - CAStor node IP addresses are those listed. - - - (Optional) The last value is a port number on which to communicate with the CAStor - cluster. If not specified, the default is 80. - #Static IP list with optional port -storage.root=castor domain=cloudstack 10.1.1.51 10.1.1.52 10.1.1.53 80 -#Zeroconf locator for cluster named "castor.example.com" -storage.root=castor domain=cloudstack zeroconf=castor.example.com - - - - - Create the tenant domain within the CAStor storage cluster. If you omit this step before - attempting to store content, you will get HTTP 412 errors in the awsapi.log. - - -
diff --git a/docs/en-US/change-console-proxy-ssl-certificate-domain.xml b/docs/en-US/change-console-proxy-ssl-certificate-domain.xml deleted file mode 100644 index 3fd05018e99..00000000000 --- a/docs/en-US/change-console-proxy-ssl-certificate-domain.xml +++ /dev/null @@ -1,49 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Changing the Console Proxy SSL Certificate and Domain - If the administrator prefers, it is possible for the URL of the customer's console session to show a domain other than realhostip.com. The administrator can customize the displayed domain by selecting a different domain and uploading a new SSL certificate and private key. The domain must run a DNS service that is capable of resolving queries for addresses of the form aaa-bbb-ccc-ddd.your.domain to an IPv4 IP address in the form aaa.bbb.ccc.ddd, for example, 202.8.44.1. To change the console proxy domain, SSL certificate, and private key: - - Set up dynamic name resolution or populate all possible DNS names in your public IP range into your existing DNS server with the format aaa-bbb-ccc-ddd.company.com -> aaa.bbb.ccc.ddd. - Generate the private key and certificate signing request (CSR). When you are using openssl to generate private/public key pairs and CSRs, for the private key that you are going to paste into the &PRODUCT; UI, be sure to convert it into PKCS#8 format. - - Generate a new 2048-bit private keyopenssl genrsa -des3 -out yourprivate.key 2048 - Generate a new certificate CSRopenssl req -new -key yourprivate.key -out yourcertificate.csr - Head to the website of your favorite trusted Certificate Authority, purchase an SSL certificate, and submit the CSR. You should receive a valid certificate in return - Convert your private key format into PKCS#8 encrypted format.openssl pkcs8 -topk8 -in yourprivate.key -out yourprivate.pkcs8.encrypted.key - Convert your PKCS#8 encrypted private key into the PKCS#8 format that is compliant with &PRODUCT;openssl pkcs8 -in yourprivate.pkcs8.encrypted.key -out yourprivate.pkcs8.key - - - In the Update SSL Certificate screen of the &PRODUCT; UI, paste the following - - The Certificate you generated in the previous steps. - The Private key you generated in the previous steps. 
- The desired new domain name; for example, company.com - - - The desired new domain name; for example, company.comThis stops all currently running console proxy VMs, then restarts them with the new certificate and key. Users might notice a brief interruption in console availability - - The Management Server will generate URLs of the form "aaa-bbb-ccc-ddd.company.com" after this change is made. New console requests will be served with the new DNS domain name, certificate, and key -
diff --git a/docs/en-US/change-database-config.xml b/docs/en-US/change-database-config.xml deleted file mode 100644 index 567b9e41d04..00000000000 --- a/docs/en-US/change-database-config.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Changing the Database Configuration - The &PRODUCT; Management Server stores database configuration information (e.g., hostname, port, credentials) in the file /etc/cloudstack/management/db.properties. To effect a change, edit this file on each Management Server, then restart the Management Server. -
diff --git a/docs/en-US/change-database-password.xml b/docs/en-US/change-database-password.xml deleted file mode 100644 index 863984e269c..00000000000 --- a/docs/en-US/change-database-password.xml +++ /dev/null @@ -1,76 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Changing the Database Password - You may need to change the password for the MySQL account used by CloudStack. If so, you'll need to change the password in MySQL, and then add the encrypted password to /etc/cloudstack/management/db.properties. - - - Before changing the password, you'll need to stop CloudStack's management server and the usage engine if you've deployed that component. - -# service cloudstack-management stop -# service cloudstack-usage stop - - - - Next, you'll update the password for the CloudStack user on the MySQL server. - -# mysql -u root -p - - At the MySQL shell, you'll change the password and flush privileges: - -update mysql.user set password=PASSWORD("newpassword123") where User='cloud'; -flush privileges; -quit; - - - - The next step is to encrypt the password and copy the encrypted password to CloudStack's database configuration (/etc/cloudstack/management/db.properties). - - # java -classpath /usr/share/cloudstack-common/lib/jasypt-1.9.0.jar \ -org.jasypt.intf.cli.JasyptPBEStringEncryptionCLI encrypt.sh \ -input="newpassword123" password="`cat /etc/cloudstack/management/key`" \ -verbose=false - - -File encryption type - Note that this is for the file encryption type. If you're using the web encryption type then you'll use password="management_server_secret_key" - - - - Now, you'll update /etc/cloudstack/management/db.properties with the new ciphertext. Open /etc/cloudstack/management/db.properties in a text editor, and update these parameters: - -db.cloud.password=ENC(encrypted_password_from_above) -db.usage.password=ENC(encrypted_password_from_above) - - - - After copying the new password over, you can now start CloudStack (and the usage engine, if necessary). - - # service cloudstack-management start - # service cloud-usage start - - - -
diff --git a/docs/en-US/change-host-password.xml b/docs/en-US/change-host-password.xml deleted file mode 100644 index 7221fe62417..00000000000 --- a/docs/en-US/change-host-password.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Changing Host Password - The password for a XenServer Node, KVM Node, or vSphere Node may be changed in the database. Note that all Nodes in a Cluster must have the same password. - To change a Node's password: - - Identify all hosts in the cluster. - Change the password on all hosts in the cluster. Now the password for the host and the password known to &PRODUCT; will not match. Operations on the cluster will fail until the two passwords match. - - Get the list of host IDs for the host in the cluster where you are changing the password. You will need to access the database to determine these host IDs. For each hostname "h" (or vSphere cluster) that you are changing the password for, execute: - mysql> select id from cloud.host where name like '%h%'; - This should return a single ID. Record the set of such IDs for these hosts. - Update the passwords for the host in the database. In this example, we change the passwords for hosts with IDs 5, 10, and 12 to "password". - mysql> update cloud.host set password='password' where id=5 or id=10 or id=12; - -
diff --git a/docs/en-US/change-network-offering-on-guest-network.xml b/docs/en-US/change-network-offering-on-guest-network.xml deleted file mode 100644 index de3a80ecddc..00000000000 --- a/docs/en-US/change-network-offering-on-guest-network.xml +++ /dev/null @@ -1,68 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Changing the Network Offering on a Guest Network - A user or administrator can change the network offering that is associated with an existing - guest network. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - If you are changing from a network offering that uses the &PRODUCT; virtual router to - one that uses external devices as network service providers, you must first stop all the VMs - on the network. - - - In the left navigation, choose Network. - - - Click the name of the network you want to modify. - - - In the Details tab, click Edit. - - - - - EditButton.png: button to edit a network - - - - - In Network Offering, choose the new network offering, then click Apply. - A prompt is displayed asking whether you want to keep the existing CIDR. This is to let - you know that if you change the network offering, the CIDR will be affected. - If you upgrade between virtual router as a provider and an external network device as - provider, acknowledge the change of CIDR to continue, so choose Yes. - - - Wait for the update to complete. Don’t try to restart VMs until the network change is - complete. - - - If you stopped any VMs, restart them. - - -
diff --git a/docs/en-US/change-to-behavior-of-list-commands.xml b/docs/en-US/change-to-behavior-of-list-commands.xml deleted file mode 100644 index 69b9e4d2beb..00000000000 --- a/docs/en-US/change-to-behavior-of-list-commands.xml +++ /dev/null @@ -1,108 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Change to Behavior of List Commands - There was a major change in how our List* API commands work in CloudStack 3.0 compared to - 2.2.x. The rules below apply only for managed resources – those that belong to an account, - domain, or project. They are irrelevant for the List* commands displaying unmanaged (system) - resources, such as hosts, clusters, and external network resources. - When no parameters are passed in to the call, the caller sees only resources owned by the - caller (even when the caller is the administrator). Previously, the administrator saw everyone - else's resources by default. - When accountName and domainId are passed in: - - - The caller sees the resources dedicated to the account specified. - - - If the call is executed by a regular user, the user is authorized to specify only the - user's own account and domainId. - - - If the caller is a domain administrator, CloudStack performs an authorization check to - see whether the caller is permitted to view resources for the given account and - domainId. - - - When projectId is passed in, only resources belonging to that project are listed. - When domainId is passed in, the call returns only resources belonging to the domain - specified. To see the resources of subdomains, use the parameter isRecursive=true. Again, the - regular user can see only resources owned by that user, the root administrator can list - anything, and a domain administrator is authorized to see only resources of the administrator's - own domain and subdomains. - To see all resources the caller is authorized to see, except for Project resources, use the - parameter listAll=true. - To see all Project resources the caller is authorized to see, use the parameter - projectId=-1. - There is one API command that doesn't fall under the rules above completely: the - listTemplates command. 
This command has its own flags defining the list rules: - - - - - - - listTemplates Flag - Description - - - - - featured - Returns templates that have been marked as featured and - public. - - - self - Returns templates that have been registered or created by the calling - user. - - - selfexecutable - Same as self, but only returns templates that are ready to be deployed - with. - - - sharedexecutable - Ready templates that have been granted to the calling user by another - user. - - - executable - Templates that are owned by the calling user, or public templates, that can - be used to deploy a new VM. - - - community - Returns templates that have been marked as public but not - featured. - - - all - Returns all templates (only usable by admins). - - - - - The &PRODUCT; UI on a general view will display all resources that the logged-in user is - authorized to see, except for project resources. To see the project resources, select the - project view. -
diff --git a/docs/en-US/changed-API-commands-4.2.xml b/docs/en-US/changed-API-commands-4.2.xml deleted file mode 100644 index 8fda9cc13bd..00000000000 --- a/docs/en-US/changed-API-commands-4.2.xml +++ /dev/null @@ -1,1129 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Changed API Commands in 4.2 - - - - - - - API Commands - Description - - - - - listNetworkACLs - The following new request parameters are added: aclid (optional), action - (optional), protocol (optional) - The following new response parameters are added: aclid, action, - number - - - copyTemplate - - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - listRouters - - The following new response parameters are added: ip6dns1, ip6dns2, - role - - - updateConfiguration - The following new request parameters are added: accountid (optional), - clusterid (optional), storageid (optional), zoneid (optional) - The following new response parameters are added: id, scope - - - listVolumes - The following request parameter is removed: details - The following new response parameter is added: displayvolume - - - suspendProject - - The following new response parameters are added: cpuavailable, cpulimit, cputotal, - ipavailable, iplimit, iptotal, memoryavailable, memorylimit, memorytotal, - networkavailable, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal, snapshotavailable, snapshotlimit, - snapshottotal, templateavailable, templatelimit, templatetotal, vmavailable, vmlimit, - vmrunning, vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, - vpcavailable, vpclimit, vpctotal - - - listRemoteAccessVpns - - The following new response parameters are added: id - - - registerTemplate - The following new request parameters are added: imagestoreuuid (optional), - isdynamicallyscalable (optional), isrouting (optional) - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - addTrafficMonitor - - The following response parameters are removed: privateinterface, privatezone, - publicinterface, publiczone, usageinterface, username - - - createTemplate - The following response 
parameters are removed: clusterid, clustername, - disksizeallocated, disksizetotal, disksizeused, ipaddress, path, podid, podname, - state, tags, type - The following new response parameters are added: account, accountid, bootable, - checksum, crossZones, details, displaytext, domain, domainid, format, hostid, - hostname, hypervisor, isdynamicallyscalable, isextractable, isfeatured, ispublic, - isready, ostypeid, ostypename, passwordenabled, project, projectid, removed, size, - sourcetemplateid, sshkeyenabled, status, templatetag, templatetype, - tags - - - listLoadBalancerRuleInstances - - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, - affinitygroup - - - migrateVolume - The following new request parameters is added: livemigrate (optional) - The following new response parameters is added: displayvolume - - - createAccount - The following new request parameters are added: accountid (optional), userid - (optional) - The following new response parameters are added: accountdetails, cpuavailable, - cpulimit, cputotal, defaultzoneid, ipavailable, iplimit, iptotal, iscleanuprequired, - isdefault, memoryavailable, memorylimit, memorytotal, name, networkavailable, - networkdomain, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, projectavailable, projectlimit, - projecttotal, receivedbytes, secondarystorageavailable, secondarystoragelimit, - secondarystoragetotal, sentbytes, snapshotavailable, snapshotlimit, snapshottotal, - templateavailable, templatelimit, templatetotal, vmavailable, vmlimit, vmrunning, - vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, vpcavailable, vpclimit, - vpctotal, user - The following parameters are removed: account, accountid, apikey, created, email, - firstname, lastname, secretkey, timezone, username - - - updatePhysicalNetwork - The following new request parameters is added: removevlan 
(optional) - - - - listTrafficMonitors - - The following response parameters are removed: privateinterface, privatezone, - publicinterface, publiczone, usageinterface, username - - - attachIso - - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, - affinitygroup - - - listProjects - The following new request parameters are added: cpuavailable, cpulimit, - cputotal, ipavailable, iplimit, iptotal, memoryavailable, memorylimit, memorytotal, - networkavailable, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal, snapshotavailable, snapshotlimit, - snapshottotal, templateavailable, templatelimit, templatetotal, vmavailable, vmlimit, - vmrunning, vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, - vpcavailable, vpclimit, vpctotal - - - enableAccount - - The following new response parameters are added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - listPublicIpAddresses - - The following new response parameters are added: isportable, vmipaddress - - - - enableStorageMaintenance - - The following new response parameters are added: hypervisor, scope, - suitableformigration - - - listLoadBalancerRules - The following new request parameters is added: networkid (optional) - The following new response parameters is added: networkid - - - stopRouter - - The following new response parameters are added: ip6dns1, ip6dns2, role - - - - listClusters - - The following new response parameters are added: cpuovercommitratio, - memoryovercommitratio - - - attachVolume - - The following new response parameter is added: displayvolume - - - updateVPCOffering - The following request 
parameters is made mandatory: id - - - resetSSHKeyForVirtualMachine - The following new request parameter is added: keypair (required) - The following parameter is removed: name - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, - affinitygroup - - - updateCluster - The following new request parameters are removed: cpuovercommitratio, - memoryovercommitratio - The following new response parameters are removed: cpuovercommitratio, - memoryovercommitratio - - - listPrivateGateways - The following new response parameters are added: aclid, sourcenatsupported - - - - ldapConfig - The following new request parameters are added: listall (optional) - The following parameters has been made optional: searchbase, hostname, - queryfilter - The following new response parameter is added: ssl - - - listTemplates - - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - listNetworks - - The following new response parameters are added: aclid, displaynetwork, ip6cidr, - ip6gateway, ispersistent, networkcidr, reservediprange - - - restartNetwork - - The following new response parameters are added: isportable, vmipaddress - - - - prepareTemplate - - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - rebootVirtualMachine - - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, - affinitygroup - - - changeServiceForRouter - The following new request parameters are added: aclid (optional), action - (optional), protocol (optional) - The following new response parameters are added: id, scope - - - updateZone - The following new request parameters are added: ip6dns1 (optional), ip6dns2 - (optional) - The following new response parameters are added: ip6dns1, ip6dns2 - - - ldapRemove - - The following new response parameters are added: ssl - 
- - updateServiceOffering - - The following new response parameters are added: deploymentplanner, isvolatile - - - - updateStoragePool - - The following new response parameters are added: hypervisor, scope, - suitableformigration - - - listFirewallRules - The following request parameter is removed: traffictype - The following new response parameters are added: networkid - - - updateUser - - The following new response parameters are added: iscallerchilddomain, isdefault - - - - updateProject - - The following new response parameters are added: cpuavailable, cpulimit, cputotal, - ipavailable, iplimit, iptotal, memoryavailable, memorylimit, memorytotal, - networkavailable, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal, snapshotavailable, snapshotlimit, - snapshottotal, templateavailable, templatelimit, templatetotal, vmavailable, vmlimit, - vmrunning, vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, - vpcavailable, vpclimit, vpctotal - - - updateTemplate - The following new request parameters are added: isdynamicallyscalable - (optional), isrouting (optional) - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - disableUser - - The following new response parameters are added: iscallerchilddomain, isdefault - - - - activateProject - - The following new response parameters are added: cpuavailable, cpulimit, cputotal, - ipavailable, iplimit, iptotal, memoryavailable, memorylimit, memorytotal, - networkavailable, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal, snapshotavailable, snapshotlimit, - snapshottotal, templateavailable, templatelimit, templatetotal, vmavailable, vmlimit, - vmrunning, vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, - vpcavailable, 
vpclimit, vpctotal - - - createNetworkACL - The following new request parameters are added: aclid (optional), action - (optional), number (optional) - The following request parameter is now optional: networkid - The following new response parameters are added: aclid, action, number - - - - enableStaticNat - The following new request parameters are added: vmguestip (optional) - - - - registerIso - The following new request parameters are added: imagestoreuuid (optional), - isdynamicallyscalable (optional) - The following new response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - createIpForwardingRule - - The following new response parameter is added: vmguestip - - - resetPasswordForVirtualMachine - - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, - affinitygroup - - - createVolume - The following new request parameter is added: displayvolume (optional) - The following new response parameter is added: displayvolume - - - startRouter - - The following new response parameters are added: ip6dns1, ip6dns2, role - - - - listCapabilities - The following new response parameters are added: apilimitinterval and - apilimitmax. - - - createServiceOffering - The following new request parameters are added: deploymentplanner (optional), - isvolatile (optional), serviceofferingdetails (optional). - isvolatie indicates whether the service offering includes Volatile VM capability, - which will discard the VM's root disk and create a new one on reboot. - The following new response parameters are added: deploymentplanner, isvolatile - - - - restoreVirtualMachine - The following request parameter is added: templateID (optional). This is used to point to the - new template ID when the base image is updated. The parameter templateID can be an ISO - ID in case of restore vm deployed using ISO. 
- The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - createNetwork - The following new request parameters are added: aclid (optional), - displaynetwork (optional), endipv6 (optional), ip6cidr (optional), ip6gateway - (optional), isolatedpvlan (optional), startipv6 (optional) - The following new response parameters are added: aclid, displaynetwork, ip6cidr, - ip6gateway, ispersistent, networkcidr, reservediprange - - - createVlanIpRange - The following new request parameters are added: startipv6, endipv6, - ip6gateway, ip6cidr - Changed parameters: startip (is now optional) - The following new response parameters are added: startipv6, endipv6, ip6gateway, - ip6cidr - - - CreateZone - The following new request parameters are added: ip6dns1, ip6dns2 - The following new response parameters are added: ip6dns1, ip6dns2 - - - deployVirtualMachine - The following request parameters are added: affinitygroupids (optional), - affinitygroupnames (optional), displayvm (optional), ip6address (optional) - The following request parameter is modified: iptonetworklist has a new possible - value, ipv6 - The following new response parameters are added: diskioread, diskiowrite, - diskkbsread, diskkbswrite, displayvm, isdynamicallyscalable, - affinitygroup - - - - createNetworkOffering - - - The following request parameters are added: details (optional), - egressdefaultpolicy (optional), ispersistent (optional) - ispersistent determines if the network or network offering created or listed by - using this offering are persistent or not. - The following response parameters are added: details, egressdefaultpolicy, - ispersistent - - - - - listNetworks - - - The following request parameters is added: isPersistent. - This parameter determines if the network or network offering created or listed by - using this offering are persistent or not. 
- - - - - listNetworkOfferings - - - The following request parameters is added: isPersistent. - This parameter determines if the network or network offering created or listed by - using this offering are persistent or not. - For listNetworkOfferings, the following response parameter has been added: - details, egressdefaultpolicy, ispersistent - - - - - addF5LoadBalancer - configureNetscalerLoadBalancer - addNetscalerLoadBalancer - listF5LoadBalancers - configureF5LoadBalancer - listNetscalerLoadBalancers - - - The following response parameter is removed: inline. - - - - - listRouters - - - For nic responses, the following fields have been added. - - - ip6address - - - ip6gateway - - - ip6cidr - - - - - - - listVirtualMachines - - - The following request parameters are added: affinitygroupid (optional), vpcid - (optional) - The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listRouters - listZones - - - For DomainRouter and DataCenter response, the following fields have been - added. - - - ip6dns1 - - - ip6dns2 - - - For listZones, the following optional request parameters are added: name, - networktype - - - - listFirewallRules - createFirewallRule - - The following request parameter is added: traffictype (optional). - The following response parameter is added: networkid - - - - listUsageRecords - The following response parameter is added: virtualsize. 
- - - - - deleteIso - - - The following request parameter is removed: forced - - - - addCluster - The following request parameters are added: guestvswitchtype (optional), guestvswitchtype - (optional), publicvswitchtype (optional), publicvswitchtype (optional) - The following request parameters are removed: cpuovercommitratio, - memoryovercommitratio - - - - updateCluster - The following request parameters are added: cpuovercommitratio, - ramovercommitratio - - - - - createStoragePool - - - The following request parameters are added: hypervisor (optional), provider - (optional), scope (optional) - The following request parameters have been made mandatory: podid, clusterid - The following response parameter has been added: hypervisor, scope, - suitableformigration - - - - listStoragePools - The following request parameter is added: scope (optional) - The following response parameters are added: hypervisor, scope, - suitableformigration - - - - updateDiskOffering - - - The following response parameter is added: displayoffering - - - - - changeServiceForVirtualMachine - - - The following response parameter are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - recoverVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listCapabilities - - - The following response parameters are added: apilimitinterval, apilimitmax - - - - - createRemoteAccessVpn - - - The following response parameters are added: id - - - - - startVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - detachIso - - - The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - updateVPC - - - The 
following request parameters has been made mandatory: id, name - - - - - associateIpAddress - - - The following request parameters are added: isportable (optional), regionid - (optional) - The following response parameters are added: isportable, vmipaddress - - - - - listProjectAccounts - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - ipavailable, iplimit, iptotal, memoryavailable, memorylimit, memorytotal, - networkavailable, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal, snapshotavailable, snapshotlimit, - snapshottotal, templateavailable, templatelimit, templatetotal, vmavailable, vmlimit, - vmrunning, vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, - vpcavailable, vpclimit, vpctotal - - - - - disableAccount - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - - - listPortForwardingRules - - - The following response parameters are added: vmguestip - - - - - migrateVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - cancelStorageMaintenance - - - The following response parameters are added: hypervisor, scope, - suitableformigration - - - - - createPortForwardingRule - - The following request parameter is added: vmguestip (optional) The - following response parameter is added: vmguestip - - - - addVpnUser - - - The following response parameter is added: state - - - - - createVPCOffering - - - The following request parameter is added: serviceproviderlist (optional) - - - - - assignVirtualMachine - - - The following response parameters 
are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listConditions - - - The following response parameters are added: account, counter, domain, domainid, - project, projectid, relationaloperator, threshold - Removed response parameters: name, source, value - - - - - createPrivateGateway - - - The following request parameters are added: aclid (optional), sourcenatsupported - (optional) - The following response parameters are added: aclid, sourcenatsupported - - - - - updateVirtualMachine - - - The following request parameters are added: displayvm (optional), - isdynamicallyscalable (optional) - The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - destroyRouter - - - The following response parameters are added: ip6dns1, ip6dns2, role - - - - - listServiceOfferings - - - The following response parameters are added: deploymentplanner, isvolatile - - - - - listUsageRecords - - - The following response parameters are removed: virtualsize - - - - - createProject - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - ipavailable, iplimit, iptotal, memoryavailable, memorylimit, memorytotal, - networkavailable, networklimit, networktotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal, snapshotavailable, snapshotlimit, - snapshottotal, templateavailable, templatelimit, templatetotal, vmavailable, vmlimit, - vmrunning, vmstopped, vmtotal, volumeavailable, volumelimit, volumetotal, - vpcavailable, vpclimit, vpctotal - - - - - enableUser - - - The following response parameters are added: iscallerchilddomain, isdefault - - - - - - createLoadBalancerRule - - - The following response parameter is added: networkid - - - - - updateAccount - - - The following response parameters are 
added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - - - copyIso - - - The following response parameters are added: isdynamicallyscalable, sshkeyenabled - - - - - - uploadVolume - - - The following request parameters are added: imagestoreuuid (optional), projectid - (optional - The following response parameters are added: displayvolume - - - - - createDomain - - - The following request parameter is added: domainid (optional) - - - - - stopVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - listAccounts - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - - - createSnapshot - - - The following response parameter is added: zoneid - - - - - updateIso - - - The following request parameters are added: isdynamicallyscalable (optional), - isrouting (optional) - The following response parameters are added: isdynamicallyscalable, - sshkeyenabled - - - - - listIpForwardingRules - - - The following response parameter is added: vmguestip - - - - - updateNetwork - - - The following request parameters are added: displaynetwork (optional), guestvmcidr - (optional) - The following response parameters are added: aclid, displaynetwork, ip6cidr, - ip6gateway, ispersistent, networkcidr, reservediprange - - - - - destroyVirtualMachine - - - The following response parameters are added: diskioread, diskiowrite, diskkbsread, - diskkbswrite, displayvm, isdynamicallyscalable, affinitygroup - - - - - createDiskOffering - - - The following request 
parameter is added: displayoffering (optional) - The following response parameter is added: displayoffering - - - - - rebootRouter - - - The following response parameters are added: ip6dns1, ip6dns2, role - - - - - listConfigurations - - - The following request parameters are added: accountid (optional), clusterid - (optional), storageid (optional), zoneid (optional) - The following response parameters are added: id, scope - - - - - createUser - - - The following request parameter is added: userid (optional) - The following response parameters are added: iscallerchilddomain, isdefault - - - - - listDiskOfferings - - - The following response parameter is added: displayoffering - - - - - detachVolume - - - The following response parameter is added: displayvolume - - - - - deleteUser - - - The following response parameters are added: displaytext, success - Removed parameters: id, account, accountid, accounttype, apikey, created, domain, - domainid, email, firstname, lastname, secretkey, state, timezone, username - - - - - listSnapshots - - - The following request parameter is added: zoneid (optional) - The following response parameter is added: zoneid - - - - - markDefaultZoneForAccount - - - The following response parameters are added: cpuavailable, cpulimit, cputotal, - isdefault, memoryavailable, memorylimit, memorytotal, primarystorageavailable, - primarystoragelimit, primarystoragetotal, secondarystorageavailable, - secondarystoragelimit, secondarystoragetotal - - - - - restartVPC - - - The following request parameters are made mandatory: id - - - - - updateHypervisorCapabilities - - - The following response parameters are added: hypervisor, hypervisorversion, - maxdatavolumeslimit, maxguestslimit, maxhostspercluster, securitygroupenabled, - storagemotionenabled - Removed parameters: cpunumber, cpuspeed, created, defaultuse, displaytext, domain, - domainid, hosttags, issystem, limitcpuuse, memory, name, networkrate, offerha, - storagetype, systemvmtype, tags - - 
- - - updateLoadBalancerRule - - - The following response parameter is added: networkid - - - - - listVlanIpRanges - - - The following response parameters are added: endipv6, ip6cidr, ip6gateway, - startipv6 - - - - - listHypervisorCapabilities - - - The following response parameters are added: maxdatavolumeslimit, - maxhostspercluster, storagemotionenabled - - - - - updateNetworkOffering - - - The following response parameters are added: details, egressdefaultpolicy, - ispersistent - - - - - createVirtualRouterElement - - - The following request parameters are added: providertype (optional) - - - - - listVpnUsers - - - The following response parameter is added: state - - - - - listUsers - - - The following response parameters are added: iscallerchilddomain, isdefault - - - - - - listSupportedNetworkServices - - - The following response parameter is added: provider - - - - - listIsos - - - The following response parameters are added: isdynamicallyscalable, sshkeyenabled - - - - - - -
diff --git a/docs/en-US/changed-apicommands-4-0.xml b/docs/en-US/changed-apicommands-4-0.xml deleted file mode 100644 index 042d5e2611e..00000000000 --- a/docs/en-US/changed-apicommands-4-0.xml +++ /dev/null @@ -1,268 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Changed API Commands in 4.0.0-incubating - - - - - - - API Commands - Description - - - - - - copyTemplate - prepareTemplate - registerTemplate - updateTemplate - createProject - activateProject - suspendProject - updateProject - listProjectAccounts - createVolume - migrateVolume - attachVolume - detachVolume - uploadVolume - createSecurityGroup - registerIso - copyIso - updateIso - createIpForwardingRule - listIpForwardingRules - createLoadBalancerRule - updateLoadBalancerRule - createSnapshot - - - The commands in this list have a single new response parameter, and no other - changes. - New response parameter: tags(*) - - Many other commands also have the new tags(*) parameter in addition to other - changes; those commands are listed separately. - - - - - rebootVirtualMachine - attachIso - detachIso - listLoadBalancerRuleInstances - resetPasswordForVirtualMachine - changeServiceForVirtualMachine - recoverVirtualMachine - startVirtualMachine - migrateVirtualMachine - deployVirtualMachine - assignVirtualMachine - updateVirtualMachine - restoreVirtualMachine - stopVirtualMachine - destroyVirtualMachine - - - The commands in this list have two new response parameters, and no other - changes. - New response parameters: keypair, tags(*) - - - - - listSecurityGroups - listFirewallRules - listPortForwardingRules - listSnapshots - listIsos - listProjects - listTemplates - listLoadBalancerRules - - The commands in this list have the following new parameters, and no other - changes. - New request parameter: tags (optional) - New response parameter: tags(*) - - - - - listF5LoadBalancerNetworks - listNetscalerLoadBalancerNetworks - listSrxFirewallNetworks - updateNetwork - - - The commands in this list have three new response parameters, and no other - changes. - New response parameters: canusefordeploy, vpcid, tags(*) - - - - - createZone - updateZone - - The commands in this list have the following new parameters, and no other - changes. 
- New request parameter: localstorageenabled (optional) - New response parameter: localstorageenabled - - - - listZones - New response parameter: localstorageenabled - - - - rebootRouter - changeServiceForRouter - startRouter - destroyRouter - stopRouter - - The commands in this list have two new response parameters, and no other - changes. - New response parameters: vpcid, nic(*) - - - - updateAccount - disableAccount - listAccounts - markDefaultZoneForAccount - enableAccount - - The commands in this list have three new response parameters, and no other - changes. - New response parameters: vpcavailable, vpclimit, vpctotal - - - listRouters - - New request parameters: forvpc (optional), vpcid (optional) - New response parameters: vpcid, nic(*) - - - - listNetworkOfferings - - New request parameters: forvpc (optional) - New response parameters: forvpc - - - - listVolumes - - New request parameters: details (optional), tags (optional) - New response parameters: tags(*) - - - - addTrafficMonitor - - New request parameters: excludezones (optional), includezones (optional) - - - - createNetwork - - New request parameters: vpcid (optional) - New response parameters: canusefordeploy, vpcid, tags(*) - - - - listPublicIpAddresses - - New request parameters: tags (optional), vpcid (optional) - New response parameters: vpcid, tags(*) - - - - listNetworks - - New request parameters: canusefordeploy (optional), forvpc (optional), tags - (optional), vpcid (optional) - New response parameters: canusefordeploy, vpcid, tags(*) - - - - restartNetwork - - New response parameters: vpcid, tags(*) - - - - enableStaticNat - - New request parameter: networkid (optional) - - - - createDiskOffering - - New request parameter: storagetype (optional) - New response parameter: storagetype - - - - listDiskOfferings - - New response parameter: storagetype - - - - updateDiskOffering - - New response parameter: storagetype - - - - createFirewallRule - - Changed request parameters: ipaddressid (old 
version - optional, new version - - required) - New response parameter: tags(*) - - - - listVirtualMachines - - New request parameters: isoid (optional), tags (optional), templateid - (optional) - New response parameters: keypair, tags(*) - - - - updateStorageNetworkIpRange - - New response parameters: id, endip, gateway, netmask, networkid, podid, startip, - vlan, zoneid - - - - - -
diff --git a/docs/en-US/changed-apicommands-4.1.xml b/docs/en-US/changed-apicommands-4.1.xml deleted file mode 100644 index 1667aafaa22..00000000000 --- a/docs/en-US/changed-apicommands-4.1.xml +++ /dev/null @@ -1,253 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Changed API Commands in 4.1 - - - - - - - API Commands - Description - - - - - - createNetworkOffering - - - The following request parameters have been added: - - - isPersistent - - - startipv6 - - - endipv6 - - - ip6gateway - - - ip6cidr - - - - - - - listNetworkOfferings - listNetworks - - - The following request parameters have been added: - - - isPersistent - This parameter determines if the network or network offering listed are - persistent or not. - - - ip6gateway - - - ip6cidr - - - - - - - createVlanIpRange - - - The following request parameters have been added: - - - startipv6 - - - endipv6 - - - ip6gateway - - - ip6cidr - - - - - - - deployVirtualMachine - - - The following parameter has been added: ip6Address. - The following parameter is updated to accept the IPv6 address: - iptonetworklist. - - - - - CreateZoneCmd - - - The following parameter have been added: ip6dns1, ip6dns2. - - - - - listRouters - listVirtualMachines - - - For nic responses, the following fields have been added. - - - ip6address - - - ip6gateway - - - ip6cidr - - - - - - - listVlanIpRanges - - - For nic responses, the following fields have been added. - - - startipv6 - - - endipv6 - - - ip6gateway - - - ip6cidr - - - - - - - listRouters - listZones - - - For DomainRouter and DataCenter response, the following fields have been - added. - - - ip6dns1 - - - ip6dns2 - - - - - - - addF5LoadBalancer - configureNetscalerLoadBalancer - addNetscalerLoadBalancer - listF5LoadBalancers - configureF5LoadBalancer - listNetscalerLoadBalancers - - - The following response parameter is removed: inline. - - - - listFirewallRules - createFirewallRule - - The following request parameter is added: traffictype (optional). - - - - listUsageRecords - The following response parameter is added: virtualsize. - - - - - deleteIso - - - The following request parameter is added: forced (optional). 
- - - - - createStoragePool - - - The following request parameters are made mandatory: - - - podid - - - clusterid - - - - - - - listZones - - - The following request parameter is added: securitygroupenabled - - - - createAccount - The following new request parameters are added: accountid, userid - - - createUser - The following new request parameter is added: userid - - - createDomain - The following new request parameter is added: domainid - - - - -
diff --git a/docs/en-US/changing-root-password.xml b/docs/en-US/changing-root-password.xml deleted file mode 100644 index 880f50fcf22..00000000000 --- a/docs/en-US/changing-root-password.xml +++ /dev/null @@ -1,50 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Changing the Root Password - During installation and ongoing cloud administration, you will need to log in to the UI as the root administrator. - The root administrator account manages the &PRODUCT; deployment, including physical infrastructure. - The root administrator can modify configuration settings to change basic functionality, create or delete user accounts, and take many actions that should be performed only by an authorized person. - When first installing &PRODUCT;, be sure to change the default password to a new, unique value. - - Open your favorite Web browser and go to this URL. Substitute the IP address of your own Management Server: - http://<management-server-ip-address>:8080/client - - Log in to the UI using the current root user ID and password. The default is admin, password. - Click Accounts. - Click the admin account name. - Click View Users. - Click the admin user name. - - Click the Change Password button. - - - - - change-password.png: button to change a user's password - - - Type the new password, and click OK. - -
diff --git a/docs/en-US/changing-secondary-storage-ip.xml b/docs/en-US/changing-secondary-storage-ip.xml deleted file mode 100644 index 34f93e32c61..00000000000 --- a/docs/en-US/changing-secondary-storage-ip.xml +++ /dev/null @@ -1,44 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Changing the Secondary Storage IP Address - You can change the secondary storage IP address after it has been provisioned. After changing the IP address on the host, log in to your management server and execute the following commands. Replace HOSTID below with your own value, and change the URL to use the appropriate IP address and path for your server: - - # mysql -p - mysql> use cloud; - mysql> select id from host where type = 'SecondaryStorage'; - mysql> update host_details set value = 'nfs://192.168.160.20/export/mike-ss1' - where host_id = HOSTID and name = 'orig.url'; - mysql> update host set name = 'nfs://192.168.160.20/export/mike-ss1' where type - = 'SecondaryStorage' and id = #; - mysql> update host set url = 'nfs://192.168.160.20/export/mike-ss1' where type - = 'SecondaryStorage' and id = #; - mysql> update host set guid = 'nfs://192.168.160.20/export/mike-ss1' where type - = 'SecondaryStorage' and id = #; - - When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text. - Then log in to the cloud console UI and stop and start (not reboot) the Secondary Storage VM for that Zone. - -
- diff --git a/docs/en-US/changing-secondary-storage-servers.xml b/docs/en-US/changing-secondary-storage-servers.xml deleted file mode 100644 index a628eec9b39..00000000000 --- a/docs/en-US/changing-secondary-storage-servers.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Changing Secondary Storage Servers - You can change the secondary storage NFS mount. Perform the following steps to do so: - - Stop all running Management Servers. - Wait 30 minutes. This allows any writes to secondary storage to complete. - Copy all files from the old secondary storage mount to the new. - Use the procedure above to change the IP address for secondary storage if required. - Start the Management Server. - -
- diff --git a/docs/en-US/changing-service-offering-for-vm.xml b/docs/en-US/changing-service-offering-for-vm.xml deleted file mode 100644 index f4e2ceb309f..00000000000 --- a/docs/en-US/changing-service-offering-for-vm.xml +++ /dev/null @@ -1,190 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Changing the Service Offering for a VM - To upgrade or downgrade the level of compute resources available to a virtual machine, you - can change the VM's compute offering. - - - Log in to the &PRODUCT; UI as a user or admin. - - - In the left navigation, click Instances. - - - Choose the VM that you want to work with. - - - (Skip this step if you have enabled dynamic VM scaling; see .) - Click the Stop button to stop the VM. - - - - - StopButton.png: button to stop a VM - - - - - - Click the Change Service button. - - - - - ChangeServiceButton.png: button to change the service of a VM - - - The Change service dialog box is displayed. - - - Select the offering you want to apply to the selected VM. - - - Click OK. - - -
- - CPU and Memory Scaling for Running VMs - (Supported on VMware and XenServer) - It is not always possible to accurately predict the CPU and RAM requirements when you - first deploy a VM. You might need to increase these resources at any time during the life of a - VM. You can dynamically modify CPU and RAM levels to scale up these resources for a running VM - without incurring any downtime. - Dynamic CPU and RAM scaling can be used in the following cases: - - - User VMs on hosts running VMware and XenServer. - - - System VMs on VMware. - - - VMware Tools or XenServer Tools must be installed on the virtual machine. - - - The new requested CPU and RAM values must be within the constraints allowed by the - hypervisor and the VM operating system. - - - New VMs that are created after the installation of &PRODUCT; 4.2 can use the dynamic - scaling feature. If you are upgrading from a previous version of &PRODUCT;, your existing - VMs created with previous versions will not have the dynamic scaling capability unless you - update them using the following procedure. - - -
-
- Updating Existing VMs - If you are upgrading from a previous version of &PRODUCT;, and you want your existing VMs - created with previous versions to have the dynamic scaling capability, update the VMs using - the following steps: - - - Make sure the zone-level setting enable.dynamic.scale.vm is set to true. In the left - navigation bar of the &PRODUCT; UI, click Infrastructure, then click Zones, click the zone - you want, and click the Settings tab. - - - Install Xen tools (for XenServer hosts) or VMware Tools (for VMware hosts) on each VM - if they are not already installed. - - - Stop the VM. - - - Click the Edit button. - - - Click the Dynamically Scalable checkbox. - - - Click Apply. - - - Restart the VM. - - -
-
- Configuring Dynamic CPU and RAM Scaling - To configure this feature, use the following new global configuration variables: - - - enable.dynamic.scale.vm: Set to True to enable the feature. By default, the feature is - turned off. - - - scale.retry: How many times to attempt the scaling operation. Default = 2. - - -
-
- How to Dynamically Scale CPU and RAM - To modify the CPU and/or RAM capacity of a virtual machine, you need to change the compute - offering of the VM to a new compute offering that has the desired CPU and RAM values. You can - use the same steps described above in , but - skip the step where you stop the virtual machine. Of course, you might have to create a new - compute offering first. - When you submit a dynamic scaling request, the resources will be scaled up on the current - host if possible. If the host does not have enough resources, the VM will be live migrated to - another host in the same cluster. If there is no host in the cluster that can fulfill the - requested level of CPU and RAM, the scaling operation will fail. The VM will continue to run - as it was before. -
-
- Limitations - - - You can not do dynamic scaling for system VMs on XenServer. - - - &PRODUCT; will not check to be sure that the new CPU and RAM levels are compatible - with the OS running on the VM. - - - When scaling memory or CPU for a Linux VM on VMware, you might need to run scripts in - addition to the other steps mentioned above. For more information, see Hot adding memory in Linux (1012764) in the VMware Knowledge Base. - - - (VMware) If resources are not available on the current host, scaling up will fail on - VMware because of a known issue where &PRODUCT; and vCenter calculate the available - capacity differently. For more information, see https://issues.apache.org/jira/browse/CLOUDSTACK-1809. - - - On VMs running Linux 64-bit and Windows 7 32-bit operating systems, if the VM is - initially assigned a RAM of less than 3 GB, it can be dynamically scaled up to 3 GB, but - not more. This is due to a known issue with these operating systems, which will freeze if - an attempt is made to dynamically scale from less than 3 GB to more than 3 GB. - - -
-
- diff --git a/docs/en-US/changing-vm-name-os-group.xml b/docs/en-US/changing-vm-name-os-group.xml deleted file mode 100644 index daf78bca107..00000000000 --- a/docs/en-US/changing-vm-name-os-group.xml +++ /dev/null @@ -1,59 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Changing the VM Name, OS, or Group - After a VM is created, you can modify the display name, operating system, and the group it belongs to. - To access a VM through the &PRODUCT; UI: - - Log in to the &PRODUCT; UI as a user or admin. - In the left navigation, click Instances. - Select the VM that you want to modify. - Click the Stop button to stop the VM. - - - - - StopButton.png: button to stop a VM - - - - Click Edit. - - - - - EditButton.png: button to edit the properties of a VM - - - Make the desired changes to the following: - - Display name: Enter a new display name if you want to change - the name of the VM. - OS Type: Select the desired operating system. - Group: Enter the group name for the VM. - - Click Apply. - -
- diff --git a/docs/en-US/choosing-a-deployment-architecture.xml b/docs/en-US/choosing-a-deployment-architecture.xml deleted file mode 100644 index 0503d8c7597..00000000000 --- a/docs/en-US/choosing-a-deployment-architecture.xml +++ /dev/null @@ -1,29 +0,0 @@ - -%BOOK_ENTITIES; -]> - - - - Choosing a Deployment Architecture - The architecture used in a deployment will vary depending on the size and purpose of the deployment. This section contains examples of deployment architecture, including a small-scale deployment useful for test and trial deployments and a fully-redundant large-scale setup for production deployments. - - - - - - diff --git a/docs/en-US/choosing-a-hypervisor.xml b/docs/en-US/choosing-a-hypervisor.xml deleted file mode 100644 index bf83fe3d17f..00000000000 --- a/docs/en-US/choosing-a-hypervisor.xml +++ /dev/null @@ -1,136 +0,0 @@ - -%BOOK_ENTITIES; -]> - - - - Choosing a Hypervisor: Supported Features - &PRODUCT; supports many popular hypervisors. Your cloud can consist entirely of hosts running a single hypervisor, or you can use multiple hypervisors. Each cluster of hosts must run the same hypervisor. - You might already have an installed base of nodes running a particular hypervisor, in which case, your choice of hypervisor has already been made. If you are starting from scratch, you need to decide what hypervisor software best suits your needs. A discussion of the relative advantages of each hypervisor is outside the scope of our documentation. However, it will help you to know which features of each hypervisor are supported by &PRODUCT;. The following table provides this information. 
- - - - - - - - - - - - Feature - XenServer 6.0.2 - vSphere 4.1/5.0 - KVM - RHEL 6.2 - OVM 2.3 - Bare Metal - - - - - Network Throttling - Yes - Yes - No - No - N/A - - - Security groups in zones that use basic networking - Yes - No - Yes - No - No - - - iSCSI - Yes - Yes - Yes - Yes - N/A - - - FibreChannel - Yes - Yes - Yes - No - N/A - - - Local Disk - Yes - Yes - Yes - No - Yes - - - HA - Yes - Yes (Native) - Yes - Yes - N/A - - - Snapshots of local disk - Yes - Yes - Yes - No - N/A - - - Local disk as data disk - No - No - No - No - N/A - - - Work load balancing - No - DRS - No - No - N/A - - - Manual live migration of VMs from host to host - Yes - Yes - Yes - Yes - N/A - - - Conserve management traffic IP address by using link local network to communicate with virtual router - Yes - No - Yes - Yes - N/A - - - - - diff --git a/docs/en-US/cisco3750-hardware.xml b/docs/en-US/cisco3750-hardware.xml deleted file mode 100644 index b5266105074..00000000000 --- a/docs/en-US/cisco3750-hardware.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Cisco 3750 - The following steps show how a Cisco 3750 is configured for zone-level layer-3 switching. - These steps assume VLAN 201 is used to route untagged private IPs for pod 1, and pod 1’s layer-2 - switch is connected to GigabitEthernet1/0/1. - - - Setting VTP mode to transparent allows us to utilize VLAN IDs above 1000. Since we only - use VLANs up to 999, vtp transparent mode is not strictly required. - vtp mode transparent -vlan 200-999 -exit - - - Configure GigabitEthernet1/0/1. - interface GigabitEthernet1/0/1 -switchport trunk encapsulation dot1q -switchport mode trunk -switchport trunk native vlan 201 -exit - - - The statements configure GigabitEthernet1/0/1 as follows: - - - VLAN 201 is the native untagged VLAN for port GigabitEthernet1/0/1. - - - Cisco passes all VLANs by default. As a result, all VLANs (300-999) are passed to all the pod-level layer-2 switches. - - -
diff --git a/docs/en-US/cisco3750-layer2.xml b/docs/en-US/cisco3750-layer2.xml deleted file mode 100644 index e4fe1422056..00000000000 --- a/docs/en-US/cisco3750-layer2.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
-
 - Cisco 3750
 - The following steps show how a Cisco 3750 is configured for pod-level layer-2
 - switching.
 -
 -
 - Setting VTP mode to transparent allows us to utilize VLAN IDs above 1000. Since we only
 - use VLANs up to 999, vtp transparent mode is not strictly required.
 - vtp mode transparent
-vlan 300-999
-exit
 -
 -
 - Configure all ports to dot1q and set 201 as the native VLAN.
 - interface range GigabitEthernet 1/0/1-24
-switchport trunk encapsulation dot1q
-switchport mode trunk
-switchport trunk native vlan 201
-exit
 -
 -
 - By default, Cisco passes all VLANs. Cisco switches complain if the native VLAN IDs are
 - different when 2 ports are connected together. That’s why you must specify VLAN 201 as the
 - native VLAN on the layer-2 switch.
diff --git a/docs/en-US/citrix-xenserver-installation.xml b/docs/en-US/citrix-xenserver-installation.xml deleted file mode 100644 index 09d07aa2a90..00000000000 --- a/docs/en-US/citrix-xenserver-installation.xml +++ /dev/null @@ -1,757 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Citrix XenServer Installation for &PRODUCT; - If you want to use the Citrix XenServer hypervisor to run guest virtual machines, install - XenServer 6.0 or XenServer 6.0.2 on the host(s) in your cloud. For an initial installation, - follow the steps below. If you have previously installed XenServer and want to upgrade to - another version, see . -
- System Requirements for XenServer Hosts - - - The host must be certified as compatible with one of the following. See the Citrix - Hardware Compatibility Guide: http://hcl.xensource.com - - - XenServer 5.6 SP2 - - - XenServer 6.0 - - - XenServer 6.0.2 - - - - - You must re-install Citrix XenServer if you are going to re-use a host from a previous - install. - - - Must support HVM (Intel-VT or AMD-V enabled) - - - Be sure all the hotfixes provided by the hypervisor vendor are applied. Track the - release of hypervisor patches through your hypervisor vendor’s support channel, and apply - patches as soon as possible after they are released. &PRODUCT; will not track or notify - you of required hypervisor patches. It is essential that your hosts are completely up to - date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to - support any system that is not up to date with patches. - - - All hosts within a cluster must be homogeneous. The CPUs must be of the same type, - count, and feature flags. - - - Must support HVM (Intel-VT or AMD-V enabled in BIOS) - - - 64-bit x86 CPU (more cores results in better performance) - - - Hardware virtualization support required - - - 4 GB of memory - - - 36 GB of local disk - - - At least 1 NIC - - - Statically allocated IP Address - - - When you deploy &PRODUCT;, the hypervisor host must not have any VMs already - running - - - - The lack of up-do-date hotfixes can lead to data corruption and lost VMs. - -
-
- XenServer Installation Steps - - - From https://www.citrix.com/English/ss/downloads/, download the appropriate version - of XenServer for your &PRODUCT; version (see ). Install it using the Citrix XenServer - Installation Guide. - Older Versions of XenServer - Note that you can download the most recent release of XenServer without having a Citrix account. If you wish to download older versions, you will need to create an account and look through the download archives. - - - - After installation, perform the following configuration steps, which are described in - the next few sections: - - - - - - - Required - Optional - - - - - - - - - - Set up SR if not using NFS, iSCSI, or local disk; see - - - - - - - - - - - - - - -
-
- Configure XenServer dom0 Memory - Configure the XenServer dom0 settings to allocate more memory to dom0. This can enable - XenServer to handle larger numbers of virtual machines. We recommend 2940 MB of RAM for - XenServer dom0. For instructions on how to do this, see http://support.citrix.com/article/CTX126531. The article refers to XenServer 5.6, - but the same information applies to XenServer 6.0. -
-
- Username and Password - All XenServers in a cluster must have the same username and password as configured in - &PRODUCT;. -
-
- Time Synchronization - The host must be set to use NTP. All hosts in a pod must have the same time. - - - Install NTP. - # yum install ntp - - - Edit the NTP configuration file to point to your NTP server. - # vi /etc/ntp.conf - Add one or more server lines in this file with the names of the NTP servers you want - to use. For example: - server 0.xenserver.pool.ntp.org -server 1.xenserver.pool.ntp.org -server 2.xenserver.pool.ntp.org -server 3.xenserver.pool.ntp.org - - - - Restart the NTP client. - # service ntpd restart - - - Make sure NTP will start again upon reboot. - # chkconfig ntpd on - - -
-
- Licensing - Citrix XenServer Free version provides 30 days usage without a license. Following the 30 - day trial, XenServer requires a free activation and license. You can choose to install a - license now or skip this step. If you skip this step, you will need to install a license when - you activate and license the XenServer. -
- Getting and Deploying a License - If you choose to install a license now you will need to use the XenCenter to activate - and get a license. - - - In XenCenter, click Tools > License manager. - - - Select your XenServer and select Activate Free XenServer. - - - Request a license. - - - You can install the license with XenCenter or using the xe command line tool. -
-
-
- Install &PRODUCT; XenServer Support Package (CSP) - (Optional) - To enable security groups, elastic load balancing, and elastic IP on XenServer, download - and install the &PRODUCT; XenServer Support Package (CSP). After installing XenServer, perform - the following additional steps on each XenServer host. - - - Download the CSP software onto the XenServer host from one of the following - links: - For XenServer 6.0.2: - http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz - For XenServer 5.6 SP2: - http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz - For XenServer 6.0: - http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz - - - Extract the file: - # tar xf xenserver-cloud-supp.tgz - - - Run the following script: - # xe-install-supplemental-pack xenserver-cloud-supp.iso - - - If the XenServer host is part of a zone that uses basic networking, disable Open - vSwitch (OVS): - # xe-switch-network-backend bridge - Restart the host machine when prompted. - - - The XenServer host is now ready to be added to &PRODUCT;. -
-
- Primary Storage Setup for XenServer - &PRODUCT; natively supports NFS, iSCSI and local storage. If you are using one of these - storage types, there is no need to create the XenServer Storage Repository ("SR"). - If, however, you would like to use storage connected via some other technology, such as - FiberChannel, you must set up the SR yourself. To do so, perform the following steps. If you - have your hosts in a XenServer pool, perform the steps on the master node. If you are working - with a single XenServer which is not part of a cluster, perform the steps on that - XenServer. - - - Connect FiberChannel cable to all hosts in the cluster and to the FiberChannel storage - host. - - - Rescan the SCSI bus. Either use the following command or use XenCenter to perform an - HBA rescan. - # scsi-rescan - - - Repeat step on every host. - - - Check to be sure you see the new SCSI disk. - # ls /dev/disk/by-id/scsi-360a98000503365344e6f6177615a516b -l - The output should look like this, although the specific file name will be different - (scsi-<scsiID>): - lrwxrwxrwx 1 root root 9 Mar 16 13:47 -/dev/disk/by-id/scsi-360a98000503365344e6f6177615a516b -> ../../sdc - - - - Repeat step on every host. - - - On the storage server, run this command to get a unique ID for the new SR. - # uuidgen - The output should look like this, although the specific ID will be different: - e6849e96-86c3-4f2c-8fcc-350cc711be3d - - - Create the FiberChannel SR. In name-label, use the unique ID you just - generated. - -# xe sr-create type=lvmohba shared=true -device-config:SCSIid=360a98000503365344e6f6177615a516b -name-label="e6849e96-86c3-4f2c-8fcc-350cc711be3d" - - This command returns a unique ID for the SR, like the following example (your ID will - be different): - 7a143820-e893-6c6a-236e-472da6ee66bf - - - To create a human-readable description for the SR, use the following command. In uuid, - use the SR ID returned by the previous command. 
In name-description, set whatever friendly - text you prefer. - # xe sr-param-set uuid=7a143820-e893-6c6a-236e-472da6ee66bf name-description="Fiber Channel storage repository" - Make note of the values you will need when you add this storage to &PRODUCT; later - (see ). In the Add Primary Storage dialog, in - Protocol, you will choose PreSetup. In SR Name-Label, you will enter the name-label you - set earlier (in this example, e6849e96-86c3-4f2c-8fcc-350cc711be3d). - - - (Optional) If you want to enable multipath I/O on a FiberChannel SAN, refer to the - documentation provided by the SAN vendor. - - -
-
- iSCSI Multipath Setup for XenServer (Optional) - When setting up the storage repository on a Citrix XenServer, you can enable multipath - I/O, which uses redundant physical components to provide greater reliability in the connection - between the server and the SAN. To enable multipathing, use a SAN solution that is supported - for Citrix servers and follow the procedures in Citrix documentation. The following links - provide a starting point: - - - http://support.citrix.com/article/CTX118791 - - - http://support.citrix.com/article/CTX125403 - - - You can also ask your SAN vendor for advice about setting up your Citrix repository for - multipathing. - Make note of the values you will need when you add this storage to the &PRODUCT; later - (see ). In the Add Primary Storage dialog, in Protocol, - you will choose PreSetup. In SR Name-Label, you will enter the same name used to create the - SR. - If you encounter difficulty, address the support team for the SAN provided by your vendor. - If they are not able to solve your issue, see Contacting Support. -
-
- Physical Networking Setup for XenServer - Once XenServer has been installed, you may need to do some additional network - configuration. At this point in the installation, you should have a plan for what NICs the - host will have and what traffic each NIC will carry. The NICs should be cabled as necessary to - implement your plan. - If you plan on using NIC bonding, the NICs on all hosts in the cluster must be cabled - exactly the same. For example, if eth0 is in the private bond on one host in a cluster, then - eth0 must be in the private bond on all hosts in the cluster. - The IP address assigned for the management network interface must be static. It can be set - on the host itself or obtained via static DHCP. - &PRODUCT; configures network traffic of various types to use different NICs or bonds on - the XenServer host. You can control this process and provide input to the Management Server - through the use of XenServer network name labels. The name labels are placed on physical - interfaces or bonds and configured in &PRODUCT;. In some simple cases the name labels are not - required. - When configuring networks in a XenServer environment, network traffic labels must be - properly configured to ensure that the virtual interfaces are created by &PRODUCT; are bound - to the correct physical device. The name-label of the XenServer network must match the - XenServer traffic label specified while creating the &PRODUCT; network. This is set by running - the following command: - xe network-param-set uuid=<network id> name-label=<CloudStack traffic label> -
- Configuring Public Network with a Dedicated NIC for XenServer (Optional) - &PRODUCT; supports the use of a second NIC (or bonded pair of NICs, described in ) for the public network. If bonding is not used, the - public network can be on any NIC and can be on different NICs on the hosts in a cluster. For - example, the public network can be on eth0 on node A and eth1 on node B. However, the - XenServer name-label for the public network must be identical across all hosts. The - following examples set the network label to "cloud-public". After the management - server is installed and running you must configure it with the name of the chosen network - label (e.g. "cloud-public"); this is discussed in . - If you are using two NICs bonded together to create a public network, see . - If you are using a single dedicated NIC to provide public network access, follow this - procedure on each new host that is added to &PRODUCT; before adding the host. - - - Run xe network-list and find the public network. This is usually attached to the NIC - that is public. Once you find the network make note of its UUID. Call this - <UUID-Public>. - - - Run the following command. - # xe network-param-set name-label=cloud-public uuid=<UUID-Public> - - -
-
- Configuring Multiple Guest Networks for XenServer (Optional) - &PRODUCT; supports the use of multiple guest networks with the XenServer hypervisor. - Each network is assigned a name-label in XenServer. For example, you might have two networks - with the labels "cloud-guest" and "cloud-guest2". After the management - server is installed and running, you must add the networks and use these labels so that - &PRODUCT; is aware of the networks. - Follow this procedure on each new host before adding the host to &PRODUCT;: - - - Run xe network-list and find one of the guest networks. Once you find the network - make note of its UUID. Call this <UUID-Guest>. - - - Run the following command, substituting your own name-label and uuid values. - # xe network-param-set name-label=<cloud-guestN> uuid=<UUID-Guest> - - - Repeat these steps for each additional guest network, using a different name-label - and uuid each time. - - -
-
- Separate Storage Network for XenServer (Optional) - You can optionally set up a separate storage network. This should be done first on the - host, before implementing the bonding steps below. This can be done using one or two - available NICs. With two NICs bonding may be done as above. It is the administrator's - responsibility to set up a separate storage network. - Give the storage network a different name-label than what will be given for other - networks. - For the separate storage network to work correctly, it must be the only interface that - can ping the primary storage device's IP address. For example, if eth0 is the - management network NIC, ping -I eth0 <primary storage device IP> must fail. In all - deployments, secondary storage devices must be pingable from the management network NIC or - bond. If a secondary storage device has been placed on the storage network, it must also be - pingable via the storage network NIC or bond on the hosts as well. - You can set up two separate storage networks as well. For example, if you intend to - implement iSCSI multipath, dedicate two non-bonded NICs to multipath. Each of the two - networks needs a unique name-label. - If no bonding is done, the administrator must set up and name-label the separate storage - network on all hosts (masters and slaves). - Here is an example to set up eth5 to access a storage network on 172.16.0.0/24. - -# xe pif-list host-name-label='hostname' device=eth5 -uuid(RO): ab0d3dd4-5744-8fae-9693-a022c7a3471d -device ( RO): eth5 -#xe pif-reconfigure-ip DNS=172.16.3.3 gateway=172.16.0.1 IP=172.16.0.55 mode=static netmask=255.255.255.0 uuid=ab0d3dd4-5744-8fae-9693-a022c7a3471d -
-
- NIC Bonding for XenServer (Optional) - XenServer supports Source Level Balancing (SLB) NIC bonding. Two NICs can be bonded - together to carry public, private, and guest traffic, or some combination of these. Separate - storage networks are also possible. Here are some example supported configurations: - - - 2 NICs on private, 2 NICs on public, 2 NICs on storage - - - 2 NICs on private, 1 NIC on public, storage uses management network - - - 2 NICs on private, 2 NICs on public, storage uses management network - - - 1 NIC for private, public, and storage - - - All NIC bonding is optional. - XenServer expects all nodes in a cluster will have the same network cabling and same - bonds implemented. In an installation the master will be the first host that was added to - the cluster and the slave hosts will be all subsequent hosts added to the cluster. The bonds - present on the master set the expectation for hosts added to the cluster later. The - procedure to set up bonds on the master and slaves are different, and are described below. - There are several important implications of this: - - - You must set bonds on the first host added to a cluster. Then you must use xe - commands as below to establish the same bonds in the second and subsequent hosts added - to a cluster. - - - Slave hosts in a cluster must be cabled exactly the same as the master. For example, - if eth0 is in the private bond on the master, it must be in the management network for - added slave hosts. - - -
- Management Network Bonding - The administrator must bond the management network NICs prior to adding the host to - &PRODUCT;. -
-
- Creating a Private Bond on the First Host in the Cluster - Use the following steps to create a bond in XenServer. These steps should be run on - only the first host in a cluster. This example creates the cloud-private network with two - physical NICs (eth0 and eth1) bonded into it. - - - Find the physical NICs that you want to bond together. - # xe pif-list host-name-label='hostname' device=eth0 -# xe pif-list host-name-label='hostname' device=eth1 - These command shows the eth0 and eth1 NICs and their UUIDs. Substitute the ethX - devices of your choice. Call the UUID's returned by the above command slave1-UUID - and slave2-UUID. - - - Create a new network for the bond. For example, a new network with name - "cloud-private". - This label is important. &PRODUCT; looks for a network by a - name you configure. You must use the same name-label for all hosts in the cloud for - the management network. - # xe network-create name-label=cloud-private -# xe bond-create network-uuid=[uuid of cloud-private created above] -pif-uuids=[slave1-uuid],[slave2-uuid] - - - Now you have a bonded pair that can be recognized by &PRODUCT; as the management - network. -
-
- Public Network Bonding - Bonding can be implemented on a separate, public network. The administrator is - responsible for creating a bond for the public network if that network will be bonded and - will be separate from the management network. -
-
- Creating a Public Bond on the First Host in the Cluster - These steps should be run on only the first host in a cluster. This example creates - the cloud-public network with two physical NICs (eth2 and eth3) bonded into it. - - - Find the physical NICs that you want to bond together. - #xe pif-list host-name-label='hostname' device=eth2 -# xe pif-list host-name-label='hostname' device=eth3 - These command shows the eth2 and eth3 NICs and their UUIDs. Substitute the ethX - devices of your choice. Call the UUID's returned by the above command slave1-UUID - and slave2-UUID. - - - Create a new network for the bond. For example, a new network with name - "cloud-public". - This label is important. &PRODUCT; looks for a network by a - name you configure. You must use the same name-label for all hosts in the cloud for - the public network. - # xe network-create name-label=cloud-public -# xe bond-create network-uuid=[uuid of cloud-public created above] -pif-uuids=[slave1-uuid],[slave2-uuid] - - - Now you have a bonded pair that can be recognized by &PRODUCT; as the public - network. -
-
- Adding More Hosts to the Cluster - With the bonds (if any) established on the master, you should add additional, slave - hosts. Run the following command for all additional hosts to be added to the cluster. This - will cause the host to join the master in a single XenServer pool. - # xe pool-join master-address=[master IP] master-username=root -master-password=[your password] -
-
- Complete the Bonding Setup Across the Cluster - With all hosts added to the pool, run the cloud-setup-bond script. This script will - complete the configuration and set up of the bonds across all hosts in the cluster. - - - Copy the script from the Management Server in - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/cloud-setup-bonding.sh to the - master host and ensure it is executable. - - - Run the script: - # ./cloud-setup-bonding.sh - - - Now the bonds are set up and configured properly across the cluster. -
-
-
-
- Upgrading XenServer Versions - This section tells how to upgrade XenServer software on &PRODUCT; hosts. The actual - upgrade is described in XenServer documentation, but there are some additional steps you must - perform before and after the upgrade. - - Be sure the hardware is certified compatible with the new version of XenServer. - - To upgrade XenServer: - - - Upgrade the database. On the Management Server node: - - - Back up the database: - # mysqldump --user=root --databases cloud > cloud.backup.sql -# mysqldump --user=root --databases cloud_usage > cloud_usage.backup.sql - - - You might need to change the OS type settings for VMs running on the upgraded - hosts. - - - If you upgraded from XenServer 5.6 GA to XenServer 5.6 SP2, change any VMs - that have the OS type CentOS 5.5 (32-bit), Oracle Enterprise Linux 5.5 (32-bit), - or Red Hat Enterprise Linux 5.5 (32-bit) to Other Linux (32-bit). Change any VMs - that have the 64-bit versions of these same OS types to Other Linux - (64-bit). - - - If you upgraded from XenServer 5.6 SP2 to XenServer 6.0.2, change any VMs that - have the OS type CentOS 5.6 (32-bit), CentOS 5.7 (32-bit), Oracle Enterprise Linux - 5.6 (32-bit), Oracle Enterprise Linux 5.7 (32-bit), Red Hat Enterprise Linux 5.6 - (32-bit) , or Red Hat Enterprise Linux 5.7 (32-bit) to Other Linux (32-bit). - Change any VMs that have the 64-bit versions of these same OS types to Other Linux - (64-bit). - - - If you upgraded from XenServer 5.6 to XenServer 6.0.2, do all of the - above. - - - - - Restart the Management Server and Usage Server. You only need to do this once for - all clusters. - # service cloudstack-management start -# service cloudstack-usage start - - - - - Disconnect the XenServer cluster from &PRODUCT;. - - - Log in to the &PRODUCT; UI as root. - - - Navigate to the XenServer cluster, and click Actions – Unmanage. - - - Watch the cluster status until it shows Unmanaged. 
- - - - - Log in to one of the hosts in the cluster, and run this command to clean up the - VLAN: - # . /opt/xensource/bin/cloud-clean-vlan.sh - - - Still logged in to the host, run the upgrade preparation script: - # /opt/xensource/bin/cloud-prepare-upgrade.sh - Troubleshooting: If you see the error "can't eject CD," log in to the - VM and umount the CD, then run the script again. - - - Upgrade the XenServer software on all hosts in the cluster. Upgrade the master - first. - - - Live migrate all VMs on this host to other hosts. See the instructions for live - migration in the Administrator's Guide. - Troubleshooting: You might see the following error when you migrate a VM: - [root@xenserver-qa-2-49-4 ~]# xe vm-migrate live=true host=xenserver-qa-2-49-5 vm=i-2-8-VM -You attempted an operation on a VM which requires PV drivers to be installed but the drivers were not detected. -vm: b6cf79c8-02ee-050b-922f-49583d9f1a14 (i-2-8-VM) - To solve this issue, run the following: - # /opt/xensource/bin/make_migratable.sh b6cf79c8-02ee-050b-922f-49583d9f1a14 - - - Reboot the host. - - - Upgrade to the newer version of XenServer. Use the steps in XenServer - documentation. - - - After the upgrade is complete, copy the following files from the management server - to this host, in the directory locations shown below: - - - - - - - Copy this Management Server file... 
- ...to this location on the XenServer host - - - - - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/xenserver60/NFSSR.py - /opt/xensource/sm/NFSSR.py - - - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/setupxenserver.sh - /opt/xensource/bin/setupxenserver.sh - - - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/make_migratable.sh - /opt/xensource/bin/make_migratable.sh - - - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/cloud-clean-vlan.sh - /opt/xensource/bin/cloud-clean-vlan.sh - - - - - - - Run the following script: - # /opt/xensource/bin/setupxenserver.sh - Troubleshooting: If you see the following error message, you can safely ignore - it. - mv: cannot stat `/etc/cron.daily/logrotate': No such file or directory - - - Plug in the storage repositories (physical block devices) to the XenServer - host: - # for pbd in `xe pbd-list currently-attached=false| grep ^uuid | awk '{print $NF}'`; do xe pbd-plug uuid=$pbd ; done - Note: If you add a host to this XenServer pool, you need to migrate all VMs on - this host to other hosts, and eject this host from XenServer pool. - - - - - Repeat these steps to upgrade every host in the cluster to the same version of - XenServer. - - - Run the following command on one host in the XenServer cluster to clean up the host - tags: - # for host in $(xe host-list | grep ^uuid | awk '{print $NF}') ; do xe host-param-clear uuid=$host param-name=tags; done; - - When copying and pasting a command, be sure the command has pasted as a single line - before executing. Some document viewers may introduce unwanted line breaks in copied - text. - - - - Reconnect the XenServer cluster to &PRODUCT;. - - - Log in to the &PRODUCT; UI as root. - - - Navigate to the XenServer cluster, and click Actions – Manage. - - - Watch the status to see that all the hosts come up. - - - - - After all hosts are up, run the following on one host in the cluster: - # /opt/xensource/bin/cloud-clean-vlan.sh - - -
-
diff --git a/docs/en-US/cloud-infrastructure-concepts.xml b/docs/en-US/cloud-infrastructure-concepts.xml deleted file mode 100644 index 2ba228aa4dd..00000000000 --- a/docs/en-US/cloud-infrastructure-concepts.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Cloud Infrastructure Concepts - - - - - - - - - diff --git a/docs/en-US/cloud-infrastructure-overview.xml b/docs/en-US/cloud-infrastructure-overview.xml deleted file mode 100644 index 49a413871a5..00000000000 --- a/docs/en-US/cloud-infrastructure-overview.xml +++ /dev/null @@ -1,79 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Cloud Infrastructure Overview - - The Management Server manages one or more zones (typically, - datacenters) containing host computers where guest virtual - machines will run. The cloud infrastructure is organized as follows: - - - - - Zone: Typically, a zone is equivalent to a single - datacenter. A zone consists of one or more pods and secondary - storage. - - - - - Pod: A pod is usually one rack of hardware that includes a - layer-2 switch and one or more clusters. - - - - - Cluster: A cluster consists of one or more hosts and primary - storage. - - - - - Host: A single compute node within a cluster. The hosts are - where the actual cloud services run in the form of guest - virtual machines. - - - - - Primary storage is associated with a cluster, and it stores - the disk volumes for all the VMs running on hosts in that cluster. - - - - Secondary storage is associated with a zone, and it stores - templates, ISO images, and disk volume snapshots. - - - - - - - - infrastructure_overview.png: Nested organization of a zone - - More Information - For more information, see documentation on cloud infrastructure concepts. -
diff --git a/docs/en-US/cloudmonkey.xml b/docs/en-US/cloudmonkey.xml deleted file mode 100644 index be4d17c3aa1..00000000000 --- a/docs/en-US/cloudmonkey.xml +++ /dev/null @@ -1,264 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- CloudMonkey - CloudMonkey is the &PRODUCT; Command Line Interface (CLI). It is written in Python. CloudMonkey can be used both as an interactive shell and as a command line tool which simplifies &PRODUCT; configuration and management. It can be used with &PRODUCT; releases since the 4.0.x branch. - - CloudMonkey is still under development and should be considered a Work In Progress (WIP), the wiki is the most up to date documentation: - https://cwiki.apache.org/CLOUDSTACK/cloudstack-cloudmonkey-cli.html - - -
- Installing CloudMonkey - CloudMonkey is dependent on readline, pygments, prettytable, when installing from source you will need to resolve those dependencies. Using the cheese shop, the dependencies will be automatically installed. - There are three ways to get CloudMonkey. Via the official &PRODUCT; source releases or via a community maintained distribution at the cheese shop. Developers can also get it directly from the git repository in tools/cli/. - - - - Via the official Apache &PRODUCT; releases as well as the git repository. - - - - - - Via a community maintained package on Cheese Shop - pip install cloudmonkey - - - -
- -
- Configuration - To configure CloudMonkey you can edit the ~/.cloudmonkey/config file in the user's home directory as shown below. The values can also be set interactively at the cloudmonkey prompt. Logs are kept in ~/.cloudmonkey/log, and history is stored in ~/.cloudmonkey/history. Discovered apis are listed in ~/.cloudmonkey/cache. Only the log and history files can be custom paths and can be configured by setting appropriate file paths in ~/.cloudmonkey/config - -$ cat ~/.cloudmonkey/config -[core] -log_file = /Users/sebastiengoasguen/.cloudmonkey/log -asyncblock = true -paramcompletion = false -history_file = /Users/sebastiengoasguen/.cloudmonkey/history - -[ui] -color = true -prompt = > -tabularize = false - -[user] -secretkey =VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ -apikey = plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdMkAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg - -[server] -path = /client/api -host = localhost -protocol = http -port = 8080 -timeout = 3600 - - The values can also be set at the CloudMonkey prompt. The API and secret keys are obtained via the &PRODUCT; UI or via a raw api call. - - set prompt myprompt> -myprompt> set host localhost -myprompt> set port 8080 -myprompt> set apikey -myprompt> set secretkey -]]> - - You can use CloudMonkey to interact with a local cloud, and even with a remote public cloud. You just need to set the host value properly and obtain the keys from the cloud administrator. -
- -
- API Discovery - - In &PRODUCT; 4.0.* releases, the list of api calls available will be pre-cached, while starting with &PRODUCT; 4.1 releases and above an API discovery service is enabled. CloudMonkey will discover automatically the api calls available on the management server. The sync command in CloudMonkey pulls a list of apis which are accessible to your user role, along with help docs etc. and stores them in ~/.cloudmonkey/cache. This allows cloudmonkey to be adaptable to changes in mgmt server, so in case the sysadmin enables a plugin such as Nicira NVP for that user role, the users can get those changes. New verbs and grammar (DSL) rules are created on the fly. - - To discover the APIs available do: - - > sync -324 APIs discovered and cached - -
- -
- Tabular Output - The number of key/value pairs returned by the api calls can be large resulting in a very long output. To enable easier viewing of the output, a tabular formatting can be setup. You may enable tabular listing and even choose set of column fields, this allows you to create your own field using the filter param which takes in comma separated argument. If argument has a space, put them under double quotes. The create table will have the same sequence of field filters provided - To enable it, use the set function and create filters like so: - -> set tabularize true -> list users filter=id,domain,account -count = 1 -user: -+--------------------------------------+--------+---------+ -| id | domain | account | -+--------------------------------------+--------+---------+ -| 7ed6d5da-93b2-4545-a502-23d20b48ef2a | ROOT | admin | -+--------------------------------------+--------+---------+ - -
- -
- Interactive Shell Usage - To start learning CloudMonkey, the best is to use the interactive shell. Simply type CloudMonkey at the prompt and you should get the interactive shell. - At the CloudMonkey prompt press the tab key twice, you will see all potential verbs available. Pick on, enter a space and then press tab twice. You will see all actions available for that verb - - -EOF assign cancel create detach extract ldap prepare reconnect restart shell update -activate associate change delete disable generate list query register restore start upload -add attach configure deploy enable get mark quit remove revoke stop -api authorize copy destroy exit help migrate reboot reset set suspend -cloudmonkey>create -account diskoffering loadbalancerrule portforwardingrule snapshot tags vpc -autoscalepolicy domain network privategateway snapshotpolicy template vpcoffering -autoscalevmgroup firewallrule networkacl project sshkeypair user vpnconnection -autoscalevmprofile instancegroup networkoffering remoteaccessvpn staticroute virtualrouterelement vpncustomergateway -condition ipforwardingrule physicalnetwork securitygroup storagenetworkiprange vlaniprange vpngateway -counter lbstickinesspolicy pod serviceoffering storagepool volume zone -]]> - - Picking one action and entering a space plus the tab key, you will obtain the list of parameters for that specific api call. 
- -create network -account= domainid= isAsync= networkdomain= projectid= vlan= -acltype= endip= name= networkofferingid= startip= vpcid= -displaytext= gateway= netmask= physicalnetworkid= subdomainaccess= zoneid= -]]> - - To get additional help on that specific api call you can use the following: - -create network -h -Creates a network -Required args: displaytext name networkofferingid zoneid -Args: account acltype displaytext domainid endip gateway isAsync name netmask networkdomain networkofferingid physicalnetworkid projectid startip subdomainaccess vlan vpcid zoneid - -cloudmonkey>create network -help -Creates a network -Required args: displaytext name networkofferingid zoneid -Args: account acltype displaytext domainid endip gateway isAsync name netmask networkdomain networkofferingid physicalnetworkid projectid startip subdomainaccess vlan vpcid zoneid - -cloudmonkey>create network --help -Creates a network -Required args: displaytext name networkofferingid zoneid -Args: account acltype displaytext domainid endip gateway isAsync name netmask networkdomain networkofferingid physicalnetworkid projectid startip subdomainaccess vlan vpcid zoneid -cloudmonkey> -]]> - - Note the required arguments necessary for the calls. - To find out the required parameters value, using a debugger console on the &PRODUCT; UI might be very useful. For instance using Firebug on Firefox, you can navigate the UI and check the parameters values for each call you are making as you navigate the UI. -
- -
- Starting a Virtual Machine instance with CloudMonkey - To start a virtual machine instance we will use the deploy virtualmachine call. - -deploy virtualmachine -h -Creates and automatically starts a virtual machine based on a service offering, disk offering, and template. -Required args: serviceofferingid templateid zoneid -Args: account diskofferingid displayname domainid group hostid hypervisor ipaddress iptonetworklist isAsync keyboard keypair name networkids projectid securitygroupids securitygroupnames serviceofferingid size startvm templateid userdata zoneid -]]> - - The required arguments are serviceofferingid, templateid and zoneid - In order to specify the template that we want to use, we can list all available templates with the following call: - -list templates templatefilter=all -count = 2 -template: -======== -domain = ROOT -domainid = 8a111e58-e155-4482-93ce-84efff3c7c77 -zoneid = e1bfdfaf-3d9b-43d4-9aea-2c9f173a1ae7 -displaytext = SystemVM Template (XenServer) -ostypeid = 849d7d0a-9fbe-452a-85aa-70e0a0cbc688 -passwordenabled = False -id = 6d360f79-4de9-468c-82f8-a348135d298e -size = 2101252608 -isready = True -templatetype = SYSTEM -zonename = devcloud -... -]]> - - In this snippet, I used DevCloud and only showed the beginning output of the first template, the SystemVM template - Similarly to get the serviceofferingid you would do: - -list serviceofferings | grep id -id = ef2537ad-c70f-11e1-821b-0800277e749c -id = c66c2557-12a7-4b32-94f4-48837da3fa84 -id = 3d8b82e5-d8e7-48d5-a554-cf853111bc50 -]]> - - Note that we can use the linux pipe as well as standard linux commands within the interactive shell. 
Finally we would start an instance with the following call: - -deploy virtualmachine templateid=13ccff62-132b-4caf-b456-e8ef20cbff0e zoneid=e1bfdfaf-3d9b-43d4-9aea-2c9f173a1ae7 serviceofferingid=ef2537ad-c70f-11e1-821b-0800277e749c -jobprocstatus = 0 -created = 2013-03-05T13:04:51-0800 -cmd = com.cloud.api.commands.DeployVMCmd -userid = 7ed6d5da-93b2-4545-a502-23d20b48ef2a -jobstatus = 1 -jobid = c441d894-e116-402d-aa36-fdb45adb16b7 -jobresultcode = 0 -jobresulttype = object -jobresult: -========= -virtualmachine: -============== -domain = ROOT -domainid = 8a111e58-e155-4482-93ce-84efff3c7c77 -haenable = False -templatename = tiny Linux -... -]]> - - The instance would be stopped with: - -cloudmonkey>stop virtualmachine id=7efe0377-4102-4193-bff8-c706909cc2d2 - - The ids that you will use will differ from this example. Make sure you use the ones that corresponds to your &PRODUCT; cloud. -
- -
 - Scripting with CloudMonkey - All previous examples use CloudMonkey via the interactive shell, however it can be used as a straightforward CLI, passing the commands to the cloudmonkey command as shown below. - $cloudmonkey list users - As such it can be used in shell scripts, it can receive commands via stdin and its output can be parsed like any other unix commands as mentioned before. -
- -
diff --git a/docs/en-US/cloudstack-api.xml b/docs/en-US/cloudstack-api.xml deleted file mode 100644 index 891b19f580b..00000000000 --- a/docs/en-US/cloudstack-api.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- &PRODUCT; API - The &PRODUCT; API is a low level API that has been used to implement the &PRODUCT; web UIs. It is also a good basis for implementing other popular APIs such as EC2/S3 and emerging DMTF standards. - Many &PRODUCT; API calls are asynchronous. These will return a Job ID immediately when called. This Job ID can be used to query the status of the job later. Also, status calls on impacted resources will provide some indication of their state. - The API has a REST-like query basis and returns results in XML or JSON. - See the Developer’s Guide and the API Reference. -
diff --git a/docs/en-US/cloudstack.ent b/docs/en-US/cloudstack.ent deleted file mode 100644 index abb18851bcf..00000000000 --- a/docs/en-US/cloudstack.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/en-US/cloudstack.xml b/docs/en-US/cloudstack.xml deleted file mode 100644 index 0b762a2da1f..00000000000 --- a/docs/en-US/cloudstack.xml +++ /dev/null @@ -1,80 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - &PRODUCT; Complete Documentation - Apache CloudStack - 4.0.0-incubating - 1 - - - - Complete documentation for &PRODUCT;. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en-US/cluster-add.xml b/docs/en-US/cluster-add.xml deleted file mode 100644 index 3046c5e0dfd..00000000000 --- a/docs/en-US/cluster-add.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding a Cluster - You need to tell &PRODUCT; about the hosts that it will manage. Hosts exist inside clusters, so before you begin adding hosts to the cloud, you must add at least one cluster. - - - -
diff --git a/docs/en-US/compatibility-matrix.xml b/docs/en-US/compatibility-matrix.xml deleted file mode 100644 index 8576f71e781..00000000000 --- a/docs/en-US/compatibility-matrix.xml +++ /dev/null @@ -1,116 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Compatibility Matrix - - - - - Hypervisor - CloudStack 2.1.x - CloudStack 2.2.x - CloudStack 3.0.0 - CloudStack 3.0.1 - CloudStack 3.0.2 - CloudStack 3.0.3 - - - - - XenServer 5.6 - Yes - Yes - No - No - No - No - - - XenServer 5.6 FP1 - Yes - Yes - No - No - No - No - - - XenServer 5.6 SP2 - Yes - Yes - No - No - Yes - Yes - - - XenServer 6.0.0 - No - No - No - No - No - Yes - - - XenServer 6.0.2 - No - No - Yes - Yes - Yes - Yes - - - XenServer 6.1 - No - No - No - No - No - No - - - KVM (RHEL 6.0 or 6.1) - Yes - Yes - Yes - Yes - Yes - Yes - - - VMware (vSphere and vCenter, both version 4.1) - Yes - Yes - Yes - Yes - Yes - Yes - - - - -
diff --git a/docs/en-US/compute-disk-service-offerings.xml b/docs/en-US/compute-disk-service-offerings.xml deleted file mode 100644 index 1fd2a91a38b..00000000000 --- a/docs/en-US/compute-disk-service-offerings.xml +++ /dev/null @@ -1,50 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Compute and Disk Service Offerings - A service offering is a set of virtual hardware features such as CPU core count and speed, memory, and disk size. The &PRODUCT; administrator can set up various offerings, and then end users choose from the available offerings when they create a new VM. A service offering includes the following elements: - - CPU, memory, and network resource guarantees - How resources are metered - How the resource usage is charged - How often the charges are generated - - For example, one service offering might allow users to create a virtual machine instance that is equivalent to a 1 GHz Intel® Core™ 2 CPU, with 1 GB memory at $0.20/hour, with network traffic metered at $0.10/GB. Based on the user’s selected offering, &PRODUCT; emits usage records that can be integrated with billing systems. &PRODUCT; separates service offerings into compute offerings and disk offerings. The computing service offering specifies: - - Guest CPU - Guest RAM - Guest Networking type (virtual or direct) - Tags on the root disk - - The disk offering specifies: - - Disk size (optional). An offering without a disk size will allow users to pick their own - Tags on the data disk - - - - -
- - diff --git a/docs/en-US/concepts.xml b/docs/en-US/concepts.xml deleted file mode 100644 index e20f442a935..00000000000 --- a/docs/en-US/concepts.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Concepts - - - - diff --git a/docs/en-US/configure-acl.xml b/docs/en-US/configure-acl.xml deleted file mode 100644 index 3ac2b7462c4..00000000000 --- a/docs/en-US/configure-acl.xml +++ /dev/null @@ -1,287 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Configuring Network Access Control List - Define Network Access Control List (ACL) on the VPC virtual router to control incoming - (ingress) and outgoing (egress) traffic between the VPC tiers, and the tiers and Internet. By - default, all incoming traffic to the guest networks is blocked and all outgoing traffic from - guest networks is allowed, once you add an ACL rule for outgoing traffic, then only outgoing - traffic specified in this ACL rule is allowed, the rest is blocked. To open the ports, you must - create a new network ACL. The network ACLs can be created for the tiers only if the NetworkACL - service is supported. -
- About Network ACL Lists - In &PRODUCT; terminology, Network ACL is a group of Network ACL items. Network ACL items - are nothing but numbered rules that are evaluated in order, starting with the lowest numbered - rule. These rules determine whether traffic is allowed in or out of any tier associated with - the network ACL. You need to add the Network ACL items to the Network ACL, then associate the - Network ACL with a tier. Network ACL is associated with a VPC and can be assigned to multiple - VPC tiers within a VPC. A Tier is associated with a Network ACL at all the times. Each tier - can be associated with only one ACL. - The default Network ACL is used when no ACL is associated. Default behavior is all the - incoming traffic is blocked and outgoing traffic is allowed from the tiers. Default network - ACL cannot be removed or modified. Contents of the default Network ACL is: - - - - - - - - - - Rule - Protocol - Traffic type - Action - CIDR - - - - - 1 - All - Ingress - Deny - 0.0.0.0/0 - - - 2 - All - Egress - Deny - 0.0.0.0/0 - - - - -
-
- Creating ACL Lists - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC. - For each tier, the following options are displayed: - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - Select Network ACL Lists. - The following default rules are displayed in the Network ACLs page: default_allow, - default_deny. - - - Click Add ACL Lists, and specify the following: - - - ACL List Name: A name for the ACL list. - - - Description: A short description of the ACL list - that can be displayed to users. - - - - -
-
- Creating an ACL Rule - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC. - - - Select Network ACL Lists. - In addition to the custom ACL lists you have created, the following default rules are - displayed in the Network ACLs page: default_allow, default_deny. - - - Select the desired ACL list. - - - Select the ACL List Rules tab. - To add an ACL rule, fill in the following fields to specify what kind of network - traffic is allowed in the VPC. - - - Rule Number: The order in which the rules are - evaluated. - - - CIDR: The CIDR acts as the Source CIDR for the - Ingress rules, and Destination CIDR for the Egress rules. To accept traffic only from - or to the IP addresses within a particular address block, enter a CIDR or a - comma-separated list of CIDRs. The CIDR is the base IP address of the incoming - traffic. For example, 192.168.0.0/22. To allow all CIDRs, set to 0.0.0.0/0. - - - Action: What action to be taken. Allow traffic or - block. - - - Protocol: The networking protocol that sources - use to send traffic to the tier. The TCP and UDP protocols are typically used for data - exchange and end-user communications. The ICMP protocol is typically used to send - error messages or network monitoring data. All supports all the traffic. Other option - is Protocol Number. - - - Start Port, End - Port (TCP, UDP only): A range of listening ports that are the destination - for the incoming traffic. If you are opening a single port, use the same number in - both fields. - - - Protocol Number: The protocol number associated - with IPv4 or IPv6. For more information, see Protocol - Numbers. - - - ICMP Type, ICMP - Code (ICMP only): The type of message and error code that will be - sent. - - - Traffic Type: The type of traffic: Incoming or - outgoing. 
- - - - - Click Add. The ACL rule is added. - You can edit the tags assigned to the ACL rules and delete the ACL rules you have - created. Click the appropriate button in the Details tab. - - -
-
- Creating a Tier with Custom ACL List - - - Create a VPC. - - - Create a custom ACL list. - - - Add ACL rules to the ACL list. - - - Create a tier in the VPC. - Select the desired ACL list while creating a tier. - - - Click OK. - - -
-
- Assigning a Custom ACL List to a Tier - - - Create a VPC. - - - Create a tier in the VPC. - - - Associate the tier with the default ACL rule. - - - Create a custom ACL list. - - - Add ACL rules to the ACL list. - - - Select the tier for which you want to assign the custom ACL. - - - Click the Replace ACL List icon. - - - - - replace-acl-icon.png: button to replace an ACL list - - - The Replace ACL List dialog is displayed. - - - Select the desired ACL list. - - - Click OK. - - -
-
diff --git a/docs/en-US/configure-guest-traffic-in-advanced-zone.xml b/docs/en-US/configure-guest-traffic-in-advanced-zone.xml deleted file mode 100644 index fb6685091a5..00000000000 --- a/docs/en-US/configure-guest-traffic-in-advanced-zone.xml +++ /dev/null @@ -1,79 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Configure Guest Traffic in an Advanced Zone - These steps assume you have already logged in to the &PRODUCT; UI. To configure the base - guest network: - - - In the left navigation, choose Infrastructure. On Zones, click View More, then click the - zone to which you want to add a network. - - - Click the Network tab. - - - Click Add guest network. - The Add guest network window is displayed: - - - - - - networksetupzone.png: Depicts network setup in a single zone - - - - - Provide the following information: - - - Name. The name of the network. This will be - user-visible - - - Display Text: The description of the network. This - will be user-visible - - - Zone: The zone in which you are configuring the - guest network. - - - Network offering: If the administrator has - configured multiple network offerings, select the one you want to use for this - network - - - Guest Gateway: The gateway that the guests should - use - - - Guest Netmask: The netmask in use on the subnet the - guests will use - - - - - Click OK. - - -
\ No newline at end of file diff --git a/docs/en-US/configure-package-repository.xml b/docs/en-US/configure-package-repository.xml deleted file mode 100644 index cda46773f53..00000000000 --- a/docs/en-US/configure-package-repository.xml +++ /dev/null @@ -1,69 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Configure package repository - &PRODUCT; is only distributed from source from the official mirrors. - However, members of the CloudStack community may build convenience binaries - so that users can install Apache CloudStack without needing to build from - source. - - - If you didn't follow the steps to build your own packages from source - in the sections for or - you may find pre-built - DEB and RPM packages for your convenience linked from the - downloads - page. - - - These repositories contain both the Management Server and KVM Hypervisor packages. - -
- DEB package repository - You can add a DEB package repository to your apt sources with the following commands. Please note that only packages for Ubuntu 12.04 LTS (precise) are being built at this time. - Use your preferred editor and open (or create) /etc/apt/sources.list.d/cloudstack.list. Add the community provided repository to the file: -deb http://cloudstack.apt-get.eu/ubuntu precise 4.1 - We now have to add the public key to the trusted keys. - $ wget -O - http://cloudstack.apt-get.eu/release.asc|apt-key add - - Now update your local apt cache. - $ apt-get update - Your DEB package repository should now be configured and ready for use. -
-
- RPM package repository - There is a RPM package repository for &PRODUCT; so you can easily install on RHEL based platforms. - If you're using an RPM-based system, you'll want to add the Yum repository so that you can install &PRODUCT; with Yum. - Yum repository information is found under /etc/yum.repos.d. You'll see several .repo files in this directory, each one denoting a specific repository. - To add the &PRODUCT; repository, create /etc/yum.repos.d/cloudstack.repo and insert the following information. - -[cloudstack] -name=cloudstack -baseurl=http://cloudstack.apt-get.eu/rhel/4.1/ -enabled=1 -gpgcheck=0 - - Now you should be able to install CloudStack using Yum. -
-
diff --git a/docs/en-US/configure-public-traffic-in-an-advanced-zone.xml b/docs/en-US/configure-public-traffic-in-an-advanced-zone.xml deleted file mode 100644 index 7a61cd380af..00000000000 --- a/docs/en-US/configure-public-traffic-in-an-advanced-zone.xml +++ /dev/null @@ -1,25 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Configure Public Traffic in an Advanced Zone - In a zone that uses advanced networking, you need to configure at least one range of IP - addresses for Internet traffic. -
\ No newline at end of file diff --git a/docs/en-US/configure-snmp-rhel.xml b/docs/en-US/configure-snmp-rhel.xml deleted file mode 100644 index bd227ff8ed5..00000000000 --- a/docs/en-US/configure-snmp-rhel.xml +++ /dev/null @@ -1,86 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Configuring SNMP Community String on a RHEL Server - The SNMP Community string is similar to a user id or password that provides access to a - network device, such as router. This string is sent along with all SNMP requests. If the - community string is correct, the device responds with the requested information. If the - community string is incorrect, the device discards the request and does not respond. - The NetScaler device uses SNMP to communicate with the VMs. You must install SNMP and - configure SNMP Community string for a secure communication between the NetScaler device and the - RHEL machine. - - - Ensure that you installed SNMP on RedHat. If not, run the following command: - yum install net-snmp-utils - - - Edit the /etc/snmp/snmpd.conf file to allow the SNMP polling from the NetScaler - device. - - - Map the community name into a security name (local and mynetwork, depending on where - the request is coming from): - - Use a strong password instead of public when you edit the following table. - - # sec.name source community -com2sec local localhost public -com2sec mynetwork 0.0.0.0 public - - Setting to 0.0.0.0 allows all IPs to poll the NetScaler server. - - - - Map the security names into group names: - # group.name sec.model sec.name -group MyRWGroup v1 local -group MyRWGroup v2c local -group MyROGroup v1 mynetwork -group MyROGroup v2c mynetwork - - - Create a view to allow the groups to have the permission to: - incl/excl subtree mask view all included .1 - - - Grant access with different write permissions to the two groups to the view you - created. - # context sec.model sec.level prefix read write notif - access MyROGroup "" any noauth exact all none none - access MyRWGroup "" any noauth exact all all all - - - - - Unblock SNMP in iptables. 
- iptables -A INPUT -p udp --dport 161 -j ACCEPT - - - Start the SNMP service: - service snmpd start - - - Ensure that the SNMP service is started automatically during the system startup: - chkconfig snmpd on - - -
diff --git a/docs/en-US/configure-usage-server.xml b/docs/en-US/configure-usage-server.xml deleted file mode 100644 index 83bed07b349..00000000000 --- a/docs/en-US/configure-usage-server.xml +++ /dev/null @@ -1,104 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Configuring the Usage Server - To configure the usage server: - - Be sure the Usage Server has been installed. This requires extra steps beyond just installing the &PRODUCT; software. See Installing the Usage Server (Optional) in the Advanced Installation Guide. - Log in to the &PRODUCT; UI as administrator. - Click Global Settings. - In Search, type usage. Find the configuration parameter that controls the behavior you want to set. See the table below for a description of the available parameters. - In Actions, click the Edit icon. - Type the desired value and click the Save icon. - Restart the Management Server (as usual with any global configuration change) and also the Usage Server: - # service cloudstack-management restart -# service cloudstack-usage restart - - - The following table shows the global configuration settings that control the behavior of the Usage Server. - - - - - Parameter Name - Description - - - - - enable.usage.server - Whether the Usage Server is active. - - - usage.aggregation.timezone - Time zone of usage records. Set this if the usage records and daily job execution are in different time zones. For example, with the following settings, the usage job will run at PST 00:15 and generate usage records for the 24 hours from 00:00:00 GMT to 23:59:59 GMT: - usage.stats.job.exec.time = 00:15 -usage.execution.timezone = PST -usage.aggregation.timezone = GMT - - Valid values for the time zone are specified in - Default: GMT - - - - usage.execution.timezone - The time zone of usage.stats.job.exec.time. Valid values for the time zone are specified in - Default: The time zone of the management server. - - - - usage.sanity.check.interval - The number of days between sanity checks. Set this in order to periodically search for records with erroneous data before issuing customer invoices. For example, this checks for VM usage records created after the VM was destroyed, and similar checks for templates, volumes, and so on. 
It also checks for usage times longer than the aggregation range. If any issue is found, the alert ALERT_TYPE_USAGE_SANITY_RESULT = 21 is sent. - - - usage.stats.job.aggregation.range - The time period in minutes between Usage Server processing jobs. For example, if you set it to 1440, the Usage Server will run once per day. If you set it to 600, it will run every ten hours. In general, when a Usage Server job runs, it processes all events generated since usage was last run. - There is special handling for the case of 1440 (once per day). In this case the Usage Server does not necessarily process all records since Usage was last run. &PRODUCT; assumes that you require processing once per day for the previous, complete day’s records. For example, if the current day is October 7, then it is assumed you would like to process records for October 6, from midnight to midnight. &PRODUCT; assumes this “midnight to midnight†is relative to the usage.execution.timezone. - Default: 1440 - - - - usage.stats.job.exec.time - The time when the Usage Server processing will start. It is specified in 24-hour format (HH:MM) in the time zone of the server, which should be GMT. For example, to start the Usage job at 10:30 GMT, enter “10:30â€. - If usage.stats.job.aggregation.range is also set, and its value is not 1440, then its value will be added to usage.stats.job.exec.time to get the time to run the Usage Server job again. This is repeated until 24 hours have elapsed, and the next day's processing begins again at usage.stats.job.exec.time. - Default: 00:15. - - - - - - For example, suppose that your server is in GMT, your user population is predominantly in the East Coast of the United States, and you would like to process usage records every night at 2 AM local (EST) time. Choose these settings: - - enable.usage.server = true - usage.execution.timezone = America/New_York - usage.stats.job.exec.time = 07:00. This will run the Usage job at 2:00 AM EST. 
Note that this will shift by an hour as the East Coast of the U.S. enters and exits Daylight Savings Time. - usage.stats.job.aggregation.range = 1440 - - With this configuration, the Usage job will run every night at 2 AM EST and will process records for the previous day’s midnight-midnight as defined by the EST (America/New_York) time zone. - Because the special value 1440 has been used for usage.stats.job.aggregation.range, the Usage - Server will ignore the data between midnight and 2 AM. That data will be included in the - next day's run. - - -
diff --git a/docs/en-US/configure-virtual-router.xml b/docs/en-US/configure-virtual-router.xml deleted file mode 100644 index 8740c0cef8b..00000000000 --- a/docs/en-US/configure-virtual-router.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Configuring the Virtual Router - You can set the following: - - IP range - Supported network services - Default domain name for the network serviced by the virtual router - Gateway IP address - How often &PRODUCT; fetches network usage statistics from &PRODUCT; virtual routers. If you want to collect traffic metering data from the virtual router, set the global configuration parameter router.stats.interval. If you are not using the virtual router to gather network usage statistics, set it to 0. - - -
diff --git a/docs/en-US/configure-vpc.xml b/docs/en-US/configure-vpc.xml deleted file mode 100644 index e0e2ee93f19..00000000000 --- a/docs/en-US/configure-vpc.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Configuring a Virtual Private Cloud - - - - - - - - - - - - - - -
diff --git a/docs/en-US/configure-vpn.xml b/docs/en-US/configure-vpn.xml deleted file mode 100644 index f389f30efc3..00000000000 --- a/docs/en-US/configure-vpn.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Configuring Remote Access VPN - To set up VPN for the cloud: - - Log in to the &PRODUCT; UI as an administrator or end user. - In the left navigation, click Global Settings. - Set the following global configuration parameters. - - remote.access.vpn.client.ip.range – The range of IP addresses to be allocated to remote access VPN clients. The first IP in the range is used by the VPN server. - remote.access.vpn.psk.length – Length of the IPSec key. - remote.access.vpn.user.limit – Maximum number of VPN users per account. - - To enable VPN for a particular network: - - Log in as a user or administrator to the &PRODUCT; UI. - In the left navigation, click Network. - Click the name of the network you want to work with. - Click View IP Addresses. - Click one of the displayed IP address names. - Click the Enable VPN button. - - - - - AttachDiskButton.png: button to attach a volume - - - The IPsec key is displayed in a popup window. - -
diff --git a/docs/en-US/configure-xenserver-dom0-memory.xml b/docs/en-US/configure-xenserver-dom0-memory.xml deleted file mode 100644 index 0a02d3e3818..00000000000 --- a/docs/en-US/configure-xenserver-dom0-memory.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Configure XenServer dom0 Memory - Configure the XenServer dom0 settings to allocate more memory to dom0. This can enable XenServer to handle larger numbers of virtual machines. We recommend 2940 MB of RAM for XenServer dom0. For instructions on how to do this, see Citrix Knowledgebase Article. The article refers to XenServer 5.6, but the same information applies to XenServer 6. -
- diff --git a/docs/en-US/configuring-projects.xml b/docs/en-US/configuring-projects.xml deleted file mode 100644 index af1fc5323e3..00000000000 --- a/docs/en-US/configuring-projects.xml +++ /dev/null @@ -1,32 +0,0 @@ - - - -%BOOK_ENTITIES; -]> -
- Configuring Projects - Before &PRODUCT; users start using projects, the &PRODUCT; administrator must set - up various systems to support them, including membership invitations, limits on project - resources, and controls on who can create projects. - - - -
- diff --git a/docs/en-US/console-proxy.xml b/docs/en-US/console-proxy.xml deleted file mode 100644 index 5f9a82027d2..00000000000 --- a/docs/en-US/console-proxy.xml +++ /dev/null @@ -1,140 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Console Proxy - The Console Proxy is a type of System Virtual Machine that has a role in presenting a - console view via the web UI. It connects the user’s browser to the VNC port made available via - the hypervisor for the console of the guest. Both the administrator and end user web UIs offer a - console connection. - Clicking a console icon brings up a new window. The AJAX code downloaded into that window - refers to the public IP address of a console proxy VM. There is exactly one public IP address - allocated per console proxy VM. The AJAX application connects to this IP. The console proxy then - proxies the connection to the VNC port for the requested VM on the Host hosting the - guest. - - The hypervisors will have many ports assigned to VNC usage so that multiple VNC sessions - can occur simultaneously. - - There is never any traffic to the guest virtual IP, and there is no need to enable VNC - within the guest. - The console proxy VM will periodically report its active session count to the Management - Server. The default reporting interval is five seconds. This can be changed through standard - Management Server configuration with the parameter consoleproxy.loadscan.interval. - Assignment of guest VM to console proxy is determined by first determining if the guest VM - has a previous session associated with a console proxy. If it does, the Management Server will - assign the guest VM to the target Console Proxy VM regardless of the load on the proxy VM. - Failing that, the first available running Console Proxy VM that has the capacity to handle new - sessions is used. - Console proxies can be restarted by administrators but this will interrupt existing console - sessions for users. -
- Using a SSL Certificate for the Console Proxy - The console viewing functionality uses a dynamic DNS service under the domain name - realhostip.com to assist in providing SSL security to console sessions. The console proxy is - assigned a public IP address. In order to avoid browser warnings for mismatched SSL - certificates, the URL for the new console window is set to the form of - https://aaa-bbb-ccc-ddd.realhostip.com. You will see this URL during console session creation. - &PRODUCT; includes the realhostip.com SSL certificate in the console proxy VM. Of course, - &PRODUCT; cannot know about the DNS A records for our customers' public IPs prior to shipping - the software. &PRODUCT; therefore runs a dynamic DNS server that is authoritative for the - realhostip.com domain. It maps the aaa-bbb-ccc-ddd part of the DNS name to the IP address - aaa.bbb.ccc.ddd on lookups. This allows the browser to correctly connect to the console - proxy's public IP, where it then expects and receives a SSL certificate for realhostip.com, - and SSL is set up without browser warnings. -
-
- Changing the Console Proxy SSL Certificate and Domain - If the administrator prefers, it is possible for the URL of the customer's console session - to show a domain other than realhostip.com. The administrator can customize the displayed - domain by selecting a different domain and uploading a new SSL certificate and private key. - The domain must run a DNS service that is capable of resolving queries for addresses of the - form aaa-bbb-ccc-ddd.your.domain to an IPv4 IP address in the form aaa.bbb.ccc.ddd, for - example, 202.8.44.1. To change the console proxy domain, SSL certificate, and private - key: - - - Set up dynamic name resolution or populate all possible DNS names in your public IP - range into your existing DNS server with the format aaa-bbb-ccc-ddd.company.com -> - aaa.bbb.ccc.ddd. - - - Generate the private key and certificate signing request (CSR). When you are using - openssl to generate private/public key pairs and CSRs, for the private key that you are - going to paste into the &PRODUCT; UI, be sure to convert it into PKCS#8 format. - - - Generate a new 2048-bit private key - openssl genrsa -des3 -out yourprivate.key 2048 - - - Generate a new certificate CSR - openssl req -new -key yourprivate.key -out yourcertificate.csr - - - Head to the website of your favorite trusted Certificate Authority, purchase an - SSL certificate, and submit the CSR. You should receive a valid certificate in - return - - - Convert your private key format into PKCS#8 encrypted format. - openssl pkcs8 -topk8 -in yourprivate.key -out yourprivate.pkcs8.encrypted.key - - - Convert your PKCS#8 encrypted private key into the PKCS#8 format that is compliant - with &PRODUCT; - openssl pkcs8 -in yourprivate.pkcs8.encrypted.key -out yourprivate.pkcs8.key - - - - - In the Update SSL Certificate screen of the &PRODUCT; UI, paste the following: - - - The certificate you've just generated. - - - The private key you've just generated. 
- - - The desired new domain name; for example, company.com - - - - - - - - updatessl.png: Updating Console Proxy SSL Certificate - - - - - The desired new domain name; for example, company.com - This stops all currently running console proxy VMs, then restarts them with the new - certificate and key. Users might notice a brief interruption in console - availability. - - - The Management Server generates URLs of the form "aaa-bbb-ccc-ddd.company.com" after this - change is made. The new console requests will be served with the new DNS domain name, - certificate, and key. -
-
diff --git a/docs/en-US/convert-hyperv-vm-to-template.xml b/docs/en-US/convert-hyperv-vm-to-template.xml deleted file mode 100644 index df388234d1f..00000000000 --- a/docs/en-US/convert-hyperv-vm-to-template.xml +++ /dev/null @@ -1,69 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Converting a Hyper-V VM to a Template - To convert a Hyper-V VM to a XenServer-compatible &PRODUCT; template, you will need a standalone XenServer host with an attached NFS VHD SR. Use whatever XenServer version you are using with &PRODUCT;, but use XenCenter 5.6 FP1 or SP2 (it is backwards compatible to 5.6). Additionally, it may help to have an attached NFS ISO SR. - For Linux VMs, you may need to do some preparation in Hyper-V before trying to get the VM to work in XenServer. Clone the VM and work on the clone if you still want to use the VM in Hyper-V. Uninstall Hyper-V Integration Components and check for any references to device names in /etc/fstab: - - From the linux_ic/drivers/dist directory, run make uninstall (where "linux_ic" is the path to the copied Hyper-V Integration Components files). - Restore the original initrd from backup in /boot/ (the backup is named *.backup0). - Remove the "hdX=noprobe" entries from /boot/grub/menu.lst. - Check /etc/fstab for any partitions mounted by device name. Change those entries (if any) to - mount by LABEL or UUID. You can get that information with the blkid command. - - The next step is make sure the VM is not running in Hyper-V, then get the VHD into XenServer. There are two options for doing this. - Option one: - - Import the VHD using XenCenter. In XenCenter, go to Tools>Virtual Appliance Tools>Disk Image Import. - Choose the VHD, then click Next. - Name the VM, choose the NFS VHD SR under Storage, enable "Run Operating System Fixups" and choose the NFS ISO SR. - Click Next, then Finish. A VM should be created. - - Option two: - - Run XenConvert, under From choose VHD, under To choose XenServer. Click Next. - Choose the VHD, then click Next. - Input the XenServer host info, then click Next. - Name the VM, then click Next, then Convert. A VM should be created. 
- - Once you have a VM created from the Hyper-V VHD, prepare it using the following steps: - - Boot the VM, uninstall Hyper-V Integration Services, and reboot. - Install XenServer Tools, then reboot. - Prepare the VM as desired. For example, run sysprep on Windows VMs. See . - - Either option above will create a VM in HVM mode. This is fine for Windows VMs, but Linux VMs may not perform optimally. Converting a Linux VM to PV mode will require additional steps and will vary by distribution. - - Shut down the VM and copy the VHD from the NFS storage to a web server; for example, mount the NFS share on the web server and copy it, or from the XenServer host use sftp or scp to upload it to the web server. - In &PRODUCT;, create a new template using the following values: - - URL. Give the URL for the VHD - OS Type. Use the appropriate OS. For PV mode on CentOS, choose Other PV (32-bit) or Other PV (64-bit). This choice is available only for XenServer. - Hypervisor. XenServer - Format. VHD - - - - The template will be created, and you can create instances from it. -
diff --git a/docs/en-US/create-bare-metal-template.xml b/docs/en-US/create-bare-metal-template.xml deleted file mode 100644 index 0ee4c11fead..00000000000 --- a/docs/en-US/create-bare-metal-template.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Creating a Bare Metal Template - Before you can create a bare metal template, you must have performed several other installation and setup steps to create a bare metal cluster and environment. See Bare Metal Installation in the Installation Guide. It is assumed you already have a directory named "win7_64bit" on your CIFS server, containing the image for the bare metal instance. This directory and image are set up as part of the Bare Metal Installation procedure. - - Log in to the &PRODUCT; UI as an administrator or end user. - In the left navigation bar, click Templates. - Click Create Template. - In the dialog box, enter the following values. - - Name. Short name for the template. - Display Text. Description of the template. - URL. The directory name which contains image file on your CIFS server. For example, win7_64bit. - Zone. All Zones. - OS Type. Select the OS type of the ISO image. Choose other if the OS Type of the ISO is not listed or if the ISO is not bootable. - Hypervisor. BareMetal. - Format. BareMetal. - Password Enabled. No. - Public. No. - Featured. Choose Yes if you would like this template to be more prominent for users to select. Only administrators may make templates featured. - -
diff --git a/docs/en-US/create-linux-template.xml b/docs/en-US/create-linux-template.xml deleted file mode 100755 index 156a0acf613..00000000000 --- a/docs/en-US/create-linux-template.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - -
- Creating a Linux Template - Linux templates should be prepared using this documentation in order to prepare your Linux VMs for template deployment. For ease of documentation, the VM which you are configuring the template on will be referred to as "Template Master". This guide currently covers legacy setups which do not take advantage of UserData and cloud-init and assumes openssh-server is installed during installation. - - - An overview of the procedure is as follows: - - Upload your Linux ISO. For more information, see . - Create a VM Instance with this ISO. For more information, see . - Prepare the Linux VM - Create a template from the VM. For more information, see . - - - -
- diff --git a/docs/en-US/create-new-projects.xml b/docs/en-US/create-new-projects.xml deleted file mode 100644 index 7696c9ee00f..00000000000 --- a/docs/en-US/create-new-projects.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Creating a New Project - &PRODUCT; administrators and domain administrators can create projects. If the global configuration parameter allow.user.create.projects is set to true, end users can also create projects. - - Log in as administrator to the &PRODUCT; UI. - In the left navigation, click Projects. - In Select view, click Projects. - Click New Project. - Give the project a name and description for display to users, then click Create Project. - A screen appears where you can immediately add more members to the project. This is optional. Click Next when you are ready to move on. - Click Save. - -
diff --git a/docs/en-US/create-template-from-existing-vm.xml b/docs/en-US/create-template-from-existing-vm.xml deleted file mode 100644 index 35788fdfcc1..00000000000 --- a/docs/en-US/create-template-from-existing-vm.xml +++ /dev/null @@ -1,56 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Creating a Template from an Existing Virtual Machine - Once you have at least one VM set up in the way you want, you can use it as the prototype for other VMs. - - Create and start a virtual machine using any of the techniques given in . - Make any desired configuration changes on the running VM, then click Stop. - Wait for the VM to stop. When the status shows Stopped, go to the next step. - Click Create Template and provide the following: - - Name and Display Text. These will be shown in the UI, so - choose something descriptive. - OS Type. This helps &PRODUCT; and the hypervisor perform - certain operations and make assumptions that improve the performance of the - guest. Select one of the following. - - If the operating system of the stopped VM is listed, choose it. - If the OS type of the stopped VM is not listed, choose Other. - If you want to boot from this template in PV mode, choose Other PV (32-bit) or Other PV (64-bit). This choice is available only for XenServer. - Note: Generally you should not choose an older version of the OS than the version in the image. For example, choosing CentOS 5.4 to support a CentOS 6.2 image will in general not work. In those cases you should choose Other. - - - Public. Choose Yes to make this template accessible to all - users of this &PRODUCT; installation. The template will appear in the - Community Templates list. See . - Password Enabled. Choose Yes if your template has the - &PRODUCT; password change script installed. See . - - Click Add. - - The new template will be visible in the Templates section when the template creation process - has been completed. The template is then available when creating a new VM.
diff --git a/docs/en-US/create-template-from-snapshot.xml b/docs/en-US/create-template-from-snapshot.xml deleted file mode 100644 index d9684226671..00000000000 --- a/docs/en-US/create-template-from-snapshot.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Creating a Template from a Snapshot - - If you do not want to stop the VM in order to use the Create Template menu item (as described in ), you can create a template directly from any snapshot through the &PRODUCT; UI. -
diff --git a/docs/en-US/create-templates-overview.xml b/docs/en-US/create-templates-overview.xml deleted file mode 100644 index 900165f482f..00000000000 --- a/docs/en-US/create-templates-overview.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Creating Templates: Overview - &PRODUCT; ships with a default template for the CentOS operating system. There are a variety of ways to add more templates. Administrators and end users can add templates. The typical sequence of events is: - - Launch a VM instance that has the operating system you want. Make any other desired configuration changes to the VM. - Stop the VM. - Convert the volume into a template. - - There are other ways to add templates to &PRODUCT;. For example, you can take a snapshot - of the VM's volume and create a template from the snapshot, or import a VHD from another - system into &PRODUCT;. - The various techniques for creating templates are described in the next few sections. - -
diff --git a/docs/en-US/create-vpn-connection-vpc.xml b/docs/en-US/create-vpn-connection-vpc.xml deleted file mode 100644 index 88a058c9f89..00000000000 --- a/docs/en-US/create-vpn-connection-vpc.xml +++ /dev/null @@ -1,122 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Creating a VPN Connection - &PRODUCT; supports creating up to 8 VPN connections. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you create for the account are listed in the page. - - - Click the Configure button of the VPC to which you want to deploy the VMs. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - - - Click the Settings icon. - For each tier, the following options are displayed: - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - Select Site-to-Site VPN. - The Site-to-Site VPN page is displayed. - - - From the Select View drop-down, ensure that VPN Connection is selected. - - - Click Create VPN Connection. - The Create VPN Connection dialog is displayed: - - - - - - createvpnconnection.png: creating a vpn connection to the customer - gateway. - - - - - Select the desired customer gateway, then click OK to confirm. - Within a few moments, the VPN Connection is displayed. - The following information on the VPN connection is displayed: - - - IP Address - - - Gateway - - - State - - - IPSec Preshared Key - - - IKE Policy - - - ESP Policy - - - - -
diff --git a/docs/en-US/create-vpn-customer-gateway.xml b/docs/en-US/create-vpn-customer-gateway.xml deleted file mode 100644 index 8bcd488160c..00000000000 --- a/docs/en-US/create-vpn-customer-gateway.xml +++ /dev/null @@ -1,191 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Creating and Updating a VPN Customer Gateway - - A VPN customer gateway can be connected to only one VPN gateway at a time. - - To add a VPN Customer Gateway: - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPN Customer Gateway. - - - Click Add site-to-site VPN. - - - - - - addvpncustomergateway.png: adding a customer gateway. - - - Provide the following information: - - - Name: A unique name for the VPN customer gateway - you create. - - - Gateway: The IP address for the remote - gateway. - - - CIDR list: The guest CIDR list of the remote - subnets. Enter a CIDR or a comma-separated list of CIDRs. Ensure that a guest CIDR list - is not overlapped with the VPC’s CIDR, or another guest CIDR. The CIDR must be - RFC1918-compliant. - - - IPsec Preshared Key: Preshared keying is a method - where the endpoints of the VPN share a secret key. This key value is used to - authenticate the customer gateway and the VPC VPN gateway to each other. - - The IKE peers (VPN end points) authenticate each other by computing and sending a - keyed hash of data that includes the Preshared key. If the receiving peer is able to - create the same hash independently by using its Preshared key, it knows that both - peers must share the same secret, thus authenticating the customer gateway. - - - - IKE Encryption: The Internet Key Exchange (IKE) - policy for phase-1. The supported encryption algorithms are AES128, AES192, AES256, and - 3DES. Authentication is accomplished through the Preshared Keys. - - The phase-1 is the first phase in the IKE process. In this initial negotiation - phase, the two VPN endpoints agree on the methods to be used to provide security for - the underlying IP traffic. The phase-1 authenticates the two VPN gateways to each - other, by confirming that the remote gateway has a matching Preshared Key. - - - - IKE Hash: The IKE hash for phase-1. 
The supported - hash algorithms are SHA1 and MD5. - - - IKE DH: A public-key cryptography protocol which - allows two parties to establish a shared secret over an insecure communications channel. - The 1536-bit Diffie-Hellman group is used within IKE to establish session keys. The - supported options are None, Group-5 (1536-bit) and Group-2 (1024-bit). - - - ESP Encryption: Encapsulating Security Payload - (ESP) algorithm within phase-2. The supported encryption algorithms are AES128, AES192, - AES256, and 3DES. - - The phase-2 is the second phase in the IKE process. The purpose of IKE phase-2 is - to negotiate IPSec security associations (SA) to set up the IPSec tunnel. In phase-2, - new keying material is extracted from the Diffie-Hellman key exchange in phase-1, to - provide session keys to use in protecting the VPN data flow. - - - - ESP Hash: Encapsulating Security Payload (ESP) hash - for phase-2. Supported hash algorithms are SHA1 and MD5. - - - Perfect Forward Secrecy: Perfect Forward Secrecy - (or PFS) is the property that ensures that a session key derived from a set of long-term - public and private keys will not be compromised. This property enforces a new - Diffie-Hellman key exchange. It provides the keying material that has greater key - material life and thereby greater resistance to cryptographic attacks. The available - options are None, Group-5 (1536-bit) and Group-2 (1024-bit). The security of the key - exchanges increase as the DH groups grow larger, as does the time of the - exchanges. - - When PFS is turned on, for every negotiation of a new phase-2 SA the two gateways - must generate a new set of phase-1 keys. This adds an extra layer of protection that - PFS adds, which ensures if the phase-2 SA’s have expired, the keys used for new - phase-2 SA’s have not been generated from the current phase-1 keying material. - - - - IKE Lifetime (seconds): The phase-1 lifetime of the - security association in seconds. Default is 86400 seconds (1 day). 
Whenever the time - expires, a new phase-1 exchange is performed. - - - ESP Lifetime (seconds): The phase-2 lifetime of the - security association in seconds. Default is 3600 seconds (1 hour). Whenever the value is - exceeded, a re-key is initiated to provide a new IPsec encryption and authentication - session keys. - - - Dead Peer Detection: A method to detect an - unavailable Internet Key Exchange (IKE) peer. Select this option if you want the virtual - router to query the liveliness of its IKE peer at regular intervals. It’s recommended to - have the same configuration of DPD on both side of VPN connection. - - - - - Click OK. - - - - Updating and Removing a VPN Customer Gateway - You can update a customer gateway either with no VPN connection, or related VPN connection - is in error state. - - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPN Customer Gateway. - - - Select the VPN customer gateway you want to work with. - - - To modify the required parameters, click the Edit VPN Customer Gateway button - - - - - edit.png: button to edit a VPN customer gateway - - - - - To remove the VPN customer gateway, click the Delete VPN Customer Gateway button - - - - - delete.png: button to remove a VPN customer gateway - - - - - Click OK. - - -
diff --git a/docs/en-US/create-vpn-gateway-for-vpc.xml b/docs/en-US/create-vpn-gateway-for-vpc.xml deleted file mode 100644 index 0f8a0dcc03b..00000000000 --- a/docs/en-US/create-vpn-gateway-for-vpc.xml +++ /dev/null @@ -1,98 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Creating a VPN gateway for the VPC - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC to which you want to deploy the VMs. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - - - Click the Settings icon. - For each tier, the following options are displayed: - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - Select Site-to-Site VPN. - If you are creating the VPN gateway for the first time, selecting Site-to-Site VPN - prompts you to create a VPN gateway. - - - In the confirmation dialog, click Yes to confirm. - Within a few moments, the VPN gateway is created. You will be prompted to view the - details of the VPN gateway you have created. Click Yes to confirm. - The following details are displayed in the VPN Gateway page: - - - IP Address - - - Account - - - Domain - - - - -
diff --git a/docs/en-US/create-vr-network-offering.xml b/docs/en-US/create-vr-network-offering.xml deleted file mode 100644 index 317e3c200a1..00000000000 --- a/docs/en-US/create-vr-network-offering.xml +++ /dev/null @@ -1,108 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Creating and Changing a Virtual Router Network Offering - To create the network offering in association with a virtual router system service - offering: - - - Log in to the &PRODUCT; UI as a user or admin. - - - First, create a system service offering, for example: VRsystemofferingHA. - For more information on creating a system service offering, see . - - - From the Select Offering drop-down, choose Network Offering. - - - Click Add Network Offering. - - - In the dialog, make the following choices: - - - Name. Any desired name for the network - offering. - - - Description. A short description of the offering - that can be displayed to users. - - - Network Rate. Allowed data transfer rate in MB per - second. - - - Traffic Type. The type of network traffic that will - be carried on the network. - - - Guest Type. Choose whether the guest network is - isolated or shared. For a description of these terms, see . - - - Specify VLAN. (Isolated guest networks only) - Indicate whether a VLAN should be specified when this offering is used. - - - Supported Services. Select one or more of the - possible network services. For some services, you must also choose the service provider; - for example, if you select Load Balancer, you can choose the &PRODUCT; virtual router or - any other load balancers that have been configured in the cloud. Depending on which - services you choose, additional fields may appear in the rest of the dialog box. For - more information, see - - - System Offering. Choose the system service offering - that you want virtual routers to use in this network. In this case, the default “System - Offering For Software Router†and the custom “VRsystemofferingHA†are available and - displayed. - - - - - Click OK and the network offering is created. - - - To change the network offering of a guest network to the virtual router service - offering: - - - Select Network from the left navigation pane. 
- - - Select the guest network that you want to offer this network service to. - - - Click the Edit button. - - - From the Network Offering drop-down, select the virtual router network offering you have - just created. - - - Click OK. - - -
diff --git a/docs/en-US/create-windows-template.xml b/docs/en-US/create-windows-template.xml deleted file mode 100644 index d02f0678444..00000000000 --- a/docs/en-US/create-windows-template.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Creating a Windows Template - Windows templates must be prepared with Sysprep before they can be provisioned on multiple machines. Sysprep allows you to create a generic Windows template and avoid any possible SID conflicts. - (XenServer) Windows VMs running on XenServer require PV drivers, which may be provided in the template or added after the VM is created. The PV drivers are necessary for essential management functions such as mounting additional volumes and ISO images, live migration, and graceful shutdown. - - - An overview of the procedure is as follows: - - Upload your Windows ISO. For more information, see . - Create a VM Instance with this ISO. For more information, see . - Follow the steps in Sysprep for Windows Server 2008 R2 (below) or Sysprep for Windows Server 2003 R2, depending on your version of Windows Server - The preparation steps are complete. Now you can actually create the template as described in Creating the Windows Template. - - - -
diff --git a/docs/en-US/creating-a-plugin.xml b/docs/en-US/creating-a-plugin.xml deleted file mode 100644 index 448d4e6ea69..00000000000 --- a/docs/en-US/creating-a-plugin.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Plugin Development - This chapter will detail different elements related to the development of plugins within Cloudstack - - diff --git a/docs/en-US/creating-compute-offerings.xml b/docs/en-US/creating-compute-offerings.xml deleted file mode 100644 index 5c5033afabb..00000000000 --- a/docs/en-US/creating-compute-offerings.xml +++ /dev/null @@ -1,70 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Creating a New Compute Offering - To create a new compute offering: - - Log in with admin privileges to the &PRODUCT; UI. - In the left navigation bar, click Service Offerings. - In Select Offering, choose Compute Offering. - Click Add Compute Offering. - In the dialog, make the following choices: - - Name: Any desired name for the service offering. - Description: A short description of the offering that can be - displayed to users - Storage type: The type of disk that should be allocated. - Local allocates from storage attached directly to the host where the system - VM is running. Shared allocates from storage accessible via NFS. - # of CPU cores: The number of cores which should be allocated - to a system VM with this offering - CPU (in MHz): The CPU speed of the cores that the system VM - is allocated. For example, “2000†would provide for a 2 GHz clock. - Memory (in MB): The amount of memory in megabytes that the - system VM should be allocated. For example, “2048†would provide for a 2 GB - RAM allocation. - Network Rate: Allowed data transfer rate in MB per - second. - Offer HA: If yes, the administrator can choose to have the - system VM be monitored and as highly available as possible. - Storage Tags: The tags that should be associated with the - primary storage used by the system VM. - Host Tags: (Optional) Any tags that you use to organize your - hosts - CPU cap: Whether to limit the level of CPU usage even if - spare capacity is available. - isVolatile: If checked, VMs created from this service - offering will have their root disks reset upon reboot. This is useful for - secure environments that need a fresh start on every boot and for desktops - that should not retain state. - Public: Indicate whether the service offering should be - available all domains or only some domains. Choose Yes to make it available - to all domains. Choose No to limit the scope to a subdomain; &PRODUCT; - will then prompt for the subdomain's name. - - Click Add. - - - -
diff --git a/docs/en-US/creating-disk-offerings.xml b/docs/en-US/creating-disk-offerings.xml deleted file mode 100644 index 627311e4418..00000000000 --- a/docs/en-US/creating-disk-offerings.xml +++ /dev/null @@ -1,48 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Creating a New Disk Offering - To create a new disk offering: - - Log in with admin privileges to the &PRODUCT; UI. - In the left navigation bar, click Service Offerings. - In Select Offering, choose Disk Offering. - Click Add Disk Offering. - In the dialog, make the following choices: - - Name. Any desired name for the disk offering. - Description. A short description of the offering that can be displayed to users - Custom Disk Size. If checked, the user can set their own disk size. If not checked, the root administrator must define a value in Disk Size. - Disk Size. Appears only if Custom Disk Size is not selected. Define the volume size in GB. - QoS Type. Three options: Empty (no Quality of Service), hypervisor (rate limiting enforced on the hypervisor side), and storage (guaranteed minimum and maximum IOPS enforced on the storage side). If leveraging QoS, make sure that the hypervisor or storage system supports this feature. - Custom IOPS. If checked, the user can set their own IOPS. If not checked, the root administrator can define values. If the root admin does not set values when using storage QoS, default values are used (the defauls can be overridden if the proper parameters are passed into &PRODUCT; when creating the primary storage in question). - Min IOPS. Appears only if storage QoS is to be used. Set a guaranteed minimum number of IOPS to be enforced on the storage side. - Max IOPS. Appears only if storage QoS is to be used. Set a maximum number of IOPS to be enforced on the storage side (the system may go above this limit in certain circumstances for short intervals). - (Optional)Storage Tags. The tags that should be associated with the primary storage for this disk. Tags are a comma separated list of attributes of the storage. For example "ssd,blue". Tags are also added on Primary Storage. &PRODUCT; matches tags on a disk offering to tags on the storage. 
If a tag is present on a disk offering that tag (or tags) must also be present on Primary Storage for the volume to be provisioned. If no such primary storage exists, allocation from the disk offering will fail. - Public. Indicate whether the service offering should be available to all domains or only some domains. Choose Yes to make it available to all domains. Choose No to limit the scope to a subdomain; &PRODUCT; will then prompt for the subdomain's name. - - Click Add. - -
diff --git a/docs/en-US/creating-my-first-plugin.xml b/docs/en-US/creating-my-first-plugin.xml deleted file mode 100644 index 3809fd30335..00000000000 --- a/docs/en-US/creating-my-first-plugin.xml +++ /dev/null @@ -1,216 +0,0 @@ - - -
- Creating my first plugin - This is a brief walk through of creating a simple plugin that adds an additional command to the API to return the message "Hello World". -
- Letting Cloudstack know about the plugin - Before we can begin we need to tell Cloudstack about the existence of our plugin. In order to do this we are required to edit some files related to the cloud-client-ui module - - - Navigate to the folder called client - - - Open pom.xml and add a dependency, this will look something like the following: - - client/pom.xml - <dependency> - <groupId>org.apache.cloudstack</groupId> - <artifactId>cloud-plugin-api-helloworld</artifactId> - <version>${project.version}</version> -</dependency> - - - - Continuing with client as your working directory open up tomcatconf/applicationContext.xml.in - - - Within this file we must insert a bean to load our class: - - client/tomcatconf/applicationContext.xml.in - <bean id="helloWorldImpl" class="org.apache.cloudstack.helloworld.HelloWorldImpl" /> - - - - Finally we need to register the additional API commands we add. Again with client as your working directory this is done by modifying tomcatconf/commands.properties.in - - - Within the file we simply add the names of the API commands we want to create followed by a permission number. 1 = admin, 2 = resource domain admin, 4 = domain admin, 8 = user. - - tomcatconf/commands.properties.in - helloWorld=8 - - -
-
- Creating the plugin - Within the Cloudstack filing structure all plugins live under the plugins folder. Since the sample plugin for this document is going to be API related it will live in plugins/api/helloworld. Along with this we will need a standard maven package layout, so lets create all the required folders: - $ mkdir -p plugins/api/helloworld/{src,target,test} -$ mkdir -p plugins/api/helloworld/src/org/apache/cloudstack/{api,helloworld} -$ mkdir -p plugins/api/helloworld/src/org/apache/cloudstack/api/{command,response} -$ mkdir -p plugins/api/helloworld/src/org/apache/cloudstack/api/command/user/helloworld - With helloworld as our working directory we should have a tree layout like the following: - $ cd plugins/api/helloworld -$ tree -. -|-- src -| `-- org -| `-- apache -| `-- cloudstack -| |-- api -| | |-- command -| | | `-- user -| | | `-- helloworld -| | |-- response -| `-- helloworld -|-- target -`-- test - -12 directories, 0 files - First we will create a pom.xml for our plugin: - - plugins/api/helloworld/pom.xml - <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> - <modelVersion>4.0.0</modelVersion> - <artifactId>cloud-plugin-api-helloworld</artifactId> - <name>Apache CloudStack Plugin - Hello World Plugin</name> - <parent> - <groupId>org.apache.cloudstack</groupId> - <artifactId>cloudstack-plugins</artifactId> - <version>4.2.0-SNAPSHOT</version> - <relativePath>../../pom.xml</relativePath> - </parent> - <dependencies> - <dependency> - <groupId>org.apache.cloudstack</groupId> - <artifactId>cloud-api</artifactId> - <version>${project.version}</version> - </dependency> - <dependency> - <groupId>org.apache.cloudstack</groupId> - <artifactId>cloud-utils</artifactId> - <version>${project.version}</version> - </dependency> - </dependencies> - <build> - <defaultGoal>install</defaultGoal> - 
<sourceDirectory>src</sourceDirectory> - <testSourceDirectory>test</testSourceDirectory> - </build> -</project> - - Next we need to make the root plugin pom aware of our plugin to do this simply edit plugins/pom.xml inserting a line like the following: - ...... -<module>api/helloworld</module> -...... - Finally we will being to create code for your plugin. Create an interface called HelloWorld that will extend PluggableService within src/org/apache/cloudstack/hellowold - package org.apache.cloudstack.helloworld; - -import com.cloud.utils.component.PluggableService; - -public interface HelloWorld extends PluggableService { } - Create an implementation of HelloWorld called HelloWorldImpl: - package org.apache.cloudstack.helloworld; - -import org.apache.cloudstack.api.command.user.helloworld.HelloWorldCmd; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import javax.ejb.Local; -import java.util.*; - -@Component -@Local(value = HelloWorld.class) -public class HelloWorldImpl implements HelloWorld { - private static final Logger s_logger = Logger.getLogger(HelloWorldImpl.class); - - public HelloWorldImpl() { - super(); - } - /** - * This informs cloudstack of the API commands you are creating. 
- */ - @Override - public List<Class<?>> getCommands() { - List<Class<?>> cmdList = new ArrayList<Class<?>>(); - cmdList.add(HelloWorldCmd.class); - return cmdList; - } -} - Next we will create our API command navigate to src/org/apache/cloudstack/api/command/user/helloworld and open up HelloWorldCmd.java, create it as follows - package org.apache.cloudstack.api.command.user.helloworld; - -import org.apache.cloudstack.api.APICommand; -import org.apache.cloudstack.api.BaseCmd; -import org.apache.cloudstack.api.response.HelloWorldResponse; -import org.apache.log4j.Logger; - -// Note this name matches the name you inserted into client/tomcatconf/commands.properties.in -@APICommand(name = "helloWorld", responseObject = HelloWorldResponse.class, description = "Returns a hello world message", since = "4.2.0") -public class HelloWorldCmd extends BaseCmd { - public static final Logger s_logger = Logger.getLogger(HelloWorldCmd.class.getName()); - private static final String s_name = "helloworldresponse"; - - @Override - public void execute() - { - HelloWorldResponse response = new HelloWorldResponse(); - response.setObjectName("helloworld"); - response.setResponseName(getCommandName()); - this.setResponseObject(response); - } - - @Override - public String getCommandName() { - return s_name; - } - - @Override - public long getEntityOwnerId() { - return 0; - } -} - Finally we need to create our HelloWorldResponse class, this will exist within src/org/apache/cloudstack/api/response/ - package org.apache.cloudstack.api.response; - -import com.google.gson.annotations.SerializedName; -import org.apache.cloudstack.api.BaseResponse; -import com.cloud.serializer.Param; - -@SuppressWarnings("unused") -public class HelloWorldResponse extends BaseResponse { - @SerializedName("HelloWorld") @Param(description="HelloWorld Response") - private String HelloWorld; - - public HelloWorldResponse(){ - this.HelloWorld = "Hello World"; - } -} -
-
- Compiling your plugin: - Within the directory of your plugin i.e. plugins/api/helloworld run mvn clean install. - After this we need to recompile the cloud-client-ui; to do this, come back to the cloudstack base directory and execute mvn -pl client clean install -
-
- Starting Cloudstack and Testing: - Start up cloudstack with the normal mvn -pl :cloud-client-ui jetty:run, wait a few moments for it to start up, then head over to: localhost:8096/client/api?command=helloWorld and you should see your HelloWorld message. -
-
diff --git a/docs/en-US/creating-network-offerings.xml b/docs/en-US/creating-network-offerings.xml deleted file mode 100644 index 4f75781c3cb..00000000000 --- a/docs/en-US/creating-network-offerings.xml +++ /dev/null @@ -1,285 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Creating a New Network Offering - To create a network offering: - - - Log in with admin privileges to the &PRODUCT; UI. - - - In the left navigation bar, click Service Offerings. - - - In Select Offering, choose Network Offering. - - - Click Add Network Offering. - - - In the dialog, make the following choices: - - - Name. Any desired name for the network - offering. - - - Description. A short description of the offering - that can be displayed to users. - - - Network Rate. Allowed data transfer rate in MB per - second. - - - Guest Type. Choose whether the guest network is - isolated or shared. - For a description of this term, see . - For a description of this term, see the Administration Guide. - - - - Persistent. Indicate whether the guest network is - persistent or not. The network that you can provision without having to deploy a VM on - it is termed persistent network. For more information, see . - - - Specify VLAN. (Isolated guest networks only) - Indicate whether a VLAN could be specified when this offering is used. If you select - this option and later use this network offering while creating a VPC tier or an isolated - network, you will be able to specify a VLAN ID for the network you create. - - - VPC. This option indicate whether the guest network - is Virtual Private Cloud-enabled. A Virtual Private Cloud (VPC) is a private, isolated - part of &PRODUCT;. A VPC can have its own virtual network topology that resembles a - traditional physical network. For more information on VPCs, see . - - - Supported Services. Select one or more of the - possible network services. For some services, you must also choose the service provider; - for example, if you select Load Balancer, you can choose the &PRODUCT; virtual router or - any other load balancers that have been configured in the cloud. Depending on which - services you choose, additional fields may appear in the rest of the dialog box. 
- Based on the guest network type selected, you can see the following supported - services: - - - - - Supported Services - Description - Isolated - Shared - - - - - DHCP - For more information, see . - Supported - Supported - - - DNS - For more information, see . - Supported - Supported - - - Load Balancer - If you select Load Balancer, you can choose the &PRODUCT; virtual - router or any other load balancers that have been configured in the - cloud. - Supported - Supported - - - Firewall - For more information, see . - For more information, see the Administration - Guide. - Supported - Supported - - - Source NAT - If you select Source NAT, you can choose the &PRODUCT; virtual router - or any other Source NAT providers that have been configured in the - cloud. - Supported - Supported - - - Static NAT - If you select Static NAT, you can choose the &PRODUCT; virtual router - or any other Static NAT providers that have been configured in the - cloud. - Supported - Supported - - - Port Forwarding - If you select Port Forwarding, you can choose the &PRODUCT; virtual - router or any other Port Forwarding providers that have been configured in the - cloud. - Supported - Not Supported - - - VPN - For more information, see . - Supported - Not Supported - - - User Data - For more information, see . - For more information, see the Administration - Guide. - Not Supported - Supported - - - Network ACL - For more information, see . - Supported - Not Supported - - - Security Groups - For more information, see . - Not Supported - Supported - - - - - - - System Offering. If the service provider for any of - the services selected in Supported Services is a virtual router, the System Offering - field appears. Choose the system service offering that you want virtual routers to use - in this network. 
For example, if you selected Load Balancer in Supported Services and - selected a virtual router to provide load balancing, the System Offering field appears - so you can choose between the &PRODUCT; default system service offering and any custom - system service offerings that have been defined by the &PRODUCT; root - administrator. - For more information, see . - For more information, see the Administration Guide. - - - LB Isolation: Specify what type of load balancer - isolation you want for the network: Shared or Dedicated. - Dedicated: If you select dedicated LB isolation, a - dedicated load balancer device is assigned for the network from the pool of dedicated - load balancer devices provisioned in the zone. If no sufficient dedicated load balancer - devices are available in the zone, network creation fails. Dedicated device is a good - choice for the high-traffic networks that make full use of the device's - resources. - Shared: If you select shared LB isolation, a shared - load balancer device is assigned for the network from the pool of shared load balancer - devices provisioned in the zone. While provisioning &PRODUCT; picks the shared load - balancer device that is used by the least number of accounts. Once the device reaches - its maximum capacity, the device will not be allocated to a new account. - - - Mode: You can select either Inline mode or Side by - Side mode: - Inline mode: Supported only for Juniper SRX - firewall and BigF5 load balancer devices. In inline mode, a firewall device is placed in - front of a load balancing device. The firewall acts as the gateway for all the incoming - traffic, then redirect the load balancing traffic to the load balancer behind it. The - load balancer in this case will not have the direct access to the public network. - Side by Side: In side by side mode, a firewall - device is deployed in parallel with the load balancer device. 
So the traffic to the load - balancer public IP is not routed through the firewall, and therefore, is exposed to the - public network. - - - Associate Public IP: Select this option if you want - to assign a public IP address to the VMs deployed in the guest network. This option is - available only if - - - Guest network is shared. - - - StaticNAT is enabled. - - - Elastic IP is enabled. - - - For information on Elastic IP, see . - - - Redundant router capability: Available only when - Virtual Router is selected as the Source NAT provider. Select this option if you want to - use two virtual routers in the network for uninterrupted connection: one operating as - the master virtual router and the other as the backup. The master virtual router - receives requests from and sends responses to the user’s VM. The backup virtual router - is activated only when the master is down. After the failover, the backup becomes the - master virtual router. &PRODUCT; deploys the routers on different hosts to ensure - reliability if one host is down. - - - Conserve mode: Indicate whether to use conserve - mode. In this mode, network resources are allocated only when the first virtual machine - starts in the network. When conservative mode is off, the public IP can only be used for - a single service. For example, a public IP used for a port forwarding rule cannot be - used for defining other services, such as StaticNAT or load balancing. When the conserve - mode is on, you can define more than one service on the same public IP. - - If StaticNAT is enabled, irrespective of the status of the conserve mode, no port - forwarding or load balancing rule can be created for the IP. However, you can add the - firewall rules by using the createFirewallRule command. - - - - Tags: Network tag to specify which physical network - to use. - - - Default egress policy: Configure the default policy - for firewall egress rules. Options are Allow and Deny. 
Default is Allow if no egress - policy is specified, which indicates that all the egress traffic is accepted when a - guest network is created from this offering. - To block the egress traffic for a guest network, select Deny. In this case, when you - configure an egress rules for an isolated guest network, rules are added to allow the - specified traffic. - - - - - Click Add. - - -
diff --git a/docs/en-US/creating-new-volumes.xml b/docs/en-US/creating-new-volumes.xml deleted file mode 100644 index 5440dc5a016..00000000000 --- a/docs/en-US/creating-new-volumes.xml +++ /dev/null @@ -1,84 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Creating a New Volume - You can add more data disk volumes to a guest VM at any time, up to the limits of your - storage capacity. Both &PRODUCT; administrators and users can add volumes to VM instances. When - you create a new volume, it is stored as an entity in &PRODUCT;, but the actual storage - resources are not allocated on the physical storage device until you attach the volume. This - optimization allows the &PRODUCT; to provision the volume nearest to the guest that will use it - when the first attachment is made. -
- Using Local Storage for Data Volumes - You can create data volumes on local storage (supported with XenServer, KVM, and VMware). - The data volume is placed on the same host as the VM instance that is attached to the data - volume. These local data volumes can be attached to virtual machines, detached, re-attached, - and deleted just as with the other types of data volume. - Local storage is ideal for scenarios where persistence of data volumes and HA is not - required. Some of the benefits include reduced disk I/O latency and cost reduction from using - inexpensive local disks. - In order for local volumes to be used, the feature must be enabled for the zone. - You can create a data disk offering for local storage. When a user creates a new VM, they - can select this disk offering in order to cause the data disk volume to be placed in local - storage. - You can not migrate a VM that has a volume in local storage to a different host, nor - migrate the volume itself away to a different host. If you want to put a host into maintenance - mode, you must first stop any VMs with local data volumes on that host. -
-
- To Create a New Volume - - - Log in to the &PRODUCT; UI as a user or admin. - - - In the left navigation bar, click Storage. - - - In Select View, choose Volumes. - - - To create a new volume, click Add Volume, provide the following details, and click - OK. - - - Name. Give the volume a unique name so you can find it later. - - - Availability Zone. Where do you want the storage to reside? This should be close - to the VM that will use the volume. - - - Disk Offering. Choose the characteristics of the storage. - - - The new volume appears in the list of volumes with the state "Allocated." The volume - data is stored in &PRODUCT;, but the volume is not yet ready for use. - - - To start using the volume, continue to Attaching a Volume. - - -
-
diff --git a/docs/en-US/creating-shared-network.xml b/docs/en-US/creating-shared-network.xml deleted file mode 100644 index e6a018f39d5..00000000000 --- a/docs/en-US/creating-shared-network.xml +++ /dev/null @@ -1,132 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Configuring a Shared Guest Network - - - Log in to the &PRODUCT; UI as administrator. - - - In the left navigation, choose Infrastructure. - - - On Zones, click View More. - - - Click the zone to which you want to add a guest network. - - - Click the Physical Network tab. - - - Click the physical network you want to work with. - - - On the Guest node of the diagram, click Configure. - - - Click the Network tab. - - - Click Add guest network. - The Add guest network window is displayed. - - - Specify the following: - - - Name: The name of the network. This will be visible - to the user. - - - Description: The short description of the network - that can be displayed to users. - - - VLAN ID: The unique ID of the VLAN. - - - Isolated VLAN ID: The unique ID of the Secondary - Isolated VLAN. - - - Scope: The available scopes are Domain, Account, - Project, and All. - - - Domain: Selecting Domain limits the scope of - this guest network to the domain you specify. The network will not be available for - other domains. If you select Subdomain Access, the guest network is available to all - the sub domains within the selected domain. - - - Account: The account for which the guest - network is being created for. You must specify the domain the account belongs - to. - - - Project: The project for which the guest - network is being created for. You must specify the domain the project belongs - to. - - - All: The guest network is available for all the - domains, account, projects within the selected zone. - - - - - Network Offering: If the administrator has - configured multiple network offerings, select the one you want to use for this - network. - - - Gateway: The gateway that the guests should - use. - - - Netmask: The netmask in use on the subnet the - guests will use. - - - IP Range: A range of IP addresses that are - accessible from the Internet and are assigned to the guest VMs. - If one NIC is used, these IPs should be in the same CIDR in the case of IPv6. 
- - - IPv6 CIDR: The network prefix that defines the - guest network subnet. This is the CIDR that describes the IPv6 addresses in use in the - guest networks in this zone. To allot IP addresses from within a particular address - block, enter a CIDR. - - - Network Domain: A custom DNS suffix at the level of - a network. If you want to assign a special domain name to the guest VM network, specify - a DNS suffix. - - - - - Click OK to confirm. - - -
diff --git a/docs/en-US/creating-system-service-offerings.xml b/docs/en-US/creating-system-service-offerings.xml deleted file mode 100644 index e33d9d07767..00000000000 --- a/docs/en-US/creating-system-service-offerings.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Creating a New System Service Offering - To create a system service offering: - - Log in with admin privileges to the &PRODUCT; UI. - In the left navigation bar, click Service Offerings. - In Select Offering, choose System Offering. - Click Add System Service Offering. - In the dialog, make the following choices: - - Name. Any desired name for the system offering. - Description. A short description of the offering that can be displayed to users - System VM Type. Select the type of system virtual machine that this offering is intended to support. - Storage type. The type of disk that should be allocated. Local allocates from storage attached directly to the host where the system VM is running. Shared allocates from storage accessible via NFS. - # of CPU cores. The number of cores which should be allocated to a system VM with this offering - CPU (in MHz). The CPU speed of the cores that the system VM is allocated. For example, "2000" would provide for a 2 GHz clock. - Memory (in MB). The amount of memory in megabytes that the system VM should be allocated. For example, "2048" would provide for a 2 GB RAM allocation. - Network Rate. Allowed data transfer rate in MB per second. - Offer HA. If yes, the administrator can choose to have the system VM be monitored and as highly available as possible. - Storage Tags. The tags that should be associated with the primary storage used by the system VM. - Host Tags. (Optional) Any tags that you use to organize your hosts - CPU cap. Whether to limit the level of CPU usage even if spare capacity is available. - Public. Indicate whether the service offering should be available all domains or only some domains. Choose Yes to make it available to all domains. Choose No to limit the scope to a subdomain; &PRODUCT; will then prompt for the subdomain's name. - - Click Add. - - - -
diff --git a/docs/en-US/creating-vms.xml b/docs/en-US/creating-vms.xml deleted file mode 100644 index df4d88ed548..00000000000 --- a/docs/en-US/creating-vms.xml +++ /dev/null @@ -1,117 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Creating VMs - Virtual machines are usually created from a template. Users can also create blank virtual - machines. A blank virtual machine is a virtual machine without an OS template. Users can attach - an ISO file and install the OS from the CD/DVD-ROM. - - You can create a VM without starting it. You can determine whether the VM needs to be - started as part of the VM deployment. A request parameter, startVM, in the deployVm API - provides this feature. For more information, see the Developer's Guide - - -
- Creating a VM from a template - - - Log in to the &PRODUCT; UI as an administrator or user. - - - In the left navigation bar, click Instances. - - - Click Add Instance. - - - Select a zone. - - - Select a template, then follow the steps in the wizard. For more information about how - the templates came to be in this list, see . - - - Be sure that the hardware you have allows starting the selected service offering. - - - Click Submit and your VM will be created and started. - - For security reasons, the internal name of the VM is visible only to the root - admin. - - - -
-
- Creating a VM from an ISO - - (XenServer) Windows VMs running on XenServer require PV drivers, which may be provided in - the template or added after the VM is created. The PV drivers are necessary for essential - management functions such as mounting additional volumes and ISO images, live migration, and - graceful shutdown. - - - - - Log in to the &PRODUCT; UI as an administrator or user. - - - In the left navigation bar, click Instances. - - - Click Add Instance. - - - Select a zone. - - - Select ISO Boot, and follow the steps in the wizard. - - - Click Submit and your VM will be created and started. - - - -
-
- - Configuring Usage of Linked Clones on VMware - (For ESX hypervisor in conjunction with vCenter) - VMs can be created as either linked clones or full clones on VMware. - For a full description of clone types, refer to VMware documentation. In summary: A - full clone is a copy of an existing virtual machine which, once created, does not depend - in any way on the original virtual machine. A linked clone is also a copy of an existing - virtual machine, but it has ongoing dependency on the original. A linked clone shares the - virtual disk of the original VM, and retains access to all files that were present at the - time the clone was created. - The use of these different clone types involves some side effects and tradeoffs, so it - is to the administrator's advantage to be able to choose which of the two types will be - used in a &PRODUCT; deployment. - A new global configuration setting has been added, vmware.create.full.clone. When the - administrator sets this to true, end users can create guest VMs only as full clones. The - default value is false. - It is not recommended to change the value of vmware.create.full.clone in a cloud with - running VMs. However, if the value is changed, existing VMs are not affected. Only VMs - created after the setting is put into effect are subject to the restriction. -
-
diff --git a/docs/en-US/customizing-dns.xml b/docs/en-US/customizing-dns.xml deleted file mode 100644 index c24bad895f4..00000000000 --- a/docs/en-US/customizing-dns.xml +++ /dev/null @@ -1,44 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Customizing the Network Domain Name - The root administrator can optionally assign a custom DNS suffix at the level of a network, account, domain, zone, or entire &PRODUCT; installation, and a domain administrator can do so within their own domain. To specify a custom domain name and put it into effect, follow these steps. - - Set the DNS suffix at the desired scope - - At the network level, the DNS suffix can be assigned through the UI when creating a new network, as described in or with the updateNetwork command in the &PRODUCT; API. - At the account, domain, or zone level, the DNS suffix can be assigned with the appropriate &PRODUCT; API commands: createAccount, editAccount, createDomain, editDomain, createZone, or editZone. - At the global level, use the configuration parameter guest.domain.suffix. You can also use the &PRODUCT; API command updateConfiguration. After modifying this global configuration, restart the Management Server to put the new setting into effect. - - To make the new DNS suffix take effect for an existing network, call the &PRODUCT; API command updateNetwork. This step is not necessary when the DNS suffix was specified while creating a new network. - - The source of the network domain that is used depends on the following rules. - - For all networks, if a network domain is specified as part of a network's own configuration, that value is used. - For an account-specific network, the network domain specified for the account is used. If none is specified, the system looks for a value in the domain, zone, and global configuration, in that order. - For a domain-specific network, the network domain specified for the domain is used. If none is specified, the system looks for a value in the zone and global configuration, in that order. - For a zone-specific network, the network domain specified for the zone is used. If none is specified, the system looks for a value in the global configuration. - -
diff --git a/docs/en-US/database-replication.xml b/docs/en-US/database-replication.xml deleted file mode 100644 index 8ca80713732..00000000000 --- a/docs/en-US/database-replication.xml +++ /dev/null @@ -1,144 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Database Replication (Optional) - &PRODUCT; supports database replication from one MySQL node to another. This is achieved using standard MySQL replication. You may want to do this as insurance against MySQL server or storage loss. MySQL replication is implemented using a master/slave model. The master is the node that the Management Servers are configured to use. The slave is a standby node that receives all write operations from the master and applies them to a local, redundant copy of the database. The following steps are a guide to implementing MySQL replication. - Creating a replica is not a backup solution. You should develop a backup procedure for the MySQL data that is distinct from replication. - - Ensure that this is a fresh install with no data in the master. - - Edit my.cnf on the master and add the following in the [mysqld] section below datadir. - -log_bin=mysql-bin -server_id=1 - - The server_id must be unique with respect to other servers. The recommended way to achieve this is to give the master an ID of 1 and each slave a sequential number greater than 1, so that the servers are numbered 1, 2, 3, etc. - - - Restart the MySQL service. On RHEL/CentOS systems, use: - -# service mysqld restart - - On Debian/Ubuntu systems, use: - -# service mysql restart - - - - Create a replication account on the master and give it privileges. We will use the "cloud-repl" user with the password "password". This assumes that master and slave run on the 172.16.1.0/24 network. - -# mysql -u root -mysql> create user 'cloud-repl'@'172.16.1.%' identified by 'password'; -mysql> grant replication slave on *.* TO 'cloud-repl'@'172.16.1.%'; -mysql> flush privileges; -mysql> flush tables with read lock; - - - Leave the current MySQL session running. - In a new shell start a second MySQL session. - - Retrieve the current position of the database. 
- -# mysql -u root -mysql> show master status; -+------------------+----------+--------------+------------------+ -| File | Position | Binlog_Do_DB | Binlog_Ignore_DB | -+------------------+----------+--------------+------------------+ -| mysql-bin.000001 | 412 | | | -+------------------+----------+--------------+------------------+ - - - Note the file and the position that are returned by your instance. - Exit from this session. - - Complete the master setup. Returning to your first session on the master, release the locks and exit MySQL. - -mysql> unlock tables; - - - - Install and configure the slave. On the slave server, run the following commands. - -# yum install mysql-server -# chkconfig mysqld on - - - - Edit my.cnf and add the following lines in the [mysqld] section below datadir. - -server_id=2 -innodb_rollback_on_timeout=1 -innodb_lock_wait_timeout=600 - - - - Restart MySQL. Use "mysqld" on RHEL/CentOS systems: - -# service mysqld restart - - On Ubuntu/Debian systems use "mysql." - -# service mysql restart - - - - Instruct the slave to connect to and replicate from the master. Replace the IP address, password, log file, and position with the values you have used in the previous steps. - -mysql> change master to - -> master_host='172.16.1.217', - -> master_user='cloud-repl', - -> master_password='password', - -> master_log_file='mysql-bin.000001', - -> master_log_pos=412; - - - - Then start replication on the slave. - -mysql> start slave; - - - - Optionally, open port 3306 on the slave as was done on the master earlier. - This is not required for replication to work. But if you choose not to do this, you will need to do it when failover to the replica occurs. - - -
- Failover - This will provide for a replicated database that can be used to implement manual failover for the Management Servers. &PRODUCT; failover from one MySQL instance to another is performed by the administrator. In the event of a database failure you should: - - Stop the Management Servers (via service cloudstack-management stop). - Change the replica's configuration to be a master and restart it. - Ensure that the replica's port 3306 is open to the Management Servers. - Make a change so that the Management Server uses the new database. The simplest process here is to put the IP address of the new database server into each Management Server's /etc/cloudstack/management/db.properties. - - Restart the Management Servers: - -# service cloudstack-management start - - - -
-
diff --git a/docs/en-US/dates-in-usage-record.xml b/docs/en-US/dates-in-usage-record.xml deleted file mode 100644 index dc2f07221be..00000000000 --- a/docs/en-US/dates-in-usage-record.xml +++ /dev/null @@ -1,26 +0,0 @@ - - -
- Dates in the Usage Record - Usage records include a start date and an end date. These dates define the period of time for which the raw usage number was calculated. If daily aggregation is used, the start date is midnight on the day in question and the end date is 23:59:59 on the day in question (with one exception; see below). A virtual machine could have been deployed at noon on that day, stopped at 6pm on that day, then started up again at 11pm. When usage is calculated on that day, there will be 7 hours of running VM usage (usage type 1) and 12 hours of allocated VM usage (usage type 2). If the same virtual machine runs for the entire next day, there will 24 hours of both running VM usage (type 1) and allocated VM usage (type 2). - Note: The start date is not the time a virtual machine was started, and the end date is not the time when a virtual machine was stopped. The start and end dates give the time range within which usage was calculated. - For network usage, the start date and end date again define the range in which the number of bytes transferred was calculated. If a user downloads 10 MB and uploads 1 MB in one day, there will be two records, one showing the 10 megabytes received and one showing the 1 megabyte sent. - There is one case where the start date and end date do not correspond to midnight and 11:59:59pm when daily aggregation is used. This occurs only for network usage records. When the usage server has more than one day's worth of unprocessed data, the old data will be included in the aggregation period. The start date in the usage record will show the date and time of the earliest event. For other types of usage, such as IP addresses and VMs, the old unprocessed data is not included in daily aggregation. -
- diff --git a/docs/en-US/dedicated-ha-hosts.xml b/docs/en-US/dedicated-ha-hosts.xml deleted file mode 100644 index 89c721f080a..00000000000 --- a/docs/en-US/dedicated-ha-hosts.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Dedicated HA Hosts - One or more hosts can be designated for use only by HA-enabled VMs that are restarting due to a host failure. Setting up a pool of such dedicated HA hosts as the recovery destination for all HA-enabled VMs is useful to: - - Make it easier to determine which VMs have been restarted as part of the &PRODUCT; high-availability function. If a VM is running on a dedicated HA host, then it must be an HA-enabled VM whose original host failed. (With one exception: It is possible for an administrator to manually migrate any VM to a dedicated HA host.). - Keep HA-enabled VMs from restarting on hosts which may be reserved for other purposes. - - The dedicated HA option is set through a special host tag when the host is created. To allow the administrator to dedicate hosts to only HA-enabled VMs, set the global configuration variable ha.tag to the desired tag (for example, "ha_host"), and restart the Management Server. Enter the value in the Host Tags field when adding the host(s) that you want to dedicate to HA-enabled VMs. - If you set ha.tag, be sure to actually use that tag on at least one host in your cloud. If the tag specified in ha.tag is not set for any host in the cloud, the HA-enabled VMs will fail to restart after a crash. -
diff --git a/docs/en-US/default-account-resource-limit.xml b/docs/en-US/default-account-resource-limit.xml deleted file mode 100644 index 5134e508c11..00000000000 --- a/docs/en-US/default-account-resource-limit.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Default Account Resource Limits - You can limit resource use by accounts. The default limits are set by using global - configuration parameters, and they affect all accounts within a cloud. The relevant - parameters are those beginning with max.account, for example: max.account.snapshots. - To override a default limit for a particular account, set a per-account resource limit. - - Log in to the &PRODUCT; UI. - In the left navigation tree, click Accounts. - Select the account you want to modify. The current limits are displayed. A value of -1 shows - that there is no limit in place. - Click the Edit button. - - - - - editbutton.png: edits the settings - - - - -
diff --git a/docs/en-US/default-template.xml b/docs/en-US/default-template.xml deleted file mode 100644 index 16442c38f47..00000000000 --- a/docs/en-US/default-template.xml +++ /dev/null @@ -1,56 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- The Default Template - &PRODUCT; includes a CentOS template. This template is downloaded by the Secondary Storage VM after the primary and secondary storage are configured. You can use this template in your production deployment or you can delete it and use custom templates. - The root password for the default template is "password". - A default template is provided for each of XenServer, KVM, and vSphere. The templates that are downloaded depend on the hypervisor type that is available in your cloud. Each template is approximately 2.5 GB physical size. - The default template includes the standard iptables rules, which will block most access to the template excluding ssh. - # iptables --list -Chain INPUT (policy ACCEPT) -target prot opt source destination -RH-Firewall-1-INPUT all -- anywhere anywhere - -Chain FORWARD (policy ACCEPT) -target prot opt source destination -RH-Firewall-1-INPUT all -- anywhere anywhere - -Chain OUTPUT (policy ACCEPT) -target prot opt source destination - -Chain RH-Firewall-1-INPUT (2 references) -target prot opt source destination -ACCEPT all -- anywhere anywhere -ACCEPT icmp -- anywhere anywhere icmp any -ACCEPT esp -- anywhere anywhere -ACCEPT ah -- anywhere anywhere -ACCEPT udp -- anywhere 224.0.0.251 udp dpt:mdns -ACCEPT udp -- anywhere anywhere udp dpt:ipp -ACCEPT tcp -- anywhere anywhere tcp dpt:ipp -ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED -ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh -REJECT all -- anywhere anywhere reject-with icmp-host- - -
diff --git a/docs/en-US/delete-event-alerts.xml b/docs/en-US/delete-event-alerts.xml deleted file mode 100644 index 392b37f151f..00000000000 --- a/docs/en-US/delete-event-alerts.xml +++ /dev/null @@ -1,89 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Deleting and Archiving Events and Alerts - &PRODUCT; provides you the ability to delete or archive the existing alerts and events that - you no longer want to implement. You can regularly delete or archive any alerts or events that - you cannot, or do not want to resolve from the database. - You can delete or archive individual alerts or events either directly by using the Quickview - or by using the Details page. If you want to delete multiple alerts or events at the same time, - you can use the respective context menu. You can delete alerts or events by category for a time - period. For example, you can select categories such as USER.LOGOUT, VM.DESTROY, VM.AG.UPDATE, CONFIGURATION.VALUE.EDI, and so on. - You can also view the number of events or alerts archived or deleted. - In order to support the delete or archive alerts, the following global parameters have been - added: - - - alert.purge.delay: The alerts older than specified - number of days are purged. Set the value to 0 to never purge alerts automatically. - - - alert.purge.interval: The interval in seconds to wait - before running the alert purge thread. The default is 86400 seconds (one day). - - - - Archived alerts or events cannot be viewed in the UI or by using the API. They are - maintained in the database for auditing or compliance purposes. - -
- Permissions - Consider the following: - - - The root admin can delete or archive one or multiple alerts or events. - - - The domain admin or end user can delete or archive one or multiple events. - - -
-
- Procedure - - - Log in as administrator to the &PRODUCT; UI. - - - In the left navigation, click Events. - - - Perform either of the following: - - - To archive events, click Archive Events, and specify event type and time - period. - - - To archive events, click Delete Events, and specify event type and time - period. - - - - - Click OK. - - -
-
diff --git a/docs/en-US/delete-reset-vpn.xml b/docs/en-US/delete-reset-vpn.xml deleted file mode 100644 index 2fe85d279b6..00000000000 --- a/docs/en-US/delete-reset-vpn.xml +++ /dev/null @@ -1,107 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Restarting and Removing a VPN Connection - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC to which you want to deploy the VMs. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - - - Click the Settings icon. - For each tier, the following options are displayed: - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - Select Site-to-Site VPN. - The Site-to-Site VPN page is displayed. - - - From the Select View drop-down, ensure that VPN Connection is selected. - All the VPN connections you created are displayed. - - - Select the VPN connection you want to work with. - The Details tab is displayed. - - - To remove a VPN connection, click the Delete VPN connection button - - - - - remove-vpn.png: button to remove a VPN connection - - - To restart a VPN connection, click the Reset VPN connection button present in the - Details tab. - - - - - reset-vpn.png: button to reset a VPN connection - - - - -
diff --git a/docs/en-US/delete-templates.xml b/docs/en-US/delete-templates.xml deleted file mode 100644 index f9351da844f..00000000000 --- a/docs/en-US/delete-templates.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Deleting Templates - Templates may be deleted. In general, when a template spans multiple Zones, only the copy that is selected for deletion will be deleted; the same template in other Zones will not be deleted. The provided CentOS template is an exception to this. If the provided CentOS template is deleted, it will be deleted from all Zones. - When templates are deleted, the VMs instantiated from them will continue to run. However, new VMs cannot be created based on the deleted template. -
diff --git a/docs/en-US/deleting-vms.xml b/docs/en-US/deleting-vms.xml deleted file mode 100644 index 97245c81ef4..00000000000 --- a/docs/en-US/deleting-vms.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Deleting VMs - Users can delete their own virtual machines. A running virtual machine will be abruptly stopped before it is deleted. Administrators can delete any virtual machines. - To delete a virtual machine: - - Log in to the &PRODUCT; UI as a user or admin. - In the left navigation, click Instances. - Choose the VM that you want to delete. - Click the Destroy Instance button. - - - - - Destroyinstance.png: button to destroy an instance - - - - -
- diff --git a/docs/en-US/dell62xx-hardware.xml b/docs/en-US/dell62xx-hardware.xml deleted file mode 100644 index 8bc7770ce86..00000000000 --- a/docs/en-US/dell62xx-hardware.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Dell 62xx - The following steps show how a Dell 62xx is configured for zone-level layer-3 switching. - These steps assume VLAN 201 is used to route untagged private IPs for pod 1, and pod 1’s layer-2 - switch is connected to Ethernet port 1/g1. - The Dell 62xx Series switch supports up to 1024 VLANs. - - - Configure all the VLANs in the database. - vlan database -vlan 200-999 -exit - - - Configure Ethernet port 1/g1. - interface ethernet 1/g1 -switchport mode general -switchport general pvid 201 -switchport general allowed vlan add 201 untagged -switchport general allowed vlan add 300-999 tagged -exit - - - The statements configure Ethernet port 1/g1 as follows: - - - VLAN 201 is the native untagged VLAN for port 1/g1. - - - All VLANs (300-999) are passed to all the pod-level layer-2 switches. - - -
diff --git a/docs/en-US/dell62xx-layer2.xml b/docs/en-US/dell62xx-layer2.xml deleted file mode 100644 index 1c0eea07203..00000000000 --- a/docs/en-US/dell62xx-layer2.xml +++ /dev/null @@ -1,49 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Dell 62xx - The following steps show how a Dell 62xx is configured for pod-level layer-2 - switching. - - - Configure all the VLANs in the database. - vlan database -vlan 300-999 -exit - - - VLAN 201 is used to route untagged private IP addresses for pod 1, and pod 1 is connected to this layer-2 switch. - interface range ethernet all -switchport mode general -switchport general allowed vlan add 300-999 tagged -exit - - - The statements configure all Ethernet ports to function as follows: - - - All ports are configured the same way. - - - All VLANs (300-999) are passed through all the ports of the layer-2 switch. - - -
diff --git a/docs/en-US/deployment-architecture-overview.xml b/docs/en-US/deployment-architecture-overview.xml deleted file mode 100644 index 835898ced7f..00000000000 --- a/docs/en-US/deployment-architecture-overview.xml +++ /dev/null @@ -1,57 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Deployment Architecture Overview - - A &PRODUCT; installation consists of two parts: the Management Server - and the cloud infrastructure that it manages. When you set up and - manage a &PRODUCT; cloud, you provision resources such as hosts, - storage devices, and IP addresses into the Management Server, and - the Management Server manages those resources. - - - The minimum production installation consists of one machine running - the &PRODUCT; Management Server and another machine to act as the - cloud infrastructure (in this case, a very simple infrastructure - consisting of one host running hypervisor software). In its smallest - deployment, a single machine can act as both the Management Server - and the hypervisor host (using the KVM hypervisor). - - - - - - basic-deployment.png: Basic two-machine deployment - - - A more full-featured installation consists of a highly-available - multi-node Management Server installation and up to tens of thousands of - hosts using any of several advanced networking setups. For - information about deployment options, see the "Choosing a Deployment Architecture" - section of the &PRODUCT; Installation Guide. - - - - -
diff --git a/docs/en-US/detach-move-volumes.xml b/docs/en-US/detach-move-volumes.xml deleted file mode 100644 index 8922db12161..00000000000 --- a/docs/en-US/detach-move-volumes.xml +++ /dev/null @@ -1,59 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Detaching and Moving Volumes - - This procedure is different from moving volumes from one storage pool to another as described in . - - A volume can be detached from a guest VM and attached to another guest. Both &PRODUCT; - administrators and users can detach volumes from VMs and move them to other VMs. - If the two VMs are in different clusters, and the volume is large, it may take several - minutes for the volume to be moved to the new VM. - - - - Log in to the &PRODUCT; UI as a user or admin. - - - In the left navigation bar, click Storage, and choose Volumes in Select View. - Alternatively, if you know which VM the volume is attached to, you can click Instances, - click the VM name, and click View Volumes. - - - Click the name of the volume you want to detach, then click the Detach Disk button. - - - - - DetachDiskButton.png: button to detach a volume - - - - - - To move the volume to another VM, follow the steps in . - - -
diff --git a/docs/en-US/devcloud-usage-mode.xml b/docs/en-US/devcloud-usage-mode.xml deleted file mode 100644 index bc211ce1436..00000000000 --- a/docs/en-US/devcloud-usage-mode.xml +++ /dev/null @@ -1,60 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- DevCloud Usage Mode - DevCloud can be used in several different ways: - - - Full sandbox. Where &PRODUCT; is run within the DevCloud instance started in Virtual Box. - In this mode, the &PRODUCT; management server runs within the instance and nested virtualization allows instantiation of tiny VMs within DevCloud itself. &PRODUCT; code modifications are done within DevCloud. - The following diagram shows the architecture of the SandBox mode. - - - - - - DevCloud.png: Schematic of the DevCloud SandBox architecture - - - - - A deployment environment. Where &PRODUCT; code is developed in the localhost of the developer and the resulting build is deployed within DevCloud - This mode was used in the testing procedure of &PRODUCT; 4.0.0 incubating release. See the following screencast to see how: http://vimeo.com/54621457 - - - A host-only mode. Where DevCloud is used only as a host. &PRODUCT; management server is run in the localhost of the developer - This mode makes use of a host-only interface defined in the Virtual Box preferences. Check the following screencast to see how: http://vimeo.com/54610161 - The following schematic shows the architecture of the Host-Only mode. - - - - - - DevCloud-hostonly.png: Schematic of the DevCloud host-only architecture - - - - -
diff --git a/docs/en-US/devcloud.xml b/docs/en-US/devcloud.xml deleted file mode 100644 index 677818700ae..00000000000 --- a/docs/en-US/devcloud.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- DevCloud - DevCloud is the &PRODUCT; sandbox. It is provided as a Virtual Box appliance. It is meant to be used as a development environment to easily test new &PRODUCT; development. It has also been used for training and &PRODUCT; demos since it provides a Cloud in a box. - - DevCloud is provided as a convenience by community members. It is not an official &PRODUCT; release artifact. - The &PRODUCT; source code however, contains tools to build your own DevCloud. - - - DevCloud is under development and should be considered a Work In Progress (WIP), the wiki is the most up to date documentation: - - - - -
diff --git a/docs/en-US/developer-getting-started.xml b/docs/en-US/developer-getting-started.xml deleted file mode 100644 index 14560280909..00000000000 --- a/docs/en-US/developer-getting-started.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - -
- Getting Started - - To get started using the &PRODUCT; API, you should have the following: - - URL of the &PRODUCT; server you wish to integrate with. - Both the API Key and Secret Key for an account. This should have been generated by the administrator of the cloud instance and given to you. - Familiarity with HTTP GET/POST and query strings. - Knowledge of either XML or JSON. - Knowledge of a programming language that can generate HTTP requests; for example, Java or PHP. - -
- diff --git a/docs/en-US/developer-introduction.xml b/docs/en-US/developer-introduction.xml deleted file mode 100644 index 9d54f31dae9..00000000000 --- a/docs/en-US/developer-introduction.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Introduction to the &PRODUCT; API - - - - diff --git a/docs/en-US/disable-enable-zones-pods-clusters.xml b/docs/en-US/disable-enable-zones-pods-clusters.xml deleted file mode 100644 index 7d52ae7c7a9..00000000000 --- a/docs/en-US/disable-enable-zones-pods-clusters.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Disabling and Enabling Zones, Pods, and Clusters - You can enable or disable a zone, pod, or cluster without permanently removing it from the cloud. This is useful for maintenance or when there are problems that make a portion of the cloud infrastructure unreliable. No new allocations will be made to a disabled zone, pod, or cluster until its state is returned to Enabled. When a zone, pod, or cluster is first added to the cloud, it is Disabled by default. - To disable and enable a zone, pod, or cluster: - - Log in to the &PRODUCT; UI as administrator - In the left navigation bar, click Infrastructure. - - In Zones, click View More. - - If you are disabling or enabling a zone, find the name of the zone in the list, and click the Enable/Disable button. - - - - enable-disable.png: button to enable or disable zone, pod, or cluster. - - - If you are disabling or enabling a pod or cluster, click the name of the zone that contains the pod or cluster. - Click the Compute tab. - - In the Pods or Clusters node of the diagram, click View All. - - Click the pod or cluster name in the list. - Click the Enable/Disable button. - - - - -
diff --git a/docs/en-US/disk-volume-usage-record-format.xml b/docs/en-US/disk-volume-usage-record-format.xml deleted file mode 100644 index c15d979e113..00000000000 --- a/docs/en-US/disk-volume-usage-record-format.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -
- Disk Volume Usage Record Format - For disk volumes, the following fields exist in a usage record. - - account – name of the account - accountid – ID of the account - domainid – ID of the domain in which this account resides - zoneid – Zone where the usage occurred - description – A string describing what the usage record is tracking - usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours) - usagetype – A number representing the usage type (see Usage Types) - rawusage – A number representing the actual usage in hours - usageid – The volume ID - offeringid – The ID of the disk offering - type – Hypervisor - templateid – ROOT template ID - size – The amount of storage allocated - startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record - -
diff --git a/docs/en-US/dns-dhcp.xml b/docs/en-US/dns-dhcp.xml deleted file mode 100644 index 2359e8380cd..00000000000 --- a/docs/en-US/dns-dhcp.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- DNS and DHCP - The Virtual Router provides DNS and DHCP services to the guests. It proxies DNS requests to the DNS server configured on the Availability Zone. -
diff --git a/docs/en-US/domains.xml b/docs/en-US/domains.xml deleted file mode 100644 index f348fe88998..00000000000 --- a/docs/en-US/domains.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Domains - If the LDAP server requires SSL, you need to enable it in the ldapConfig command by setting the parameters ssl, truststore, and truststorepass. Before enabling SSL for ldapConfig, you need to get the certificate which the LDAP server is using and add it to a trusted keystore. You will need to know the path to the keystore and the password. -
diff --git a/docs/en-US/egress-firewall-rule.xml b/docs/en-US/egress-firewall-rule.xml deleted file mode 100644 index 93d5a814547..00000000000 --- a/docs/en-US/egress-firewall-rule.xml +++ /dev/null @@ -1,168 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Egress Firewall Rules in an Advanced Zone - The egress traffic originates from a private network to a public network, such as the - Internet. By default, the egress traffic is blocked in default network offerings, so no outgoing - traffic is allowed from a guest network to the Internet. However, you can control the egress - traffic in an Advanced zone by creating egress firewall rules. When an egress firewall rule is - applied, the traffic specific to the rule is allowed and the remaining traffic is blocked. When - all the firewall rules are removed the default policy, Block, is applied. -
- Prerequisites and Guidelines - Consider the following scenarios to apply egress firewall rules: - - - Egress firewall rules are supported on Juniper SRX and virtual router. - - - The egress firewall rules are not supported on shared networks. - - - Allow the egress traffic from specified source CIDR. The Source CIDR is part of guest - network CIDR. - - - Allow the egress traffic with protocol TCP,UDP,ICMP, or ALL. - - - Allow the egress traffic with protocol and destination port range. The port range is - specified for TCP, UDP or for ICMP type and code. - - - The default policy is Allow for the new network offerings, whereas on upgrade existing - network offerings with firewall service providers will have the default egress policy - Deny. - - -
-
- Configuring an Egress Firewall Rule - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In Select view, choose Guest networks, then click the Guest network you want. - - - To add an egress rule, click the Egress rules tab and fill out the following fields to - specify what type of traffic is allowed to be sent out of VM instances in this guest - network: - - - - - - egress-firewall-rule.png: adding an egress firewall rule - - - - - CIDR: (Add by CIDR only) To send traffic only to - the IP addresses within a particular address block, enter a CIDR or a comma-separated - list of CIDRs. The CIDR is the base IP address of the destination. For example, - 192.168.0.0/22. To allow all CIDRs, set to 0.0.0.0/0. - - - Protocol: The networking protocol that VMs uses - to send outgoing traffic. The TCP and UDP protocols are typically used for data - exchange and end-user communications. The ICMP protocol is typically used to send - error messages or network monitoring data. - - - Start Port, End Port: (TCP, UDP only) A range of - listening ports that are the destination for the outgoing traffic. If you are opening - a single port, use the same number in both fields. - - - ICMP Type, ICMP Code: (ICMP only) The type of - message and error code that are sent. - - - - - Click Add. - - -
-
- Configuring the Default Egress Policy - The default egress policy for Isolated guest network is configured by using Network - offering. Use the create network offering option to determine whether the default policy - should be block or allow all the traffic to the public network from a guest network. Use this - network offering to create the network. If no policy is specified, by default all the traffic - is allowed from the guest network that you create by using this network offering. - You have two options: Allow and Deny. - - Allow - If you select Allow for a network offering, by default egress traffic is allowed. - However, when an egress rule is configured for a guest network, rules are applied to block - the specified traffic and rest are allowed. If no egress rules are configured for the - network, egress traffic is accepted. - - - Deny - If you select Deny for a network offering, by default egress traffic for the guest - network is blocked. However, when an egress rules is configured for a guest network, rules - are applied to allow the specified traffic. While implementing a guest network, &PRODUCT; - adds the firewall egress rule specific to the default egress policy for the guest - network. - - This feature is supported only on virtual router and Juniper SRX. - - - Create a network offering with your desirable default egress policy: - - - Log in with admin privileges to the &PRODUCT; UI. - - - In the left navigation bar, click Service Offerings. - - - In Select Offering, choose Network Offering. - - - Click Add Network Offering. - - - In the dialog, make necessary choices, including firewall provider. - - - In the Default egress policy field, specify the behaviour. - - - Click OK. - - - - - Create an isolated network by using this network offering. - Based on your selection, the network will have the egress public traffic blocked or - allowed. - - -
-
diff --git a/docs/en-US/elastic-ip.xml b/docs/en-US/elastic-ip.xml deleted file mode 100644 index 8ecbd75be70..00000000000 --- a/docs/en-US/elastic-ip.xml +++ /dev/null @@ -1,103 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- About Elastic IP - Elastic IP (EIP) addresses are the IP addresses that are associated with an account, and act - as static IP addresses. The account owner has the complete control over the Elastic IP addresses - that belong to the account. As an account owner, you can allocate an Elastic IP to a VM of your - choice from the EIP pool of your account. Later if required you can reassign the IP address to a - different VM. This feature is extremely helpful during VM failure. Instead of replacing the VM - which is down, the IP address can be reassigned to a new VM in your account. - Similar to the public IP address, Elastic IP addresses are mapped to their associated - private IP addresses by using StaticNAT. The EIP service is equipped with StaticNAT (1:1) - service in an EIP-enabled basic zone. The default network offering, - DefaultSharedNetscalerEIPandELBNetworkOffering, provides your network with EIP and ELB network - services if a NetScaler device is deployed in your zone. Consider the following illustration for - more details. - - - - - - eip-ns-basiczone.png: Elastic IP in a NetScaler-enabled Basic Zone. - - - In the illustration, a NetScaler appliance is the default entry or exit point for the - &PRODUCT; instances, and firewall is the default entry or exit point for the rest of the data - center. Netscaler provides LB services and staticNAT service to the guest networks. The guest - traffic in the pods and the Management Server are on different subnets / VLANs. The policy-based - routing in the data center core switch sends the public traffic through the NetScaler, whereas - the rest of the data center goes through the firewall. - The EIP work flow is as follows: - - - When a user VM is deployed, a public IP is automatically acquired from the pool of - public IPs configured in the zone. This IP is owned by the VM's account. - - - Each VM will have its own private IP. 
When the user VM starts, Static NAT is provisioned - on the NetScaler device by using the Inbound Network Address Translation (INAT) and Reverse - NAT (RNAT) rules between the public IP and the private IP. - - Inbound NAT (INAT) is a type of NAT supported by NetScaler, in which the destination - IP address is replaced in the packets from the public network, such as the Internet, with - the private IP address of a VM in the private network. Reverse NAT (RNAT) is a type of NAT - supported by NetScaler, in which the source IP address is replaced in the packets - generated by a VM in the private network with the public IP address. - - - - This default public IP will be released in two cases: - - - When the VM is stopped. When the VM starts, it again receives a new public IP, not - necessarily the same one allocated initially, from the pool of Public IPs. - - - The user acquires a public IP (Elastic IP). This public IP is associated with the - account, but will not be mapped to any private IP. However, the user can enable Static - NAT to associate this IP to the private IP of a VM in the account. The Static NAT rule - for the public IP can be disabled at any time. When Static NAT is disabled, a new public - IP is allocated from the pool, which is not necessarily be the same one allocated - initially. - - - - - For the deployments where public IPs are limited resources, you have the flexibility to - choose not to allocate a public IP by default. You can use the Associate Public IP option to - turn on or off the automatic public IP assignment in the EIP-enabled Basic zones. If you turn - off the automatic public IP assignment while creating a network offering, only a private IP is - assigned to a VM when the VM is deployed with that network offering. Later, the user can acquire - an IP for the VM and enable static NAT. - For more information on the Associate Public IP option, see . - For more information on the Associate Public IP option, see the - Administration Guide. 
- - The Associate Public IP feature is designed only for use with user VMs. The System VMs - continue to get both public IP and private by default, irrespective of the network offering - configuration. - - New deployments which use the default shared network offering with EIP and ELB services to - create a shared network in the Basic zone will continue allocating public IPs to each user - VM. -
diff --git a/docs/en-US/enable-disable-static-nat-vpc.xml b/docs/en-US/enable-disable-static-nat-vpc.xml deleted file mode 100644 index 467a304915d..00000000000 --- a/docs/en-US/enable-disable-static-nat-vpc.xml +++ /dev/null @@ -1,112 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Enabling or Disabling Static NAT on a VPC - A static NAT rule maps a public IP address to the private IP address of a VM in a VPC to - allow Internet traffic to it. This section tells how to enable or disable static NAT for a - particular IP address in a VPC. - If port forwarding rules are already in effect for an IP address, you cannot enable static - NAT to that IP. - If a guest VM is part of more than one network, static NAT rules will function only if they - are defined on the default network. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC to which you want to deploy the VMs. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - For each tier, the following options are displayed. - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - In the Router node, select Public IP Addresses. - The IP Addresses page is displayed. - - - Click the IP you want to work with. - - - In the Details tab,click the Static NAT button. - - - - - enable-disable.png: button to enable Static NAT. - - The button toggles between Enable and Disable, depending on whether - static NAT is currently enabled for the IP address. - - - If you are enabling static NAT, a dialog appears as follows: - - - - - - select-vmstatic-nat.png: selecting a tier to apply staticNAT. - - - - - Select the tier and the destination VM, then click Apply. - - -
diff --git a/docs/en-US/enable-disable-static-nat.xml b/docs/en-US/enable-disable-static-nat.xml deleted file mode 100644 index 0154dca2732..00000000000 --- a/docs/en-US/enable-disable-static-nat.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Enabling or Disabling Static NAT - If port forwarding rules are already in effect for an IP address, you cannot enable static NAT to that IP. - If a guest VM is part of more than one network, static NAT rules will function only if they are defined on the default network. - - Log in to the &PRODUCT; UI as an administrator or end user. - In the left navigation, choose Network. - Click the name of the network you want to work with. - Click View IP Addresses. - Click the IP address you want to work with. - - Click the Static NAT - - - - - ReleaseIPButton.png: button to release an IP - - button. The button toggles between Enable and Disable, depending on whether static NAT is currently enabled for the IP address. - If you are enabling static NAT, a dialog appears where you can choose the destination VM and - click Apply. - -
diff --git a/docs/en-US/enable-security-groups.xml b/docs/en-US/enable-security-groups.xml deleted file mode 100644 index c957310f9d6..00000000000 --- a/docs/en-US/enable-security-groups.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Enabling Security Groups - In order for security groups to function in a zone, the security groups feature must first be - enabled for the zone. The administrator can do this when creating a new zone, by selecting a - network offering that includes security groups. The procedure is described in Basic Zone - Configuration in the Advanced Installation Guide. The administrator cannot enable security - groups for an existing zone, only when creating a new zone. -
- diff --git a/docs/en-US/enabling-api-call-expiration.xml b/docs/en-US/enabling-api-call-expiration.xml deleted file mode 100644 index cd82d3d1141..00000000000 --- a/docs/en-US/enabling-api-call-expiration.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Enabling API Call Expiration - - You can set an expiry timestamp on API calls to prevent replay attacks over non-secure channels, such as HTTP. The server tracks the expiry timestamp you have specified and rejects all the subsequent API requests that come in after this validity period. - - To enable this feature, add the following parameters to the API request: - - signatureVersion=3: If the signatureVersion parameter is missing or is not equal to 3, the expires parameter is ignored in the API request. - expires=YYYY-MM-DDThh:mm:ssZ: Specifies the date and time at which the signature included in the request is expired. The timestamp is expressed in the YYYY-MM-DDThh:mm:ssZ format, as specified in the ISO 8601 standard. - - For example: - expires=2011-10-10T12:00:00+0530 - A sample API request with expiration is given below: - http://<IPAddress>:8080/client/api?command=listZones&signatureVersion=3&expires=2011-10-10T12:00:00+0530&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D -
- diff --git a/docs/en-US/enabling-port-8096.xml b/docs/en-US/enabling-port-8096.xml deleted file mode 100644 index 57c492edcd5..00000000000 --- a/docs/en-US/enabling-port-8096.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Enabling Port 8096 - - Port 8096, which allows API calls without authentication, is closed and disabled by default on any fresh 3.0.1 installations. You can enable 8096 (or another port) for this purpose as follows: - - - Ensure that the first Management Server is installed and running. - Set the global configuration parameter integration.api.port to the desired port. - Restart the Management Server. - On the Management Server host machine, create an iptables rule allowing access to that port. - -
- diff --git a/docs/en-US/end-user-ui-overview.xml b/docs/en-US/end-user-ui-overview.xml deleted file mode 100644 index 6ec1a25fc55..00000000000 --- a/docs/en-US/end-user-ui-overview.xml +++ /dev/null @@ -1,27 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- End User's UI Overview - The &PRODUCT; UI helps users of cloud infrastructure to view and use their cloud resources, including virtual machines, templates and ISOs, data volumes and snapshots, guest networks, and IP addresses. If the user is a member or administrator of one or more &PRODUCT; projects, the UI can provide a project-oriented view. -
diff --git a/docs/en-US/error-handling.xml b/docs/en-US/error-handling.xml deleted file mode 100644 index 3f119bf4d93..00000000000 --- a/docs/en-US/error-handling.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Error Handling - If an error occurs while processing an API request, the appropriate response in the format specified is returned. Each error response consists of an error code and an error text describing what possibly can go wrong. For an example error response, see page 12. - An HTTP error code of 401 is always returned if API request was rejected due to bad signatures, missing API Keys, or the user simply did not have the permissions to execute the command. -
diff --git a/docs/en-US/event-framework.xml b/docs/en-US/event-framework.xml deleted file mode 100644 index 0f62fac1407..00000000000 --- a/docs/en-US/event-framework.xml +++ /dev/null @@ -1,110 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Event Notification - Event notification framework provides a means for the Management Server components to - publish and subscribe to &PRODUCT; events. Event notification is achieved by implementing the - concept of event bus abstraction in the Management Server. An event bus is introduced in the - Management Server that allows the &PRODUCT; components and extension plug-ins to subscribe to the - events by using the Advanced Message Queuing Protocol (AMQP) client. In &PRODUCT;, a default - implementation of event bus is provided as a plug-in that uses the RabbitMQ AMQP client. The - AMQP client pushes the published events to a compatible AMQP server. Therefore all the &PRODUCT; - events are published to an exchange in the AMQP server. - A new event for state change, resource state change, is introduced as part of Event - notification framework. Every resource, such as user VM, volume, NIC, network, public IP, - snapshot, and template, is associated with a state machine and generates events as part of the - state change. That implies that a change in the state of a resource results in a state change - event, and the event is published in the corresponding state machine on the event bus. All the - &PRODUCT; events (alerts, action events, usage events) and the additional category of resource - state change events, are published on to the events bus. - - Use Cases - The following are some of the use cases: - - - - Usage or Billing Engines: A third-party cloud usage solution can implement a plug-in - that can connect to &PRODUCT; to subscribe to &PRODUCT; events and generate usage data. The - usage data is consumed by their usage software. - - - AMQP plug-in can place all the events on a message queue, then an AMQP message broker - can provide topic-based notification to the subscribers. 
- - - Publish and Subscribe notification service can be implemented as a pluggable service in - &PRODUCT; that can provide rich set of APIs for event notification, such as topics-based - subscription and notification. Additionally, the pluggable service can deal with - multi-tenancy, authentication, and authorization issues. - - - - Configuration - As a &PRODUCT; administrator, perform the following one-time configuration to enable event - notification framework. At run time no changes can control the behaviour. - - - - Open 'componentContext.xml. - - - Define a bean named eventNotificationBus as follows: - - - name : Specify a name for the bean. - - - server : The name or the IP address of the RabbitMQ AMQP server. - - - port : The port on which RabbitMQ server is running. - - - username : The username associated with the account to access the RabbitMQ - server. - - - password : The password associated with the username of the account to access the - RabbitMQ server. - - - exchange : The exchange name on the RabbitMQ server where &PRODUCT; events are - published. - A sample bean is given below: - <bean id="eventNotificationBus" class="org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus"> - <property name="name" value="eventNotificationBus"/> - <property name="server" value="127.0.0.1"/> - <property name="port" value="5672"/> - <property name="username" value="guest"/> - <property name="password" value="guest"/> - <property name="exchange" value="cloudstack-events"/> - </bean> - The eventNotificationBus bean represents the - org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus class. - - - - - Restart the Management Server. - - -
diff --git a/docs/en-US/event-log-queries.xml b/docs/en-US/event-log-queries.xml deleted file mode 100644 index a0dcaa607fb..00000000000 --- a/docs/en-US/event-log-queries.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Event Log Queries - Database logs can be queried from the user interface. The list of events captured by the system includes: - - Virtual machine creation, deletion, and on-going management operations - Virtual router creation, deletion, and on-going management operations - - Template creation and deletion - Network/load balancer rules creation and deletion - Storage volume creation and deletion - User login and logout - -
diff --git a/docs/en-US/event-types.xml b/docs/en-US/event-types.xml deleted file mode 100644 index 5ce585763de..00000000000 --- a/docs/en-US/event-types.xml +++ /dev/null @@ -1,220 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Event Types - - - - - - - VM.CREATE - TEMPLATE.EXTRACT - SG.REVOKE.INGRESS - - - VM.DESTROY - TEMPLATE.UPLOAD - HOST.RECONNECT - - - VM.START - TEMPLATE.CLEANUP - MAINT.CANCEL - - - VM.STOP - VOLUME.CREATE - MAINT.CANCEL.PS - - - VM.REBOOT - VOLUME.DELETE - MAINT.PREPARE - - - VM.UPGRADE - VOLUME.ATTACH - MAINT.PREPARE.PS - - - VM.RESETPASSWORD - VOLUME.DETACH - VPN.REMOTE.ACCESS.CREATE - - - ROUTER.CREATE - VOLUME.UPLOAD - VPN.USER.ADD - - - ROUTER.DESTROY - SERVICEOFFERING.CREATE - VPN.USER.REMOVE - - - ROUTER.START - SERVICEOFFERING.UPDATE - NETWORK.RESTART - - - ROUTER.STOP - SERVICEOFFERING.DELETE - UPLOAD.CUSTOM.CERTIFICATE - - - ROUTER.REBOOT - DOMAIN.CREATE - UPLOAD.CUSTOM.CERTIFICATE - - - ROUTER.HA - DOMAIN.DELETE - STATICNAT.DISABLE - - - PROXY.CREATE - DOMAIN.UPDATE - SSVM.CREATE - - - PROXY.DESTROY - SNAPSHOT.CREATE - SSVM.DESTROY - - - PROXY.START - SNAPSHOT.DELETE - SSVM.START - - - PROXY.STOP - SNAPSHOTPOLICY.CREATE - SSVM.STOP - - - PROXY.REBOOT - SNAPSHOTPOLICY.UPDATE - SSVM.REBOOT - - - PROXY.HA - SNAPSHOTPOLICY.DELETE - SSVM.H - - - VNC.CONNECT - VNC.DISCONNECT - NET.IPASSIGN - - - NET.IPRELEASE - NET.RULEADD - NET.RULEDELETE - - - NET.RULEMODIFY - NETWORK.CREATE - NETWORK.DELETE - - - LB.ASSIGN.TO.RULE - LB.REMOVE.FROM.RULE - LB.CREATE - - - LB.DELETE - LB.UPDATE - USER.LOGIN - - - USER.LOGOUT - USER.CREATE - USER.DELETE - - - USER.UPDATE - USER.DISABLE - TEMPLATE.CREATE - - - TEMPLATE.DELETE - TEMPLATE.UPDATE - TEMPLATE.COPY - - - TEMPLATE.DOWNLOAD.START - TEMPLATE.DOWNLOAD.SUCCESS - TEMPLATE.DOWNLOAD.FAILED - - - ISO.CREATE - ISO.DELETE - ISO.COPY - - - ISO.ATTACH - ISO.DETACH - ISO.EXTRACT - - - ISO.UPLOAD - SERVICE.OFFERING.CREATE - SERVICE.OFFERING.EDIT - - - SERVICE.OFFERING.DELETE - DISK.OFFERING.CREATE - 
DISK.OFFERING.EDIT - - - DISK.OFFERING.DELETE - NETWORK.OFFERING.CREATE - NETWORK.OFFERING.EDIT - - - NETWORK.OFFERING.DELETE - POD.CREATE - POD.EDIT - - - POD.DELETE - ZONE.CREATE - ZONE.EDIT - - - ZONE.DELETE - VLAN.IP.RANGE.CREATE - VLAN.IP.RANGE.DELETE - - - CONFIGURATION.VALUE.EDIT - SG.AUTH.INGRESS - - - - - - diff --git a/docs/en-US/events-log.xml b/docs/en-US/events-log.xml deleted file mode 100644 index fa97db45959..00000000000 --- a/docs/en-US/events-log.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Event Logs - There are two types of events logged in the &PRODUCT; Event Log. Standard events log - the success or failure of an event and can be used to identify jobs or processes that have - failed. There are also long running job events. Events for asynchronous jobs log when a job - is scheduled, when it starts, and when it completes. Other long running synchronous jobs log - when a job starts, and when it completes. Long running synchronous and asynchronous event - logs can be used to gain more information on the status of a pending job or can be used to - identify a job that is hanging or has not started. The following sections provide more - information on these events. -
- diff --git a/docs/en-US/events.xml b/docs/en-US/events.xml deleted file mode 100644 index 3b93ee0451e..00000000000 --- a/docs/en-US/events.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Events - An event is essentially a significant or meaningful change in the state of both virtual and - physical resources associated with a cloud environment. Events are used by monitoring systems, - usage and billing systems, or any other event-driven workflow systems to discern a pattern and - make the right business decision. In &PRODUCT; an event could be a state change of virtual or - physical resources, an action performed by a user (action events), or policy based events - (alerts). - - - - - -
diff --git a/docs/en-US/example-activedirectory-configuration.xml b/docs/en-US/example-activedirectory-configuration.xml deleted file mode 100644 index 5a8178d5843..00000000000 --- a/docs/en-US/example-activedirectory-configuration.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Example LDAP Configuration for Active Directory - This shows the configuration settings required for using ActiveDirectory. - - samAccountName - Logon name - mail - Email Address - cn - Real name - - Along with this the ldap.user.object name needs to be modified, by default ActiveDirectory uses the value "user" for this. - Map the following attributes accordingly as shown below: - - - - - - add-ldap-configuration-ad.png: example configuration for active directory. - - -
diff --git a/docs/en-US/example-openldap-configuration.xml b/docs/en-US/example-openldap-configuration.xml deleted file mode 100644 index aa57a00cf18..00000000000 --- a/docs/en-US/example-openldap-configuration.xml +++ /dev/null @@ -1,44 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Example LDAP Configuration for OpenLdap - This shows the configuration settings required for using OpenLDAP. - The default values supplied are suited for OpenLDAP. - - uid - Logon name - mail - Email Address - cn - Real name - - Along with this the ldap.user.object name needs to be modified, by default OpenLDAP uses the value "inetOrgPerson" for this. - Map the following attributes accordingly as shown below within the cloudstack ldap configuration: - - - - - - add-ldap-configuration-openldap.png: example configuration for OpenLdap. - - -
diff --git a/docs/en-US/example-response-from-listUsageRecords.xml b/docs/en-US/example-response-from-listUsageRecords.xml deleted file mode 100644 index e0d79240e09..00000000000 --- a/docs/en-US/example-response-from-listUsageRecords.xml +++ /dev/null @@ -1,56 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Example response from listUsageRecords - - All &PRODUCT; API requests are submitted in the form of a HTTP GET/POST with an associated command and any parameters. A request is composed of the following whether in HTTP or HTTPS: - - - <listusagerecordsresponse> - <count>1816</count> - <usagerecord> - <account>user5</account> - <accountid>10004</accountid> - <domainid>1</domainid> - <zoneid>1</zoneid> - <description>i-3-4-WC running time (ServiceOffering: 1) (Template: 3)</description> - <usage>2.95288 Hrs</usage> - <usagetype>1</usagetype> - <rawusage>2.95288</rawusage> - <virtualmachineid>4</virtualmachineid> - <name>i-3-4-WC</name> - <offeringid>1</offeringid> - <templateid>3</templateid> - <usageid>245554</usageid> - <type>XenServer</type> - <startdate>2009-09-15T00:00:00-0700</startdate> - <enddate>2009-09-18T16:14:26-0700</enddate> - </usagerecord> - - … (1,815 more usage records) - </listusagerecordsresponse> - -
- diff --git a/docs/en-US/export-template.xml b/docs/en-US/export-template.xml deleted file mode 100644 index c225e360344..00000000000 --- a/docs/en-US/export-template.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Exporting Templates - End users and Administrators may export templates from the &PRODUCT;. Navigate to the template in the UI and choose the Download function from the Actions menu. - -
diff --git a/docs/en-US/external-firewalls-and-load-balancers.xml b/docs/en-US/external-firewalls-and-load-balancers.xml deleted file mode 100644 index 42ecacf9f75..00000000000 --- a/docs/en-US/external-firewalls-and-load-balancers.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- External Firewalls and Load Balancers - &PRODUCT; is capable of replacing its Virtual Router with an external Juniper SRX device and - an optional external NetScaler or F5 load balancer for gateway and load balancing services. In - this case, the VMs use the SRX as their gateway. - - - - - - -
diff --git a/docs/en-US/external-fw-topology-req.xml b/docs/en-US/external-fw-topology-req.xml deleted file mode 100644 index ab81496a30a..00000000000 --- a/docs/en-US/external-fw-topology-req.xml +++ /dev/null @@ -1,25 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- External Firewall Topology Requirements - When external firewall integration is in place, the public IP VLAN must still be trunked to - the Hosts. This is required to support the Secondary Storage VM and Console Proxy VM. -
diff --git a/docs/en-US/external-guest-firewall-integration.xml b/docs/en-US/external-guest-firewall-integration.xml deleted file mode 100644 index 0b34dca1065..00000000000 --- a/docs/en-US/external-guest-firewall-integration.xml +++ /dev/null @@ -1,201 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- External Guest Firewall Integration for Juniper SRX (Optional) - - Available only for guests using advanced networking. - - &PRODUCT; provides for direct management of the Juniper SRX series of firewalls. This - enables &PRODUCT; to establish static NAT mappings from public IPs to guest VMs, and to use - the Juniper device in place of the virtual router for firewall services. You can have one or - more Juniper SRX per zone. This feature is optional. If Juniper integration is not provisioned, - &PRODUCT; will use the virtual router for these services. - The Juniper SRX can optionally be used in conjunction with an external load balancer. - External Network elements can be deployed in a side-by-side or inline configuration. - - - - - - parallel-mode.png: adding a firewall and load balancer in parallel mode. - - - &PRODUCT; requires the Juniper to be configured as follows: - - Supported SRX software version is 10.3 or higher. - - - - Install your SRX appliance according to the vendor's instructions. - - - Connect one interface to the management network and one interface to the public network. - Alternatively, you can connect the same interface to both networks and a use a VLAN for the - public network. - - - Make sure "vlan-tagging" is enabled on the private interface. - - - Record the public and private interface names. If you used a VLAN for the public - interface, add a ".[VLAN TAG]" after the interface name. For example, if you are using - ge-0/0/3 for your public interface and VLAN tag 301, your public interface name would be - "ge-0/0/3.301". Your private interface name should always be untagged because the - &PRODUCT; software automatically creates tagged logical interfaces. - - - Create a public security zone and a private security zone. By default, these will - already exist and will be called "untrust" and "trust". Add the public interface to the - public zone and the private interface to the private zone. Note down the security zone - names. 
- - - Make sure there is a security policy from the private zone to the public zone that - allows all traffic. - - - Note the username and password of the account you want the &PRODUCT; software to log - in to when it is programming rules. - - - Make sure the "ssh" and "xnm-clear-text" system services are enabled. - - - If traffic metering is desired: - - - a. Create an incoming firewall filter and an outgoing firewall filter. These filters - should be the same names as your public security zone name and private security zone - name respectively. The filters should be set to be "interface-specific". For example, - here is the configuration where the public zone is "untrust" and the private zone is - "trust": - root@cloud-srx# show firewall -filter trust { - interface-specific; -} -filter untrust { - interface-specific; -} - - - Add the firewall filters to your public interface. For example, a sample - configuration output (for public interface ge-0/0/3.0, public security zone untrust, and - private security zone trust) is: - ge-0/0/3 { - unit 0 { - family inet { - filter { - input untrust; - output trust; - } - address 172.25.0.252/16; - } - } -} - - - - - Make sure all VLANs are brought to the private interface of the SRX. - - - After the &PRODUCT; Management Server is installed, log in to the &PRODUCT; UI as - administrator. - - - In the left navigation bar, click Infrastructure. - - - In Zones, click View More. - - - Choose the zone you want to work with. - - - Click the Network tab. - - - In the Network Service Providers node of the diagram, click Configure. (You might have - to scroll down to see this.) - - - Click SRX. - - - Click the Add New SRX button (+) and provide the following: - - - IP Address: The IP address of the SRX. - - - Username: The user name of the account on the SRX that &PRODUCT; should use. - - - Password: The password of the account. - - - Public Interface. The name of the public interface on the SRX. For example, - ge-0/0/2. 
A ".x" at the end of the interface indicates the VLAN that is in use. - - - Private Interface: The name of the private interface on the SRX. For example, - ge-0/0/1. - - - Usage Interface: (Optional) Typically, the public interface is used to meter - traffic. If you want to use a different interface, specify its name here - - - Number of Retries: The number of times to attempt a command on the SRX before - failing. The default value is 2. - - - Timeout (seconds): The time to wait for a command on the SRX before considering it - failed. Default is 300 seconds. - - - Public Network: The name of the public network on the SRX. For example, - trust. - - - Private Network: The name of the private network on the SRX. For example, - untrust. - - - Capacity: The number of networks the device can handle - - - Dedicated: When marked as dedicated, this device will be dedicated to a single - account. When Dedicated is checked, the value in the Capacity field has no significance - implicitly, its value is 1 - - - - - Click OK. - - - Click Global Settings. Set the parameter external.network.stats.interval to indicate how - often you want &PRODUCT; to fetch network usage statistics from the Juniper SRX. If you - are not using the SRX to gather network usage statistics, set to 0. - - -
diff --git a/docs/en-US/external-guest-lb-integration.xml b/docs/en-US/external-guest-lb-integration.xml deleted file mode 100644 index 5760f9559e6..00000000000 --- a/docs/en-US/external-guest-lb-integration.xml +++ /dev/null @@ -1,109 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- External Guest Load Balancer Integration (Optional) - &PRODUCT; can optionally use a Citrix NetScaler or BigIP F5 load balancer to provide load - balancing services to guests. If this is not enabled, &PRODUCT; will use the software load - balancer in the virtual router. - To install and enable an external load balancer for &PRODUCT; management: - - - Set up the appliance according to the vendor's directions. - - - Connect it to the networks carrying public traffic and management traffic (these could - be the same network). - - - Record the IP address, username, password, public interface name, and private interface - name. The interface names will be something like "1.1" or "1.2". - - - Make sure that the VLANs are trunked to the management network interface. - - - After the &PRODUCT; Management Server is installed, log in as administrator to the - &PRODUCT; UI. - - - In the left navigation bar, click Infrastructure. - - - In Zones, click View More. - - - Choose the zone you want to work with. - - - Click the Network tab. - - - In the Network Service Providers node of the diagram, click Configure. (You might have - to scroll down to see this.) - - - Click NetScaler or F5. - - - Click the Add button (+) and provide the following: - For NetScaler: - - - IP Address: The IP address of the SRX. - - - Username/Password: The authentication credentials to access the device. &PRODUCT; - uses these credentials to access the device. - - - Type: The type of device that is being added. It could be F5 Big Ip Load Balancer, - NetScaler VPX, NetScaler MPX, or NetScaler SDX. For a comparison of the NetScaler types, - see the &PRODUCT; Administration Guide. - - - Public interface: Interface of device that is configured to be part of the public - network. - - - Private interface: Interface of device that is configured to be part of the private - network. - - - Number of retries. Number of times to attempt a command on the device before - considering the operation failed. 
Default is 2. - - - Capacity: The number of networks the device can handle. - - - Dedicated: When marked as dedicated, this device will be dedicated to a single - account. When Dedicated is checked, the value in the Capacity field has no significance - implicitly, its value is 1. - - - - - Click OK. - - - The installation and provisioning of the external load balancer is finished. You can proceed - to add VMs and NAT or load balancing rules. -
diff --git a/docs/en-US/extracting-source.xml b/docs/en-US/extracting-source.xml deleted file mode 100644 index d1690401229..00000000000 --- a/docs/en-US/extracting-source.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Extracting source - - Extracting the &PRODUCT; release is relatively simple and can be done - with a single command as follows: - $ tar -jxvf apache-cloudstack-4.1.0.src.tar.bz2 - - - You can now move into the directory: - $ cd ./apache-cloudstack-4.1.0-src - -
diff --git a/docs/en-US/feature-overview.xml b/docs/en-US/feature-overview.xml deleted file mode 100644 index 57b6d84973d..00000000000 --- a/docs/en-US/feature-overview.xml +++ /dev/null @@ -1,81 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- What Can &PRODUCT; Do? - - Multiple Hypervisor Support - - - &PRODUCT; works with a variety of hypervisors, and a single cloud deployment can contain multiple hypervisor implementations. The current release of &PRODUCT; supports pre-packaged enterprise solutions like Citrix XenServer and VMware vSphere, as well as KVM or Xen running on Ubuntu or CentOS. - - - Massively Scalable Infrastructure Management - - - &PRODUCT; can manage tens of thousands of servers installed in multiple geographically distributed datacenters. The centralized management server scales linearly, eliminating the need for intermediate cluster-level management servers. No single component failure can cause cloud-wide outage. Periodic maintenance of the management server can be performed without affecting the functioning of virtual machines running in the cloud. - - - Automatic Configuration Management - - &PRODUCT; automatically configures each guest virtual machine’s networking and storage settings. - - &PRODUCT; internally manages a pool of virtual appliances to support the cloud itself. These appliances offer services such as firewalling, routing, DHCP, VPN access, console proxy, storage access, and storage replication. The extensive use of virtual appliances simplifies the installation, configuration, and ongoing management of a cloud deployment. - - - Graphical User Interface - - &PRODUCT; offers an administrator's Web interface, used for provisioning and managing the cloud, as well as an end-user's Web interface, used for running VMs and managing VM templates. The UI can be customized to reflect the desired service provider or enterprise look and feel. - - - API and Extensibility - - - &PRODUCT; provides an API that gives programmatic access to all the - management features available in the UI. The API is maintained and - documented. This API enables the creation of command line tools and - new user interfaces to suit particular needs. 
See the Developer’s - Guide and API Reference, both available at - Apache CloudStack Guides - and - Apache CloudStack API Reference - respectively. - - - The &PRODUCT; pluggable allocation architecture allows the creation - of new types of allocators for the selection of storage and Hosts. - See the Allocator Implementation Guide - (http://docs.cloudstack.org/CloudStack_Documentation/Allocator_Implementation_Guide). - - - High Availability - - - &PRODUCT; has a number of features to increase the availability of the - system. The Management Server itself may be deployed in a multi-node - installation where the servers are load balanced. MySQL may be configured - to use replication to provide for a manual failover in the event of - database loss. For the hosts, &PRODUCT; supports NIC bonding and the use - of separate networks for storage as well as iSCSI Multipath. - -
diff --git a/docs/en-US/feedback.xml b/docs/en-US/feedback.xml deleted file mode 100644 index 4b06c9f3898..00000000000 --- a/docs/en-US/feedback.xml +++ /dev/null @@ -1,24 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Feedback - to-do -
diff --git a/docs/en-US/firewall-rules.xml b/docs/en-US/firewall-rules.xml deleted file mode 100644 index 837a4c6f9d0..00000000000 --- a/docs/en-US/firewall-rules.xml +++ /dev/null @@ -1,82 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Firewall Rules - By default, all incoming traffic to the public IP address is rejected by the firewall. To - allow external traffic, you can open firewall ports by specifying firewall rules. You can - optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to - allow only incoming requests from certain IP addresses. - You cannot use firewall rules to open ports for an elastic IP address. When elastic IP is - used, outside access is instead controlled through the use of security groups. See . - In an advanced zone, you can also create egress firewall rules by using the virtual router. - For more information, see . - Firewall rules can be created using the Firewall tab in the Management Server UI. This tab - is not displayed by default when &PRODUCT; is installed. To display the Firewall tab, the - &PRODUCT; administrator must set the global configuration parameter firewall.rule.ui.enabled to - "true." - To create a firewall rule: - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - Click the name of the network where you want to work with. - - - Click View IP Addresses. - - - Click the IP address you want to work with. - - - Click the Configuration tab and fill in the following values. - - - Source CIDR. (Optional) To accept only traffic from - IP addresses within a particular address block, enter a CIDR or a comma-separated list - of CIDRs. Example: 192.168.0.0/22. Leave empty to allow all CIDRs. - - - Protocol. The communication protocol in use on the - opened port(s). - - - Start Port and End Port. The port(s) you want to - open on the firewall. If you are opening a single port, use the same number in both - fields - - - ICMP Type and ICMP Code. Used only if Protocol is - set to ICMP. Provide the type and code required by the ICMP protocol to fill out the - ICMP header. 
Refer to ICMP documentation for more details if you are not sure what to - enter - - - - - Click Add. - - -
diff --git a/docs/en-US/first_ms_node_install.xml b/docs/en-US/first_ms_node_install.xml deleted file mode 100644 index af6b35b2c53..00000000000 --- a/docs/en-US/first_ms_node_install.xml +++ /dev/null @@ -1,57 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Install the First Management Server - - - - Ensure you have configured your machine according to - - or - - as appropriate for your platform. - - - - - Install the &PRODUCT; management server packages by - issuing one of the following commands as appropriate: - # yum install cloudstack-management - # apt-get install cloudstack-management - - - - - (RPM-based distributions) When the installation is - finished, run the following commands to start essential - services: - # service rpcbind start -# service nfs start -# chkconfig nfs on -# chkconfig rpcbind on - - - -
diff --git a/docs/en-US/generic-firewall-provisions.xml b/docs/en-US/generic-firewall-provisions.xml deleted file mode 100644 index 53ae45a09e0..00000000000 --- a/docs/en-US/generic-firewall-provisions.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Generic Firewall Provisions - The hardware firewall is required to serve two purposes: - - - Protect the Management Servers. NAT and port forwarding should be configured to direct - traffic from the public Internet to the Management Servers. - - - Route management network traffic between multiple zones. Site-to-site VPN should be - configured between multiple zones. - - - To achieve the above purposes you must set up fixed configurations for the firewall. - Firewall rules and policies need not change as users are provisioned into the cloud. Any brand - of hardware firewall that supports NAT and site-to-site VPN can be used. -
diff --git a/docs/en-US/getting-release.xml b/docs/en-US/getting-release.xml deleted file mode 100644 index 33c246f08c5..00000000000 --- a/docs/en-US/getting-release.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Getting the release - You can download the latest &PRODUCT; release from the - - Apache CloudStack project download page. - - Prior releases are available via archive.apache.org as well. See the downloads page for more information on archived releases. - You'll notice several links under the 'Latest release' section. A link to a file ending in tar.bz2, as well as a PGP/GPG signature, MD5, and SHA512 file. - - The tar.bz2 file contains the Bzip2-compressed tarball with the source code. - The .asc file is a detached cryptographic signature that can be used to help verify the authenticity of the release. - The .md5 file is an MD5 hash of the release to aid in verifying the validity of the release download. - The .sha file is a SHA512 hash of the release to aid in verifying the validity of the release download. -
diff --git a/docs/en-US/global-config.xml b/docs/en-US/global-config.xml deleted file mode 100644 index 237614d3f85..00000000000 --- a/docs/en-US/global-config.xml +++ /dev/null @@ -1,342 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Setting Configuration Parameters -
- About Configuration Parameters - &PRODUCT; provides a variety of settings you can use to set limits, configure features, - and enable or disable features in the cloud. Once your Management Server is running, you might - need to set some of these configuration parameters, depending on what optional features you - are setting up. You can set default values at the global level, which will be in effect - throughout the cloud unless you override them at a lower level. You can make local settings, - which will override the global configuration parameter values, at the level of an account, - zone, cluster, or primary storage. - The documentation for each &PRODUCT; feature should direct you to the names of the - applicable parameters. The following table shows a few of the more useful parameters. - - - - - - - Field - Value - - - - - management.network.cidr - A CIDR that describes the network that the management CIDRs reside on. This - variable must be set for deployments that use vSphere. It is recommended to be set - for other deployments as well. Example: 192.168.3.0/24. - - - xen.setup.multipath - For XenServer nodes, this is a true/false variable that instructs - CloudStack to enable iSCSI multipath on the XenServer Hosts when they are added. - This defaults to false. Set it to true if you would like CloudStack to enable - multipath. - If this is true for a NFS-based deployment multipath will still be enabled on - the XenServer host. However, this does not impact NFS operation and is - harmless. - - - secstorage.allowed.internal.sites - This is used to protect your internal network from rogue attempts to - download arbitrary files using the template download feature. This is a - comma-separated list of CIDRs. If a requested URL matches any of these CIDRs the - Secondary Storage VM will use the private network interface to fetch the URL. Other - URLs will go through the public interface. 
We suggest you set this to 1 or 2 - hardened internal machines where you keep your templates. For example, set it to - 192.168.1.66/32. - - - use.local.storage - Determines whether CloudStack will use storage that is local to the Host - for data disks, templates, and snapshots. By default CloudStack will not use this - storage. You should change this to true if you want to use local storage and you - understand the reliability and feature drawbacks to choosing local - storage. - - - host - This is the IP address of the Management Server. If you are using multiple - Management Servers you should enter a load balanced IP address that is reachable via - the private network. - - - default.page.size - Maximum number of items per page that can be returned by a CloudStack API - command. The limit applies at the cloud level and can vary from cloud to cloud. You - can override this with a lower value on a particular API call by using the page and - page size API command parameters. For more information, see the Developer's Guide. - Default: 500. - - - ha.tag - The label you want to use throughout the cloud to designate certain hosts - as dedicated HA hosts. These hosts will be used only for HA-enabled VMs that are - restarting due to the failure of another host. For example, you could set this to - ha_host. Specify the ha.tag value as a host tag when you add a new host to the - cloud. - - - - -
-
- Setting Global Configuration Parameters - Use the following steps to set global configuration parameters. These values will be the - defaults in effect throughout your &PRODUCT; deployment. - - - Log in to the UI as administrator. - - - In the left navigation bar, click Global Settings. - - - In Select View, choose one of the following: - - - Global Settings. This displays a list of the parameters with brief descriptions - and current values. - - - Hypervisor Capabilities. This displays a list of hypervisor versions with the - maximum number of guests supported for each. - - - - - Use the search box to narrow down the list to those you are interested in. - - - In the Actions column, click the Edit icon to modify a value. If you are viewing - Hypervisor Capabilities, you must click the name of the hypervisor first to display the - editing screen. - - -
-
- Setting Local Configuration Parameters - Use the following steps to set local configuration parameters for an account, zone, - cluster, or primary storage. These values will override the global configuration - settings. - - - Log in to the UI as administrator. - - - In the left navigation bar, click Infrastructure or Accounts, depending on where you - want to set a value. - - - Find the name of the particular resource that you want to work with. For example, if - you are in Infrastructure, click View All on the Zones, Clusters, or Primary Storage - area. - - - Click the name of the resource where you want to set a limit. - - - Click the Settings tab. - - - Use the search box to narrow down the list to those you are interested in. - - - In the Actions column, click the Edit icon to modify a value. - - -
-
- Granular Global Configuration Parameters - The following global configuration parameters have been made more granular. The parameters - are listed under three different scopes: account, cluster, and zone. - - - - - - - - Field - Field - Value - - - - - account - remote.access.vpn.client.iprange - The range of IPs to be allocated to remotely access the VPN clients. The - first IP in the range is used by the VPN server. - - - account - allow.public.user.templates - If false, users will not be able to create public templates. - - - account - use.system.public.ips - If true and if an account has one or more dedicated public IP ranges, IPs - are acquired from the system pool after all the IPs dedicated to the account have - been consumed. - - - account - use.system.guest.vlans - If true and if an account has one or more dedicated guest VLAN ranges, - VLANs are allocated from the system pool after all the VLANs dedicated to the - account have been consumed. - - - cluster - cluster.storage.allocated.capacity.notificationthreshold - The percentage, as a value between 0 and 1, of allocated storage utilization above which - alerts are sent that the storage is below the threshold. - - - cluster - cluster.storage.capacity.notificationthreshold - The percentage, as a value between 0 and 1, of storage utilization above which alerts are sent - that the available storage is below the threshold. - - - cluster - cluster.cpu.allocated.capacity.notificationthreshold - The percentage, as a value between 0 and 1, of cpu utilization above which alerts are sent - that the available CPU is below the threshold. - - - cluster - cluster.memory.allocated.capacity.notificationthreshold - The percentage, as a value between 0 and 1, of memory utilization above which alerts are sent - that the available memory is below the threshold. 
- - - cluster - cluster.cpu.allocated.capacity.disablethreshold - The percentage, as a value between 0 and 1, of CPU utilization above which allocators will - disable that cluster from further usage. Keep the corresponding notification - threshold lower than this value to be notified beforehand. - - - cluster - cluster.memory.allocated.capacity.disablethreshold - The percentage, as a value between 0 and 1, of memory utilization above which allocators will - disable that cluster from further usage. Keep the corresponding notification - threshold lower than this value to be notified beforehand. - - - cluster - cpu.overprovisioning.factor - Used for CPU over-provisioning calculation; the available CPU will be the mathematical product - of actualCpuCapacity and cpu.overprovisioning.factor. - - - cluster - mem.overprovisioning.factor - Used for memory over-provisioning calculation. - - - cluster - vmware.reserve.cpu - Specify whether or not to reserve CPU when not over-provisioning; In case of CPU - over-provisioning, CPU is always reserved. - - - cluster - vmware.reserve.mem - Specify whether or not to reserve memory when not over-provisioning; In case of memory - over-provisioning memory is always reserved. - - - zone - pool.storage.allocated.capacity.disablethreshold - The percentage, as a value between 0 and 1, of allocated storage utilization above which - allocators will disable that pool because the available allocated storage is below - the threshold. - - - zone - pool.storage.capacity.disablethreshold - The percentage, as a value between 0 and 1, of storage utilization above which allocators will - disable the pool because the available storage capacity is below the - threshold. - - - zone - storage.overprovisioning.factor - Used for storage over-provisioning calculation; available storage will be the mathematical - product of actualStorageSize and storage.overprovisioning.factor. 
- - - zone - network.throttling.rate - Default data transfer rate in megabits per second allowed in a network. - - - zone - guest.domain.suffix - Default domain name for VMs inside a virtual network with a router. - - - zone - router.template.xen - Name of the default router template on Xenserver. - - - zone - router.template.kvm - Name of the default router template on KVM. - - - zone - router.template.vmware - Name of the default router template on VMware. - - - zone - enable.dynamic.scale.vm - Enable or disable dynamic scaling of a VM. - - - zone - use.external.dns - Bypass internal DNS, and use the external DNS1 and DNS2 - - - zone - blacklisted.routes - Routes that are blacklisted cannot be used for creating static routes for a VPC Private - Gateway. - - - - -
-
diff --git a/docs/en-US/globally-configured-limits.xml b/docs/en-US/globally-configured-limits.xml deleted file mode 100644 index ac71112b310..00000000000 --- a/docs/en-US/globally-configured-limits.xml +++ /dev/null @@ -1,100 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Globally Configured Limits - In a zone, the guest virtual network has a 24 bit CIDR by default. This limits the guest virtual network to 254 running instances. It can be adjusted as needed, but this must be done before any instances are created in the zone. For example, 10.1.1.0/22 would provide for ~1000 addresses. - The following table lists limits set in the Global Configuration: - - - - - Parameter Name - Definition - - - - - - max.account.public.ips - Number of public IP addresses that can be owned by an account - - - - max.account.snapshots - Number of snapshots that can exist for an account - - - - - max.account.templates - Number of templates that can exist for an account - - - - max.account.user.vms - Number of virtual machine instances that can exist for an account - - - - max.account.volumes - Number of disk volumes that can exist for an account - - - - max.template.iso.size - Maximum size for a downloaded template or ISO in GB - - - - max.volume.size.gb - Maximum size for a volume in GB - - - network.throttling.rate - Default data transfer rate in megabits per second allowed per user (supported on XenServer) - - - snapshot.max.hourly - Maximum recurring hourly snapshots to be retained for a volume. If the limit is reached, early snapshots from the start of the hour are deleted so that newer ones can be saved. This limit does not apply to manual snapshots. If set to 0, recurring hourly snapshots can not be scheduled - - - - snapshot.max.daily - Maximum recurring daily snapshots to be retained for a volume. If the limit is reached, snapshots from the start of the day are deleted so that newer ones can be saved. This limit does not apply to manual snapshots. If set to 0, recurring daily snapshots can not be scheduled - - - snapshot.max.weekly - Maximum recurring weekly snapshots to be retained for a volume. If the limit is reached, snapshots from the beginning of the week are deleted so that newer ones can be saved. 
This limit does not apply to manual snapshots. If set to 0, recurring weekly snapshots can not be scheduled - - - - snapshot.max.monthly - Maximum recurring monthly snapshots to be retained for a volume. If the limit is reached, snapshots from the beginning of the month are deleted so that newer ones can be saved. This limit does not apply to manual snapshots. If set to 0, recurring monthly snapshots can not be scheduled. - - - - - To modify global configuration parameters, use the global configuration screen in the &PRODUCT; UI. See Setting Global Configuration Parameters -
diff --git a/docs/en-US/gslb.xml b/docs/en-US/gslb.xml deleted file mode 100644 index 968e8e2cefa..00000000000 --- a/docs/en-US/gslb.xml +++ /dev/null @@ -1,487 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Global Server Load Balancing Support - &PRODUCT; supports Global Server Load Balancing (GSLB) functionalities to provide business - continuity, and enable seamless resource movement within a &PRODUCT; environment. &PRODUCT; - achieves this by extending its functionality of integrating with NetScaler Application Delivery - Controller (ADC), which also provides various GSLB capabilities, such as disaster recovery and - load balancing. The DNS redirection technique is used to achieve GSLB in &PRODUCT;. - In order to support this functionality, region level services and service provider are - introduced. A new service 'GSLB' is introduced as a region level service. The GSLB service - provider is introduced that will provide the GSLB service. Currently, NetScaler is the - supported GSLB provider in &PRODUCT;. GSLB functionality works in an Active-Active data center - environment. -
- About Global Server Load Balancing - Global Server Load Balancing (GSLB) is an extension of load balancing functionality, which - is highly efficient in avoiding downtime. Based on the nature of deployment, GSLB represents a - set of technologies that is used for various purposes, such as load sharing, disaster - recovery, performance, and legal obligations. With GSLB, workloads can be distributed across - multiple data centers situated at geographically separated locations. GSLB can also provide an - alternate location for accessing a resource in the event of a failure, or to provide a means - of shifting traffic easily to simplify maintenance, or both. -
- Components of GSLB - A typical GSLB environment is comprised of the following components: - - - GSLB Site: In &PRODUCT; terminology, GSLB sites are - represented by zones that are mapped to data centers, each of which has various network - appliances. Each GSLB site is managed by a NetScaler appliance that is local to that - site. Each of these appliances treats its own site as the local site and all other - sites, managed by other appliances, as remote sites. It is the central entity in a GSLB - deployment, and is represented by a name and an IP address. - - - GSLB Services: A GSLB service is typically - represented by a load balancing or content switching virtual server. In a GSLB - environment, you can have a local as well as remote GSLB services. A local GSLB service - represents a local load balancing or content switching virtual server. A remote GSLB - service is the one configured at one of the other sites in the GSLB setup. At each site - in the GSLB setup, you can create one local GSLB service and any number of remote GSLB - services. - - - GSLB Virtual Servers: A GSLB virtual server refers - to one or more GSLB services and balances traffic between traffic across the VMs in - multiple zones by using the &PRODUCT; functionality. It evaluates the configured GSLB - methods or algorithms to select a GSLB service to which to send the client requests. One - or more virtual servers from different zones are bound to the GSLB virtual server. GSLB - virtual server does not have a public IP associated with it, instead it will have a FQDN - DNS name. - - - Load Balancing or Content Switching Virtual - Servers: According to Citrix NetScaler terminology, a load balancing or - content switching virtual server represents one or many servers on the local network. - Clients send their requests to the load balancing or content switching virtual server’s - virtual IP (VIP) address, and the virtual server balances the load across the local - servers. 
After a GSLB virtual server selects a GSLB service representing either a local - or a remote load balancing or content switching virtual server, the client sends the - request to that virtual server’s VIP address. - - - DNS VIPs: DNS virtual IP represents a load - balancing DNS virtual server on the GSLB service provider. The DNS requests for domains - for which the GSLB service provider is authoritative can be sent to a DNS VIP. - - - Authoritative DNS: ADNS (Authoritative Domain Name - Server) is a service that provides actual answer to DNS queries, such as web site IP - address. In a GSLB environment, an ADNS service responds only to DNS requests for - domains for which the GSLB service provider is authoritative. When an ADNS service is - configured, the service provider owns that IP address and advertises it. When you create - an ADNS service, the NetScaler responds to DNS queries on the configured ADNS service IP - and port. - - -
-
- How Does GSLB Works in &PRODUCT;? - Global server load balancing is used to manage the traffic flow to a web site hosted on - two separate zones that ideally are in different geographic locations. The following is an - illustration of how GLSB functionality is provided in &PRODUCT;: An organization, xyztelco, - has set up a public cloud that spans two zones, Zone-1 and Zone-2, across geographically - separated data centers that are managed by &PRODUCT;. Tenant-A of the cloud launches a - highly available solution by using xyztelco cloud. For that purpose, they launch two - instances each in both the zones: VM1 and VM2 in Zone-1 and VM5 and VM6 in Zone-2. Tenant-A - acquires a public IP, IP-1 in Zone-1, and configures a load balancer rule to load balance - the traffic between VM1 and VM2 instances. &PRODUCT; orchestrates setting up a virtual - server on the LB service provider in Zone-1. Virtual server 1 that is set up on the LB - service provider in Zone-1 represents a publicly accessible virtual server that client - reaches at IP-1. The client traffic to virtual server 1 at IP-1 will be load balanced across - VM1 and VM2 instances. - Tenant-A acquires another public IP, IP-2 in Zone-2 and sets up a load balancer rule to - load balance the traffic between VM5 and VM6 instances. Similarly in Zone-2, &PRODUCT; - orchestrates setting up a virtual server on the LB service provider. Virtual server 2 that - is setup on the LB service provider in Zone-2 represents a publicly accessible virtual - server that client reaches at IP-2. The client traffic that reaches virtual server 2 at IP-2 - is load balanced across VM5 and VM6 instances. At this point Tenant-A has the service - enabled in both the zones, but has no means to set up a disaster recovery plan if one of the - zone fails. Additionally, there is no way for Tenant-A to load balance the traffic - intelligently to one of the zones based on load, proximity and so on. 
The cloud - administrator of xyztelco provisions a GSLB service provider to both the zones. A GSLB - provider is typically an ADC that has the ability to act as an ADNS (Authoritative Domain - Name Server) and has the mechanism to monitor health of virtual servers both at local and - remote sites. The cloud admin enables GSLB as a service to the tenants that use zones 1 and - 2. - - - - - - gslb.png: GSLB architecture - - - Tenant-A wishes to leverage the GSLB service provided by the xyztelco cloud. Tenant-A - configures a GSLB rule to load balance traffic across virtual server 1 at Zone-1 and virtual - server 2 at Zone-2. The domain name is provided as A.xyztelco.com. &PRODUCT; orchestrates - setting up GSLB virtual server 1 on the GSLB service provider at Zone-1. &PRODUCT; binds - virtual server 1 of Zone-1 and virtual server 2 of Zone-2 to GSLB virtual server 1. GSLB - virtual server 1 is configured to start monitoring the health of virtual server 1 and 2 in - Zone-1. &PRODUCT; will also orchestrate setting up GSLB virtual server 2 on GSLB service - provider at Zone-2. &PRODUCT; will bind virtual server 1 of Zone-1 and virtual server 2 of - Zone-2 to GSLB virtual server 2. GSLB virtual server 2 is configured to start monitoring the - health of virtual server 1 and 2. &PRODUCT; will bind the domain A.xyztelco.com to both the - GSLB virtual server 1 and 2. At this point, Tenant-A service will be globally reachable at - A.xyztelco.com. The private DNS server for the domain xyztelco.com is configured by the - admin out-of-band to resolve the domain A.xyztelco.com to the GSLB providers at both the - zones, which are configured as ADNS for the domain A.xyztelco.com. When a client sends a DNS - request to resolve A.xyztelco.com, it will eventually get DNS delegation to the address of - GSLB providers at zone 1 and 2. A client DNS request will be received by the GSLB provider.
- The GSLB provider, depending on the domain for which it needs to resolve, will pick up the - GSLB virtual server associated with the domain. Depending on the health of the virtual - servers being load balanced, DNS request for the domain will be resolved to the public IP - associated with the selected virtual server. -
-
-
- Configuring GSLB - To configure a GSLB deployment, you must first configure a standard load balancing setup - for each zone. This enables you to balance load across the different servers in each zone in - the region. Then on the NetScaler side, configure both NetScaler appliances that you plan to - add to each zone as authoritative DNS (ADNS) servers. Next, create a GSLB site for each zone, - configure GSLB virtual servers for each site, create GLSB services, and bind the GSLB services - to the GSLB virtual servers. Finally, bind the domain to the GSLB virtual servers. The GSLB - configurations on the two appliances at the two different zones are identical, although each - sites load-balancing configuration is specific to that site. - Perform the following as a cloud administrator. As per the example given above, the - administrator of xyztelco is the one who sets up GSLB: - - - In the cloud.dns.name global parameter, specify the DNS name of your tenant's cloud - that make use of the GSLB service. - - - On the NetScaler side, configure GSLB as given in Configuring Global Server Load Balancing (GSLB): - - - Configuring a standard load balancing setup. - - - Configure Authoritative DNS, as explained in Configuring an Authoritative DNS Service. - - - Configure a GSLB site with site name formed from the domain name details. - Configure a GSLB site with the site name formed from the domain name. - As per the example given above, the site names are A.xyztelco.com and - B.xyztelco.com. - For more information, see Configuring a Basic GSLB Site. - - - Configure a GSLB virtual server. - For more information, see Configuring a GSLB Virtual Server. - - - Configure a GSLB service for each virtual server. - For more information, see Configuring a GSLB Service. - - - Bind the GSLB services to the GSLB virtual server. - For more information, see Binding GSLB Services to a GSLB Virtual Server. - - - Bind domain name to GSLB virtual server. 
Domain name is obtained from the domain - details. - For more information, see Binding a Domain to a GSLB Virtual Server. - - - - - In each zone that are participating in GSLB, add GSLB-enabled NetScaler device. - For more information, see . - - - As a domain administrator/ user perform the following: - - - Add a GSLB rule on both the sites. - See . - - - Assign load balancer rules. - See . - - -
- Prerequisites and Guidelines - - - The GSLB functionality is supported in both Basic and Advanced zones. - - - GSLB is added as a new network service. - - - GSLB service provider can be added to a physical network in a zone. - - - The admin is allowed to enable or disable GSLB functionality at region level. - - - The admin is allowed to configure a zone as GSLB capable or enabled. - A zone shall be considered as GSLB capable only if a GSLB service provider is - provisioned in the zone. - - - When users have VMs deployed in multiple availability zones which are GSLB enabled, - they can use the GSLB functionality to load balance traffic across the VMs in multiple - zones. - - - The users can use GSLB to load balance across the VMs across zones in a region only - if the admin has enabled GSLB in that region. - - - The users can load balance traffic across the availability zones in the same region - or different regions. - - - The admin can configure DNS name for the entire cloud. - - - The users can specify a unique name across the cloud for a globally load balanced - service. The provided name is used as the domain name under the DNS name associated with - the cloud. - The user-provided name along with the admin-provided DNS name is used to produce a - globally resolvable FQDN for the globally load balanced service of the user. For - example, if the admin has configured xyztelco.com as the DNS name for the cloud, and - user specifies 'foo' for the GSLB virtual service, then the FQDN name of the GSLB - virtual service is foo.xyztelco.com. - - - While setting up GSLB, users can select a load balancing method, such as round - robin, for using across the zones that are part of GSLB. - - - The user shall be able to set weight to zone-level virtual server. Weight shall be - considered by the load balancing method for distributing the traffic.
- - - The GSLB functionality shall support session persistence, where series of client - requests for particular domain name is sent to a virtual server on the same zone. - Statistics is collected from each GSLB virtual server. - - -
-
- Enabling GSLB in NetScaler - In each zone, add GSLB-enabled NetScaler device for load balancing. - - - Log in as administrator to the &PRODUCT; UI. - - - In the left navigation bar, click Infrastructure. - - - In Zones, click View More. - - - Choose the zone you want to work with. - - - Click the Physical Network tab, then click the name of the physical network. - - - In the Network Service Providers node of the diagram, click Configure. - You might have to scroll down to see this. - - - Click NetScaler. - - - Click Add NetScaler device and provide the following: - For NetScaler: - - - IP Address: The IP address of the NetScaler device. - - - Username/Password: The authentication - credentials to access the device. &PRODUCT; uses these credentials to access the - device. - - - Type: The type of device that is being added. - It could be F5 Big Ip Load Balancer, NetScaler VPX, NetScaler MPX, or NetScaler SDX. - For a comparison of the NetScaler types, see the &PRODUCT; Administration - Guide. - - - Public interface: Interface of device that is - configured to be part of the public network. - - - Private interface: Interface of device that is - configured to be part of the private network. - - - GSLB service: Select this option. - - - GSLB service Public IP: The public IP address - of the NAT translator for a GSLB service that is on a private network. - - - GSLB service Private IP: The private IP of the - GSLB service. - - - Number of Retries. Number of times to attempt a - command on the device before considering the operation failed. Default is 2. - - - Capacity: The number of networks the device can - handle. - - - Dedicated: When marked as dedicated, this - device will be dedicated to a single account. When Dedicated is checked, the value - in the Capacity field has no significance implicitly, its value is 1. - - - - - Click OK. - - -
-
- Adding a GSLB Rule - - - Log in to the &PRODUCT; UI as a domain administrator or user. - - - In the left navigation pane, click Region. - - - Select the region for which you want to create a GSLB rule. - - - In the Details tab, click View GSLB. - - - Click Add GSLB. - The Add GSLB page is displayed as follows: - - - - - - gslb-add.png: adding a gslb rule - - - - - Specify the following: - - - Name: Name for the GSLB rule. - - - Description: (Optional) A short description of - the GSLB rule that can be displayed to users. - - - GSLB Domain Name: A preferred domain name for - the service. - - - Algorithm: (Optional) The algorithm to use to - load balance the traffic across the zones. The options are Round Robin, Least - Connection, and Proximity. - - - Service Type: The transport protocol to use for - GSLB. The options are TCP and UDP. - - - Domain: (Optional) The domain for which you - want to create the GSLB rule. - - - Account: (Optional) The account on which you - want to apply the GSLB rule. - - - - - Click OK to confirm. - - -
-
- Assigning Load Balancing Rules to GSLB - - - Log in to the &PRODUCT; UI as a domain administrator or user. - - - In the left navigation pane, click Region. - - - Select the region for which you want to create a GSLB rule. - - - In the Details tab, click View GSLB. - - - Select the desired GSLB. - - - Click view assigned load balancing. - - - Click assign more load balancing. - - - Select the load balancing rule you have created for the zone. - - - Click OK to confirm. - - -
-
-
- Known Limitation - Currently, &PRODUCT; does not support orchestration of services across the zones. The - notion of services and service providers in region are to be introduced. -
-
diff --git a/docs/en-US/gsoc-dharmesh.xml b/docs/en-US/gsoc-dharmesh.xml deleted file mode 100644 index 01a77c70ab0..00000000000 --- a/docs/en-US/gsoc-dharmesh.xml +++ /dev/null @@ -1,149 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Dharmesh's 2013 GSoC Proposal - This chapter describes Dharmesh's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy paste of the submitted proposal. -
- Abstract - - The project aims to bring cloudformation like service to cloudstack. One of the prime use-case is cluster computing frameworks on cloudstack. A cloudformation service will give users and administrators of cloudstack ability to manage and control a set of resources easily. The cloudformation will allow booting and configuring a set of VMs and form a cluster. Simple example would be LAMP stack. More complex clusters such as mesos or hadoop cluster requires a little more advanced configuration. There is already some work done by Chiradeep Vittal at this front [5]. In this project, I will implement server side cloudformation service for cloudstack and demonstrate how to run mesos cluster using it. - -
- -
- Mesos - - Mesos is a resource management platform for clusters. It aims to increase resource utilization of clusters by sharing cluster resources among multiple processing frameworks(like MapReduce, MPI, Graph Processing) or multiple instances of same framework. It provides efficient resource isolation through use of containers. Uses zookeeper for state maintenance and fault tolerance. - -
- -
- What can run on mesos ? - - Spark: A cluster computing framework based on the Resilient Distributed Datasets (RDDs) abstraction. RDD is more generalized than MapReduce and can support iterative and interactive computation while retaining fault tolerance, scalability, data locality etc. - - Hadoop: Hadoop is a fault tolerant and scalable distributed computing framework based on the MapReduce abstraction. - - Begel: A graph processing framework based on Pregel. - - and other frameworks like MPI, Hypertable. -
- -
- How to deploy mesos ? - - Mesos provides cluster installation scripts for cluster deployment. There are also scripts available to deploy a cluster on Amazon EC2. It would be interesting to see if these scripts can be leveraged in any way. -
- -
- Deliverables - - - Deploy CloudStack and understand instance configuration/contextualization - - - Test and deploy Mesos on a set of CloudStack based VM, manually. Design/propose an automation framework - - - Test stackmate and engage chiradeep (report bugs, make suggestion, make pull request) - - - Create cloudformation template to provision a Mesos Cluster - - - Compare with Apache Whirr or other cluster provisioning tools for server side implementation of cloudformation service. - - -
- -
- Architecture and Tools - - The high level architecture is as follows: - - - - - - - - - - - It includes following components: - - - - CloudFormation Query API server: - This acts as a point of contact to and exposes CloudFormation functionality as Query API. This can be accessed directly or through existing tools from Amazon AWS for their cloudformation service. It will be easy to start as a module which resides outside cloudstack at first and I plan to use dropwizard [3] to start with. Later may be the API server can be merged with cloudstack core. I plan to use mysql for storing details of clusters. - - - - Provisioning: - - Provisioning module is responsible for handling the booting process of the VMs through cloudstack. This uses the cloudstack APIs for launching VMs. I plan to use preconfigured templates/images with required dependencies installed, which will make cluster creation process much faster even for large clusters. Error handling is very important part of this module. For example, what you do if few VMs fail to boot in cluster ? - - - - Configuration: - - This module deals with configuring the VMs to form a cluster. This can be done via manual scripts/code or via configuration management tools like chef/ironfan/knife. Potentially workflow automation tools like rundeck [4] also can be used. Also Apache whirr and Provisionr are options. I plan to explore this tools and select suitable ones. - - - -
- -
- API - - Query API will be based on Amazon AWS cloudformation service. This will allow leveraging existing tools for AWS. -
- -
- Timeline - 1-1.5 week : project design. Architecture, tools selection, API design - 1-1.5 week : getting familiar with cloudstack and stackmate codebase and architecture details - 1-1.5 week : getting familiar with mesos internals - 1-1.5 week : setting up the dev environment and create mesos templates - 2-3 week : build provisioning and configuration module - Midterm evaluation: provisioning module, configuration module - 2-3 week : develop cloudformation server side implementation - 2-3 week : test and integrate -
- -
- Future Work - - - Auto Scaling: - Automatically adding or removing VMs from mesos cluster based on various conditions like utilization going above/below a static threshold. There can be more sophisticated strategies based on prediction or fine grained metric collection with tight integration with mesos framework. - - - Cluster Simulator: - Integrating with existing simulator to simulate mesos clusters. This can be useful in various scenarios, for example while developing a new scheduling algorithm, testing autoscaling etc. - - -
-
diff --git a/docs/en-US/gsoc-imduffy15.xml b/docs/en-US/gsoc-imduffy15.xml deleted file mode 100644 index f78cb540704..00000000000 --- a/docs/en-US/gsoc-imduffy15.xml +++ /dev/null @@ -1,395 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Ians's 2013 GSoC Proposal - This chapter describes Ians 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy paste of the submitted proposal. -
- LDAP user provisioning - - "Need to automate the way the LDAP users are provisioned into cloud stack. This will mean better - integration with a LDAP server, ability to import users and a way to define how the LDAP user - maps to the cloudstack users." - -
-
- Abstract - - The aim of this project is to provide a more effective mechanism to provision users from LDAP - into cloudstack. Currently cloudstack enables LDAP authentication. In this authentication users - must be first setup in cloudstack. Once the user is setup in cloudstack they can authenticate - using their LDAP username and password. This project will improve Cloudstack LDAP integration - by enabling users to be set up automatically using their LDAP credentials - -
-
- Deliverables - - - Service that retrieves a list of LDAP users from a configured group - - - Extension of the cloudstack UI "Add User" screen to offer user list from LDAP - - - Add service for saving a new user with their details from LDAP - - - BDD unit and acceptance automated testing - - - Document change details - - -
-
- Quantifiable Results - - - - - Given - An administrator wants to add new user to cloudstack and LDAP is setup in cloudstack - - - When - The administrator opens the "Add User" screen - - - Then - A table of users appears for the current list of users (not already created on cloudstack) from the LDAP group displaying their usernames, given name and email address. The timezone dropdown will still be available beside each user - - - - - - - - - - Given - An administrator wants to add new user to cloudstack and LDAP is not setup in cloudstack - - - When - The administrator opens the "Add User" screen - - - Then - The current add user screen and functionality is provided - - - - - - - - - - Given - An administrator wants to add new user to cloudstack and LDAP is setup in cloudstack - - - When - The administrator opens the "Add User" screen and mandatory information is missing - - - Then - These fields will be editable to enable you to populate the name or email address - - - - - - - - - - Given - An administrator wants to add new user to cloudstack, LDAP is setup and the user being created is in the LDAP query group - - - When - The administrator opens the "Add User" screen - - - Then - There is a list of LDAP users displayed but the user is present in the list - - - - - - - - - - Given - An administrator wants to add a new user to cloudstack, LDAP is setup and the user is not in the query group - - - When - The administrator opens the "Add User" screen - - - Then - There is a list of LDAP users displayed but the user is not in the list - - - - - - - - - - Given - An administrator wants to add a group of new users to cloudstack - - - When - The administrator opens the "Add User" screen, selects the users and hits save - - - Then - The list of new users are saved to the database - - - - - - - - - - Given - An administrator has created a new LDAP user on cloudstack - - - When - The user authenticates against cloudstack with the right credentials - - - Then - They are 
authorised in cloudstack - - - - - - - - - - Given - A user wants to edit an LDAP user - - - When - They open the "Edit User" screen - - - Then - The password fields are disabled and cannot be changed - - - - - -
-
- The Design Document - - - LDAP user list service - - - - name: ldapUserList - - - responseObject: LDAPUserResponse {username,email,name} - - - parameter: listType:enum {NEW, EXISTING,ALL} (Default to ALL if no option provided) - - - Create a new API service call for retreiving the list of users from LDAP. This will call a new - ConfigurationService which will retrieve the list of users using the configured search base and the query - filter. The list may be filtered in the ConfigurationService based on listType parameter - - - - LDAP Available Service - - - - name: ldapAvailable - - - responseObject LDAPAvailableResponse {available:boolean} - - - Create a new API service call veriying LDAP is setup correctly verifying the following configuration elements are all set: - - - ldap.hostname - - - ldap.port - - - ldap.usessl - - - ldap.queryfilter - - - ldap.searchbase - - - ldap.dn - - - ldap.password - - - - - - LDAP Save Users Service - - - - name: ldapSaveUsers - - - responseObject: LDAPSaveUsersRssponse {list]]>} - - - parameter: list of users - - - Saves the list of objects instead. Following the functionality in CreateUserCmd it will - - - Create the user via the account service - - - Handle the response - - - It will be decided whether a transation should remain over whole save or only over individual users. A list of UserResponse will be returned. - - - - Extension of cloudstack UI "Add User" screen - - - - Extend account.js enable the adding of a list of users with editable fields where required. 
The new "add user" screen for LDAP setup will: - - - Make an ajax call to the ldapAvailable, ldapuserList and ldapSaveUsers services - - - Validate on username, email, firstname and lastname - - - - - - Extension of cloudstack UI "Edit User" screen - - - - Extend account.js to disable the password fields on the edit user screen if LDAP available, specifically: - - - Make an ajax call to the ldapAvailable, ldapuserList and ldapSaveUsers services - - - Validate on username, email, firstname and lastname. Additional server validation will nsure the password has not changed - - - -
-
- Approach - - To get started a development cloudstack environment will be created with DevCloud used to verify changes. Once the schedule is agreed with the mentor the deliverables will be broken into small user stories with expected delivery dates set. The development cycle will focus on BDD, enforcing all unit and acceptance tests are written first. - - A build pipeline for a continuous delivery environment around cloudstack will be implemented, the following stages will be adopted: - - - - - - Stage - Action - - - - - Commit - Run unit tests - - - Sonar - Runs code quality metrics - - - Acceptance - Deploys the devcloud and runs all acceptance tests - - - Deployment - Deploy a new management server using Chef - - - - -
-
- About me - - I am a Computer Science Student at Dublin City University in Ireland. I have interests in virtualization, -automation, information systems, networking and web development - - I was involved with a project in a K-12(educational) environment of moving their server systems over -to a virtualized environment on ESXi. I have good knowledge of programming in Java, PHP and -Scripting languages. During the configuration of an automation system for OS deployment I experienced -some exposure to scripting in powershell, batch, vbs and bash and configuration of PXE images based -off WinPE and Debian. -Additionally I am also a mentor in an opensource teaching movement called CoderDojo, we teach kids -from the age of 8 everything from web page, HTML 5 game and raspberry pi development. It's really -cool. - - I’m excited at the opportunity and learning experience that cloudstack is offering with this project. -
-
diff --git a/docs/en-US/gsoc-meng.xml b/docs/en-US/gsoc-meng.xml deleted file mode 100644 index 8ea2b4cfda7..00000000000 --- a/docs/en-US/gsoc-meng.xml +++ /dev/null @@ -1,235 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Meng's 2013 GSoC Proposal - This chapter describes Meng's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy paste of the submitted proposal. -
- Project Description - - Getting a hadoop cluster going can be challenging and painful due to the tedious configuration phase and the diverse idiosyncrasies of each cloud provider. Apache Whirr[1] and Provisionr is a set of libraries for running cloud services in an automatic or semi-automatic fashion. They take advantage of a cloud-neutral library called jclouds[2] to create one-click, auto-configuring hadoop clusters on multiple clouds. Since jclouds supports CloudStack API, most of the services provided by Whirr and Provisionr should work out of the box on CloudStack. My first task is to test that assumption, make sure everything is well documented, and correct all issues with the latest version of CloudStack (4.0 and 4.1). - - - -The biggest challenge for hadoop provisioning is automatically configuring each instance at launch time based on what it is supposed to do, a process known as contextualization[3][4]. It causes last minute changes inside an instance to adapt to a cluster environment. Many automated cloud services are enabled by contextualization. For example in one-click hadoop clusters, contextualization basically amounts to generating and distributing ssh key pairs among instances, telling an instance where the master node is and what other slave nodes it should be aware of, etc. On EC2 contextualization is done via passing information through the EC2_USER_DATA entry[5][6]. Whirr and Provisionr embrace this feature to provision hadoop instances on EC2. My second task is to test and extend Whirr and Provisionr’s one-click solution on EC2 to CloudStack and also improve CloudStack’s support for Whirr and Provisionr to enable hadoop provisioning on CloudStack based clouds. - - -My third task is to add a Query API that is compatible with Amazon Elastic MapReduce (EMR) to CloudStack. 
Through this API, all hadoop provisioning functionality will be exposed and users can reuse cloud clients that are written for EMR to create and manage hadoop clusters on CloudStack based clouds. - -
- -
- Project Details - - Whirr defines four roles for the hadoop provisioning service: Namenode, JobTracker, Datanode and TaskTraker. With the help of CloudInit[7] (a popular package for cloud instance initialization), each VM instance is configured based on its role and a compressed file that is passed in the EC2_USER_DATA entry. Since CloudStack also supports EC2_USER_DATA, I think the most feasible way to have hadoop provisioning on CloudStack is to extend Whirr’s solution on EC2 to CloudStack platform and to make necessary adjustment based on CloudStack’s - - - - Whirr and Provisionr deal with two critical issues in their role configuration scripts (configure-hadoop-role_list): SSH key authentication and hostname configuration. - - - - SSH Key Authentication. The need for SSH Key based authentication is required so that the master node can login to slave nodes to start/stop hadoop daemons. Also each node needs to login to itself to start its own hadoop daemons. Traditionally this is done by generating a key pair on the master node and distributing the public key to all slave nodes. This can be only done with human intervention. Whirr works around this problem on EC2 by having a common key pair for all nodes in a hadoop cluster. Thus every node is able to login to one another. The key pair is provided by users and obtained by CloudInit inside an instance from metadata service. As far as I know, Cloudstack does not support user-provided ssh key authentication. Although CloudStack has the createSSHKeyPair API[8] to generate SSH keys and users can create an instance template that supports SSH keys, there is no easy way to have a unified SSH key on all cluster instances. Besides Whirr prefers minimal image management, so having a customized template doesn’t seem quite fit here. - - - Hostname configuration. The hostname of each instance has to be properly set and injected into the set of hadoop config files (core-site.xml, hdfs-site.xml, mapred-site.xml ). 
For an EC2 instance, its host name is converted from a combination of its public IP and an EC2-specific pre/suffix (e.g. an instance with IP 54.224.206.71 will have its hostname set to ec2-54-224-206-71.compute-1.amazonaws.com). This hostname amounts to the Fully Qualified Domain Name that uniquely identifies this node on the network. As for the case of CloudStack, if users do not specify a name the hostname that identifies a VM on a network will be a unique UUID generated by CloudStack[9]. - - - - - - - These two are the main issues that need support improvement on the CloudStack side. Other things like preparing disks, installing hadoop tarballs and starting hadoop daemons can be easily done as they are relatively role/instance-independent and static. Runurl can be used to simplify user-data scripts. - - - - - - After we achieve hadoop provisioning on CloudStack using Whirr we can go further to add a Query API to CloudStack to expose this functionality. I will write an API that is compatible with Amazon Elastic MapReduce Service (EMR)[10] so that users can reuse clients that are written for EMR to submit jobs to existing hadoop clusters, poll job status, terminate a hadoop instance and do other things on CloudStack based clouds. There are eight actions[11] now supported in EMR API. I will try to implement as many as I can during the period of GSoC. The following statements give some examples of the API that I will write. - - - -This will launch a new hadoop cluster with four instances using specified instance types and add a job flow to it. - - - -This will add a step to the existing job flow with ID j-3UN6WX5RRO2AG. This step will run the specified jar file. - - - -This will return the status of the given job flow. - -
- -
- Roadmap - - Jun. 17 ∼ Jun. 30 - - - Learn CloudStack and Apache Whirr/Provisionr APIs; Deploy a CloudStack cluster. - - - - Identify how EC2_USER_DATA is passed and executed on each CloudStack instance. - - - Figure out how the files passed in EC2_USER_DATA are acted upon by CloudInit. - - - Identify files in /etc/init/ that are used or modified by Whirr and Provisionr for hadoop related configuration. - - - Deploy a hadoop cluster on CloudStack via Whirr/Provisionr. This is to test what are missing in CloudStack or Whirr/Provisionr in terms of their support for each other. - - - Jul. 1∼ Aug. 1 - - - Write scripts to configure VM hostname on CloudStack with the help of CloudInit; - - - Write scripts to distribute SSH keys among CloudStack instances. Add the capability of using user-provided ssh key for authentication to CloudStack. - - - Take care of the other things left for hadoop provisioning, such as mounting disks, installing hadoop tarballs, etc. - - - Compose files that need to be passed in EC2_USER_DATA to each CloudStack instance . Test these files and write patches to make sure that Whirr/Provisionr can succefully deploy one-click hadoop clusters on CloudStack. - - - Aug. 3 ∼ Sep. 8 - - - Design and build an Elastic Mapreduce API for CloudStack that takes control of hadoop cluster creation and management. - - - Implement the eight actions defined in EMR API. This task might take a while. - - - - Sep. 10 ∼ Sep. 23 - - - - Code cleaning and documentation wrap up. - - - - - - -
- -
- Deliverables - - - - Whirr has limited support for CloudStack. Check what’s missing and make sure all steps are properly documented on the Whirr and CloudStack websites. - - - Contribute code to CloudStack and and send patches to Whirr/Provisionr if necessary to enable hadoop provisioning on CloudStack via Whirr/Provisionr. - - - Build an EMR-compatible API for CloudStack. - - -
-
- Nice to have - In addition to the required deliverables, it’s nice to have the following: - - - - The capability to add and remove hadoop nodes dynamically to enable elastic hadoop clusters on CloudStack. - - - - A review of the existing tools that offer one-click provisioning and make sure that they support CloudStack based clouds. - - -
- -
- References - - - - - http://whirr.apache.org/ - - - http://www.jclouds.org/documentation/gettingstarted/what-is-jclouds/ - - - Katarzyna Keahey, Tim Freeman, Contextualization: Providing One-Click Virtual Clusters - - - http://www.nimbusproject.org/docs/current/clouds/clusters2.html - - - http://aws.amazon.com/amazon-linux-ami/ - - - https://svn.apache.org/repos/asf/whirr/branches/contrib-python/src/py/hadoop/cloud/data/hadoop-ec2-init-remote.sh - - - https://help.ubuntu.com/community/CloudInit - - - http://cloudstack.apache.org/docs/en-US/Apache_CloudStack/4.0.2/html/Installation_Guide/using-sshkeys.html - - - https://cwiki.apache.org/CLOUDSTACK/allow-user-provided-hostname-internal-vm-name-on-hypervisor-instead-of-cloud-platform-auto-generated-name-for-guest-vms.html - - -http://docs.aws.amazon.com/ElasticMapReduce/latest/API/Welcome.html - - - http://docs.aws.amazon.com/ElasticMapReduce/latest/API/API_Operations.html - - - http://buildacloud.org/blog/235-puppet-and-cloudstack.html - - -http://chriskleban-internet.blogspot.com/2012/03/build-cloud-cloudstack-instance.html - - - http://gehrcke.de/2009/06/aws-about-api/ - - - Apache_CloudStack-4.0.0-incubating-API_Developers_Guide-en-US.pdf - - - -
- -
diff --git a/docs/en-US/gsoc-midsummer-dharmesh.xml b/docs/en-US/gsoc-midsummer-dharmesh.xml deleted file mode 100644 index 9e0fdcfec07..00000000000 --- a/docs/en-US/gsoc-midsummer-dharmesh.xml +++ /dev/null @@ -1,193 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Dharmesh's Mid-Summer Progress Updates - This section describes Dharmesh's progress on project "Integration project to deploy and use Mesos on a CloudStack based cloud" - -
- Introduction - - I am lagging a little in my timeline of the project. After the community bonding period, I have explored several things. My mentor, Sebastian, has been really helpful, along with several others from the community. Along with my GSoC project I took up the task of resolving CLOUDSTACK-212 and it has been a wonderful experience. I am putting my best effort to complete the mesos integration as described in my proposal. -
- -
- CLOUDSTACK-212 "Switch java package structure from com.cloud to org.apache" - - CLOOUDSTACK-212(https://issues.apache.org/jira/browse/CLOUDSTACK-212) is about migrating old com.cloud package structure to new org.apache to reflect the project move to Apache Software Foundation. - - - Rohit had taken the initiative and had already refactored cloud-api project to new package. When I looked at this bug, I thought it was a pretty straight forward task. I was not quite correct. - - - I used eclipse's refactoring capabilities for most of the refactoring. I used context-menu->refactor->rename with options of update - "references", "variable/method names" and "textual references" check-boxes checked. Also I disabled autobuild option as suggested. Also I disabled the CVS plugins as suggested by eclipse community the indexing by plugin while long refactoring was interfering and left garbled code. Even after these precautions, I noticed that eclipse was messing up some of the imports and especially bean-names in xml files. After correcting them manually, I got many test case failures. Upon investigation, I came to know that the error was because of resource folders of test cases. In short, I learned a lot. - - - Due to active development on master branch even between I create master-rebased-patch and apply-test-submit and one of the committer checks the applicability of the patch, the patch was failing due to new merges during this time. After several such attempt cycles, it became clear that this is not a good idea. - So after discussion with senior members of community, separate branch "namespacechanges" was created and I applied all the code refactoring there. Then one of the committer, Dave will cherry-pick them to master freezing other merge. I have submitted the patch as planned on 19th and it is currently being reviewed. - - - One of the great advantage of working on this bug was I got much better understanding of the cloudstack codebase. 
Also my understanding of unit testing with maven has become much more clearer. - -
- -
- Mesos integration with cloudstack - There are multiple ways of implementing the project. I have explored following options with specific pros and cons. - - -
- Shell script to boot and configure mesos - This idea is to write a shell script to automate all the steps involved in running mesos over cloudstack. This is very flexible option as we have full power of shell. - - - create security groups for master, slave and zookeeper. - - - get latest AMI number and get the image. - - - create device mapping - - - launch slave - - - launch master - - - launch zookeeper - - - wait for instances to come up - - - ssh-copy-ids - - - rsync - - - run mesos setup script - - - - Since there exists a shell script within mesos codebase to create and configure mesos cluster on AWS, the idea is to use the same script and make use of cloudstack-aws API. Currently I am testing this script. - Following are the steps: - - - enable aws-api on cloudstack. - - - create AMI or template with required dependencies. - - - download mesos. - - - configure boto environment to use with cloudstack - - - run mesos-aws script. - - - - Pros: - - Since the script is part of mesos codebase, it will be updated to work in future as well. - - - -
- -
- WHIRR-121 "Creating Whirr service for mesos" - Whirr provides a common API to deploy services to various clouds. Currently, it is highly hadoop centric. Tom White had done some work in the Whirr community, but it has not been updated for quite a long time. - - Pros: - - Leverage Whirr API and tools. - - - - Cons: - - Dependence on yet another tool. - - -
- -
- Creating a cloudformation template for mesos - The idea is to use AWS cloudformation APIs/functions, so that it can be used with any cloudformation tools. Within cloudstack, Stackmate project is implementing cloudformation service. - - Pros: - - Leverage all the available tools for AWS cloudformation and stackmate - - - Potentially can be used on multiple clouds. - - - - Cons: - - Have to stay in the limits of the AWS cloudformation API and otherwise have to use user-data to pass "shell commands", which will not be a maintainable solution in the long term. - - -
- -
- -
- Conclusion - - I am very happy with the kind of things I have learned so far with the project. This includes: - - - - Advanced git commands - - - Exposed to very large code base - - - Hidden features, methods and bugs of eclipse that will be useful refactoring large projects - - - How Unit testing work, especially with mvn - - - How to evaluate pros and cons of multiple options to achieve same functionality - - - Writing a blog - - - - The experience gained from this project is invaluable and it is great that the Google Summer Of Code program exist. - -
-
diff --git a/docs/en-US/gsoc-midsummer-ian.xml b/docs/en-US/gsoc-midsummer-ian.xml deleted file mode 100644 index 1f65e2d309c..00000000000 --- a/docs/en-US/gsoc-midsummer-ian.xml +++ /dev/null @@ -1,344 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Mid-Summer Progress Updates for Ian Duffy - "Ldap User Provisioning" - This section describes my progress with the project titled "LDAP User Provisioning". -
- Introduction - - Progress on my project is moving along smoothly. The Cloudstack community along with my mentor Abhi have been very accommodating. Since the community bonding period communication has been consistent and the expectations have been clear. Sebastien, head mentor, has given us great guidance. I have enjoyed their teaching style. I found it was a nice gradual build up starting with creating a simple document update patch to eventually submitting a new Cloudstack Plugin. - - - I am pleased with my progress on the project to date. I feel as if the goals set out in my proposal are very doable and that they should be achieved. - -
-
- Continuous Integration with Jenkins - - In order to try deliver working solutions of good quality I felt it would be a good idea to implement a continuous integration environment using Jenkins. The idea of this would be to automatically build and test my code. This was welcomed and aided by community members greatly. - - - - - - - jenkins-pipeline.png: Screenshot of the build pipeline. - - - - The key stages of the pipeline are as follows: - - - - - Acquire Code Base - This pulls down the latest Cloudstack codebase and builds it executing all unit tests. - - - - - Static Analysis - This runs tests on my code to ensure quality and good practice. This is being achieved with sonar source. - - - - - Integration Tests - This deploys the Cloudstack database. Brings up the Cloudstack Manager with jetty and their simulator. All checkin/integration tests are ran and then the jetty server is shutdown. - - - - - Package(Only exists on my local Jenkins) - The codebase is packaged up into an RPM and placed onto a local yum repo. If the time allows this will be used for future automated acceptance testing. - - - - - If your are interested in this I have created a screencast on youtube which walks through it: Continuous testing environment - -
-
- Ldap Plugin implementation - - At the start of the coding stage I began by reviewing the current LDAP implementation. This included: - - - - - The user authenticator - Enables LDAP users to login to Cloudstack once the user exists within the internal Cloudstack database. - - - - - LDAPConfig - Adds LDAP configuration. This is detailed in ldapConfig API reference This did not allow multiple configurations. - - - - - LDAPRemove - Removes the LDAP configuration - - - - - UI features. Global settings -> LDAP configuration allowed for the addition of a single LDAP server using the LDAPConfig command and the removal of an LDAP server using the LDAPRemove command. - - - - - After reviewing this code and implementation for some time I discovered that it wasn't the most maintainable code. I realised I could extend it if required. But it would involve creating more unmaintainable code and it would be messy. This goes against my goal of delivering quality. I decided therefore, justifiably I think to completely redo the LDAP implementation within Cloudstack. By doing this I did expanded the scope of the project. - - - I began to research the most appropriate way of structuring this. I started of by redoing the implementation. This meant creating the following classes(Excluding DAOs): - - - - - LdapManager - Manages all LDAP connections. - - - - - LdapConfiguration - Supplies all configuration from within the Cloudstack database or defaults where required. - - - - - LdapUserManager - Handles any interaction with LDAP user information. - - - - - LdapUtils - Supplies static helpers, e.g. escape search queries, get attributes from search queries. - - - - - LdapContextFactory - Manages the creation of contexts. - - - - - LdapAuthenticator - Supplies an authenticator to Cloudstack using the LdapManager. - - - - - From this I felt I had a solid foundation for creating API commands to allow the user to interact with an LDAP server. 
I went on to create the following commands: - - - - - LdapAddConfiguration - This allows for adding multiple LDAP configurations. Each configuration is just seen as a hostname and port. - - - - - - - add-ldap-configuration.png: Screenshot of API response. - - - - - - - - add-ldap-configuration-failure.png: Screenshot of API response. - - - - - - LdapDeleteConfiguration - This allows for the deletion of an LDAP configuration based on its hostname. - - - - - - - delete-ldap-configuration.png: Screenshot of API response. - - - - - - - - delete-ldap-configuration-failure.png: Screenshot of API response. - - - - - - LdapListConfiguration - This lists all of the LDAP configurations that exist within the database. - - - - - - - list-ldap-configuration.png: Screenshot of the build pipeline. - - - - - - LdapListAllUsers - This lists all the users within LDAP. - - - - - - - ldap-list-users.png: Screenshot of the build pipeline. - - - - - - Along with this global settings were added, this includes: - - - - - LDAP basedn - Sets the basedn for their LDAP configuration - - - - - LDAP bind password - Sets the password to use for binding to LDAP for creating the system context. If this is left blank along with bind principal then anonymous binding is used. - - - - - LDAP bind principal - Sets the principle to use for binding with LDAP for creating the system context. If this is left blank along with the bind password then anonymous binding is used. - - - - - LDAP email attribute - Sets the attribute to use for getting the users email address. Within both OpenLDAP and ActiveDirectory this is mail. For this reason this is set to mail by default. - - - - - LDAP firstname attribute - Sets the attribute to use for getting the users firstname. Within both OpenLDAP and ActiveDiretory this is givenname. For this reason this is set to givenname by default. - - - - - LDAP lastname attribute - Sets the attribute to use for getting the users lastname. 
Within both OpenLDAP and ActiveDiretory this is sn. For this reason this is set to sn by default. - - - - - LDAP username attribute - This sets out the attribute to use for getting the users username. Within OpenLDAP this is uid and within ActiveDirectory this is samAccountName. In order to comply with posix standards this is set as uid by default. - - - - - LDAP user object - This sets out the object type of user accounts within LDAP. Within OpenLDAP this is inetOrgPerson and within ActiveDirectory this is user. Again, in order to comply with posix standards this is set as inetOrgperson by default. - - - - - With this implementation I believe it allows for a much more extendable and flexible approach. The whole implementation is abstracted from the Cloudstack codebase using the "plugin" model. This allows all of the LDAP features to be contained within one place. Along with this the implementation supplies a good foundation. A side affect of redoing the implementation allowed me to add support for multiple LDAP servers. This means failover is supported, so for example, if you have a standard ActiveDirectory with primary and secondary domain controller. Both can be added to Cloudstack which will allow it to failover to either one assume one of them is down. - - - The API changes required me to update the UI interface within Cloudstack. With the improved API implementation this was easier. The Global Settings -> Ldap Configuration page has support for multiple LDAP servers however it only requires a hostname and port. All "global" ldap settings are set within the global settings page. - - - - - - - ldap-global-settings.png: Screenshot the LDAP related settings within global settings. - - - - - - - - ldap-configuration.png: Screenshot of the LDAP configuration page. - - -
-
- Add accounts UI - - Extending the UI to allow for easy provisioning of LDAP users is currently a work in progress. At the moment I have a 'working' implementation, see below screenshot. I am in need of assistance with it and am waiting on a review to be looked at. - - - - - - - ldap-account-addition.png: Screenshot of add user screen when LDAP is enabled. - - -
-
- Testing - - Unit tests have 92% code coverage within the LDAP Plugin. The unit tests were written in groovy using the spock framework. This allowed me to implement a BDD style of testing. - - - Integration tests have been written in python using the marvin test framework for Cloudstack. This test configures an LDAP server and attempts to log in as an LDAP user. The plugin comes with an embedded LDAP server for testing purposes. - - Execute integration tests: - nosetests --with-marvin --marvin-config=setup/dev/local.cfg test/integration/component/test_ldap.py --loa - Start embedded LDAP server: - mvn -pl :cloud-plugin-user-authenticator-ldap ldap:run -
-
- Conclusion - - I am very pleased with the learning outcomes of this project so far. I have been exposed to many things that my college's computer science curriculum does not cover. This includes: - - - - Usage of source control management tools(git) and dealing with code collaboration - - - Usage of a dependency manager and build tool(maven) - - - Usage of continuous testing environments(jenkins) - - - Usage of an IDE(eclipse) - - - Exposure to testing, both unit and integration tests - - - Exposure to a functional programming language(groovy) - - - Exposure to web development libraries(jQuery) - - - - The experience gained from this project is invaluable and it is great that the Google Summer Of Code program exists. - -
-
diff --git a/docs/en-US/gsoc-midsummer-meng.xml b/docs/en-US/gsoc-midsummer-meng.xml deleted file mode 100644 index ee24cf4a990..00000000000 --- a/docs/en-US/gsoc-midsummer-meng.xml +++ /dev/null @@ -1,216 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Mid-Summer Progress Updates for Meng - "Hadoop Provisioning on Cloudstack Via Whirr" - - In this section I describe my progress with the project titled "Hadoop Provisioning on CloudStack Via Whirr" -
- Introduction - - It has been five weeks since the GSOC 2013 is kick-started. During the last five weeks I have been constantly learning from the CloudStack Community in aspects of both knowledge and personality. The whole community is very accommodating and willing to help newbies. I am making progress steadily with the community's help. This is my first experience working with such a large and cool code base, definitely a challenging and wonderful experience for me. Though I am a little slipped behind my schedule, I am making my best effort and hoping to complete what I set out in my proposal by the end of this summer. - - - - -
-
- CloudStack Installation - - I spent two weeks or so on the CloudStack Installation. In the beginning, I am using the Ubuntu systems. Given that I am not familiar with maven and a little scared by various kinds of errors and exceptions during system deployment, I failed to deploy CloudStack through building from the source. With Ian's advice, I switched to CentOS and began to use rpm packages for installation, things went much smoother. By the end of the second week, I submitted my first patch -- CloudStack_4.1_Quick_Install_Guide. - - -
-
- Deploying a Hadoop Cluster on CloudStack via Whirr - - Provided that CloudStack is in place and I can register templates and add instances, I went ahead to use Whirr to deploy a hadoop cluster on CloudStack. The cluster definition file is as follows: - - - - - - - - whirr.cluster-name: the name of your hadoop cluster. - whirr.store-cluster-in-etc-hosts: store all cluster IPs and hostnames in /etc/hosts on each node. - whirr.instance-templates: this specifies your cluster layout. One node acts as the jobtracker and namenode (the hadoop master). Another two slaves nodes act as both datanode and tasktracker. - image-id: This tells CloudStack which template to use to start the cluster. - hardware-id: This is the type of hardware to use for the cluster instances. - - private/public-key-file: :the key-pair used to login to each instance. Only RSA SSH keys are supported at this moment. Jclouds will move this key pair to the set of instances on startup. - whirr.cluster-user: this is the name of the cluster admin user. - whirr.bootstrap-user: this tells Jclouds which user name and password to use to login to each instance for bootstrapping and customizing each instance. You must specify this property if the image you choose has a hardwired username/password.(e.g. the default template CentOS 5.5(64-bit) no GUI (KVM) comes with Cloudstack has a hardcoded credential: root:password), otherwise you don't need to specify this property. - whirr.env.repo: this tells Whirr which repository to use to download packages. - whirr.hadoop.install-function/whirr.hadoop.configure-function :it's self-explanatory. - - - - - Output of this deployment is as follows: - - - - - - - - - - Other details can be found at this post in my blog. In addition I have a Whirr trouble shooting post there if you are interested. - -
-
- Elastic Map Reduce(EMR) Plugin Implementation - - Given that I have completed the deployment of a hadoop cluster on CloudStack using Whirr through the above steps, I began to dive into the EMR plugin development. My first API is launchHadoopCluster, it's implementation is quite straight forward, by invoking an external Whirr command in the command line on the management server and piggybacking the Whirr output in responses.This api has a structure like below: - - - - - -The following is the source code of launchHadoopClusterCmd.java. - - - - - - - You can invoke this api through the following command in CloudMonkey: - > launchHadoopCluster config=myhadoop.properties - -This is sort of the launchHadoopCluster 0.0, other details can be found in this post . - -My undergoing working is modifying this api so that it calls Whirr libraries instead of invoking Whirr externally in the command line. -First add Whirr as a dependency of this plugin so that maven will download Whirr automatically when you compile this plugin. - - - - - - - -I am planning to replace the Runtime.getRuntime().exec() above with the following code snippet. - - LaunchClusterCommand command = new LaunchClusterCommand(); - command.run(System.in, System.out, System.err, Arrays.asList(args)); - - -Eventually when a hadoop cluster is launched. We can use Yarn to submit hadoop jobs. -Yarn exposes the following API for job submission. -ApplicationId submitApplication(ApplicationSubmissionContext appContext) throws org.apache.hadoop.yarn.exceptions.YarnRemoteException -In Yarn, an application is either a single job in the classical sense of Map-Reduce or a DAG of jobs. In other words an application can have many jobs. This fits well with the concepts in EMR design. The term job flow in EMR is equivalent to the application concept in Yarn. Correspondingly, a job flow step in EMR is equal to a job in Yarn. In addition Yarn exposes the following API to query the state of an application. 
-ApplicationReport getApplicationReport(ApplicationId appId) throws org.apache.hadoop.yarn.exceptions.YarnRemoteException -The above API can be used to implement the DescribeJobFlows API in EMR. - - - - -
-
- Learning Jclouds -As Whirr relies on Jclouds for clouds provisioning, it's important for me to understand what Jclouds features support Whirr and how Whirr interacts with Jclouds. I figured out the following problems: - -How does Whirr create user credentials on each node? - -Using the runScript feature provide by Jclouds, Whirr can execute a script at node bootup, one of the options in the script is to override the login credentials with the ones that provide in the cluster properties file. The following line from Whirr demonstrates this idea. -final RunScriptOptions options = overrideLoginCredentials(LoginCredentials.builder().user(clusterSpec.getClusterUser()).privateKey(clusterSpec.getPrivateKey()).build()); - - - -How does Whirr start up instances in the beginning? -The computeService APIs provided by jclouds allow Whirr to create a set of nodes in a group(specified by the cluster name),and operate them as a logical unit without worrying about the implementation details of the cloud. -Set<NodeMetadata> nodes = (Set<NodeMetadata>)computeService.createNodesInGroup(clusterName, num, template); - The above command returns all the nodes the API was able to launch into in a running state with port 22 open. -How does Whirr differentiate nodes by roles and configure them separately? -Jclouds commands ending in Matching are called predicate commands. They allow Whirr to decide which subset of nodes these commands will affect. For example, the following command in Whirr will run a script with specified options on nodes who match the given condition. - -Predicate<NodeMetadata> condition; -condition = Predicates.and(runningInGroup(spec.getClusterName()), condition); -ComputeServiceContext context = getCompute().apply(spec); -context.getComputeService().runScriptOnNodesMatching(condition,statement, options); - -The following is an example how a node playing the role of jobtracker in a hadoop cluster is configured to open certain ports using the predicate commands. 
- - Instance jobtracker = cluster.getInstanceMatching(role(ROLE)); // ROLE="hadoop-jobtracker" - event.getFirewallManager().addRules( - Rule.create() - .destination(jobtracker) - .ports(HadoopCluster.JOBTRACKER_WEB_UI_PORT), - Rule.create() - .source(HadoopCluster.getNamenodePublicAddress(cluster).getHostAddress()) - .destination(jobtracker) - .ports(HadoopCluster.JOBTRACKER_PORT) - ); - - - -With the help of such predicated commands, Whirr can run different bootstrap and init scripts on nodes with distinct roles. - - - - - - -
-
- Great Lessons Learned - - I am much appreciated with the opportunity to work with CloudStack and learn from the lovable community. I can see myself constantly evolving from this invaluable experience both technologically and psychologically. There were hard times that I were stuck on certain problems for days and good times that made me want to scream seeing problem cleared. This project is a great challenge for me. I am making progress steadily though not smoothly. That's where I learned the following great lessons: - - - - - - When you work in an open source community, do things in the open source way. There was a time when I locked myself up because I am stuck on problems and I am not confident enough to ask them on the mailing list. The more I restricted myself from the community the less progress I made. Also the lack of communication from my side also prevents me from learning from other people and get guidance from my mentor. - - - CloudStack is evolving at a fast pace. There are many APIs being added ,many patches being submitted every day. That's why the community use the word "SNAPSHOT" for each version. At this moment I am learning to deal with fast code changing and upgrading. A large portion of my time is devoted to system installation and deployment. I am getting used to treat system exceptions and errors as a common case. That's another reason why communication with the community is critical. - - - - - In addition to the project itself, I am strengthening my technical suite at the same time. - - -I learned to use some useful software tools: maven, git, publican, etc. - - -Reading the source code of Whirr make me learn more high level java programming skills, e.g. using generics, wildcard, service loader, the Executor model, Future object, etc . - - - I am exposed to Jclouds, a useful cloud neutral library to manipulate different cloud infrastructures. 
- - I gained deeper understanding of cloud web services and learned the usage of several cloud clients, e.g. Jclouds CLI, CloudMonkey,etc. - - - - - - - - - I am grateful that Google Summer Of Code exists, it gives us students a sense of how fast real-world software development works and provides us hand-on experience of coding in large open source projects. More importantly it's a self-challenging process that strengthens our minds along the way. -
-
diff --git a/docs/en-US/gsoc-midsummer-nguyen.xml b/docs/en-US/gsoc-midsummer-nguyen.xml deleted file mode 100644 index b4f4f5ab495..00000000000 --- a/docs/en-US/gsoc-midsummer-nguyen.xml +++ /dev/null @@ -1,480 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Mid-Summer Progress Updates for Nguyen Anh Tu - "Add Xen/XCP support for GRE SDN controller" - This section describes my progress with the project titled "Add Xen/XCP support for GRE SDN controller" -
- Introduction - It has been a half way of GSoC2013 journey which I am getting more familiar with its activities. Personally, the previous one-and-a-half month has surprisingly passed by in a blink with lots of pressure. In this first time joining in GSoC2013, I have found it totally new and interesting in its working methods and challenges. Along with those stressful moments, I appreciated all wonderful experiences and knowledge that I have luckily gained from this commitment. It is time to review it all and present in time order. - - My project named “Add Xen/XCP support for GRE SDN controllerâ€, the proposal can be found here: Proposal - - Specifically, I need to improve the current GRE SDN controller to work with XCP, a free version of XenServer. Then, as mentioning with my two mentor Sebastien Goasguen and Hugo, I continue to work in next missions as below: - - - re-factor GRE source code by following NiciraNVP plugin design. - add GRE support for KVM hypervisor. - develop a new ODL plugin using Opendaylight controller for controlling and managing network services via OpenFlow protocol. - - At the beginning, I started to explore frameworks and tools that CloudStack uses such as Spring framework, marven, git and Reviewboard. In my country developers are more familiar with svn than git, however these tools are also such easy to use so I don't write more about them. I want to note about using Spring in CloudStack and what happen in the Management Server startup process. - -
-
- Spring in CloudStack - Spring provides a Container which contains pre-loaded components CloudStack use. At startup, these components are loaded to Container via two ways: - - - - components are declared as beans in componentcontext.xml and applicationcontext.xml - - <bean id="accountDaoImpl" class="com.cloud.user.dao.AccountDaoImpl" /> - <bean id="accountDetailsDaoImpl" class="com.cloud.user.AccountDetailsDaoImpl" /> - <bean id="accountJoinDaoImpl" class="com.cloud.api.query.dao.AccountJoinDaoImpl" /> - <bean id="accountGuestVlanMapDaoImpl" class="com.cloud.network.dao.AccountGuestVlanMapDaoImpl" /> - <bean id="accountVlanMapDaoImpl" class="com.cloud.dc.dao.AccountVlanMapDaoImpl" /> - ... - - - - components are marked with @Component annotation - - @Component - @Local(value = { NetworkManager.class}) - public class NetworkManagerImpl extends ManagerBase implements NetworkManager, Listener { - static final Logger s_logger = Logger.getLogger(NetworkManagerImpl.class); - - - - As I know recently @Component is not recommended. - The fundamental functionality provided by the Spring Container is Dependency Injection (DI). To decouple Java components from other Java components the dependency to a certain other class should get injected into them rather that the class inself creates or finds this object. The general concept between dependency injection is called Inversion of Control. A class should not configure itself but should be configured from outside. A design based on independent classes / components increases the re-usability and possibility to test the software. Example of using DI in CloudStack is showed below: - - public class NetworkManagerImpl extends ManagerBase implements NetworkManager, Listener { - static final Logger s_logger = Logger.getLogger(NetworkManagerImpl.class); - - @Inject - DataCenterDao _dcDao = null; - @Inject - VlanDao _vlanDao = null; - @Inject - IPAddressDao _ipAddressDao = null; - @Inject - AccountDao _accountDao = null; - -
-
- Management Server Startup - The MS startup process is defined in cloud-client-ui/WEB-INF/web.xml. The following items will be loaded sequentially: - - Log4jConfigListener. - ContextLoaderListener. - CloudStartupServlet. - ConsoleServlet. - ApiServlet. - - Of which, CloudStartupServlet will call to ComponentContext to init all of pre-defined components life cycle including configure() and start() phase. The components are divided into seven levels to consecutively startup. Of course, they must override configure() and start() methods. - - public interface ComponentLifecycle { - public static final int RUN_LEVEL_SYSTEM_BOOTSTRAP = 0; // for system level bootstrap components - public static final int RUN_LEVEL_SYSTEM = 1; // for system level service components (i.e., DAOs) - public static final int RUN_LEVEL_FRAMEWORK_BOOTSTRAP = 2; // for framework startup checkers (i.e., DB migration check) - public static final int RUN_LEVEL_FRAMEWORK = 3; // for framework bootstrap components(i.e., clustering management components) - public static final int RUN_LEVEL_COMPONENT_BOOTSTRAP = 4; // general manager components - public static final int RUN_LEVEL_COMPONENT = 5; // regular adapters, plugin components - public static final int RUN_LEVEL_APPLICATION_MAINLOOP = 6; - public static final int MAX_RUN_LEVELS = 7; - - - // configuration phase - Map<String, String> avoidMap = new HashMap<String, String>(); - for(int i = 0; i < ComponentLifecycle.MAX_RUN_LEVELS; i++) { - for(Map.Entry<String, ComponentLifecycle> entry : ((Map<String, ComponentLifecycle>)classifiedComponents[i]).entrySet()) { - ComponentLifecycle component = entry.getValue(); - String implClassName = ComponentContext.getTargetClass(component).getName(); - s_logger.info("Configuring " + implClassName); - - if(avoidMap.containsKey(implClassName)) { - s_logger.info("Skip configuration of " + implClassName + " as it is already configured"); - continue; - } - - try { - component.configure(component.getName(), 
component.getConfigParams()); - } catch (ConfigurationException e) { - s_logger.error("Unhandled exception", e); - throw new RuntimeException("Unable to configure " + implClassName, e); - } - - avoidMap.put(implClassName, implClassName); - } - } - - - // starting phase - avoidMap.clear(); - for(int i = 0; i < ComponentLifecycle.MAX_RUN_LEVELS; i++) { - for(Map.Entry<String, ComponentLifecycle> entry : ((Map<String, ComponentLifecycle>)classifiedComponents[i]).entrySet()) { - ComponentLifecycle component = entry.getValue(); - String implClassName = ComponentContext.getTargetClass(component).getName(); - s_logger.info("Starting " + implClassName); - - if(avoidMap.containsKey(implClassName)) { - s_logger.info("Skip configuration of " + implClassName + " as it is already configured"); - continue; - } - - try { - component.start(); - - if(getTargetObject(component) instanceof ManagementBean) - registerMBean((ManagementBean)getTargetObject(component)); - } catch (Exception e) { - s_logger.error("Unhandled exception", e); - throw new RuntimeException("Unable to start " + implClassName, e); - } - - avoidMap.put(implClassName, implClassName); - } - } - -
-
- Network Architecture - Networking is the most important component in CloudStack, which serves network services from layer 2 to layer 7. In GsoC, fortunately I have a chance to learn about CloudsStack network architecture. It's really amazing. CloudStack's networking is divided to three parts: - NetworkGuru - NetworkGuru are responsible for: - - Design and implementation of virtual networks. - IP adress management. - - See full description about Network Guru on my wiki post: Add Xen/XCP support for GRE SDN controller - NetworkElement - NetworkElement in my opinion is the most important in CloudStack's networking. It represents components that are present in network. Such components can provide any kind of network service or support the virtual networking infrastructure and their interface is defined by com.cloud.network.element.NetworkElement. There are two things we attend in NetworkElement: services and elements. - CloudStack currently support network services below: - - Dhcp service. - Connectivity service. - Firewall service. - Load Balancing service. - Network ACL service. - Port Forwarding service. - SourceNat service. - StaticNat service. - UerData service. - Vpc service. - - Many Element implemented these above services. They are: - - MidonetElement. - BigSwitchVnsElement. - NiciraNvpElement. - BaremetalElement. - VirtualRouterElement. - VpcVirtualRouterElement. - CiscoVnmcElement. - JuniperSrxExternalFirewallElement. - ElasticLbElement. - F5ExternalLbElement. - CloudZoneNetworkElement. - BaremetalPxeElement. - BaremetalUserdataElement. - DnsNotifier. - OvsElement. - SecurityGroupElement. - - See full description about Network Element on my wiki post: Add Xen/XCP support for GRE SDN controller - In addition, Elements willing to support network services have to implement corresponding methods from ServicesProvider interfaces. For example, NiciraNvpElement want to support staticNat rule so it has to override applyStaticNats method. 
- NetworkManager - Network Manager handle the resources managed by the network elements. They are also implemented as many other "resource" managers in CloudStack. - For instance, the manager for setting up L2-in-L3 networks with Open vSwitch is OvsTunnelManagerImpl, whereas Virtual Router lifecycle is managed by VirtualApplianceManagerImpl. - In the project, I'm going to implement L3 services for sdn controller, so I need to understand how network services implement. -
-
- Network Services - As I said in previous session, network services are represented in ServiceProvider interfaces. There are currently 12 service providers including: Dhcp, Firewall, IpDeployer, LoadBalancing, NetworkACL, PortForwarding, RemoteAccessVpn, Site2siteVpn, SourceNat, StaticNat, UserData and Vpc. In this session, I'll focus on L3 services implemented in CloudStack such as FirewallRule, PortForwardingRule, StaticNatRules, etc. All services are implemented at NetworkElement and every elements including network plugins (nicira nvp, bigswitch vns,...), which is willing to support them, must override from NetworkElement. For a clearly exlaination, I'll take the StaticNat service implemented in Nicira NVP plugin, source code can be found in NiciraNvpElement.java. - NiciraNvpElement firstly has to check whether it can handle the StaticNat service via canHandle() method: - - if (!canHandle(network, Service.StaticNat)) { - return false; - } - - - protected boolean canHandle(Network network, Service service) { - s_logger.debug("Checking if NiciraNvpElement can handle service " - + service.getName() + " on network " + network.getDisplayText()); - - //Check if network has right broadcast domain type - if (network.getBroadcastDomainType() != BroadcastDomainType.Lswitch) { - return false; - } - - //Check if NiciraNVP is the provider of the network - if (!_networkModel.isProviderForNetwork(getProvider(), - network.getId())) { - s_logger.debug("NiciraNvpElement is not a provider for network " - + network.getDisplayText()); - return false; - } - - //Check if NiciraNVP support StaticNat service - if (!_ntwkSrvcDao.canProviderSupportServiceInNetwork(network.getId(), - service, Network.Provider.NiciraNvp)) { - s_logger.debug("NiciraNvpElement can't provide the " - + service.getName() + " service on network " - + network.getDisplayText()); - return false; - } - - return true; - } - - NiciraNvp checks whether it is the provider of the network and it can support StaticNat 
service or not. After the checking, it makes a staticNat rely on their own Logical Router, that I won't report detail here. - The sequence diagram for applying a L3 service is described below: - - - - - network_service.png: Network services implementation sequence diagram. - - After understanding network architecture and services implementation, I decided to improve Ovs plugin to support L3 services. Because it's the native sdn controller, I want to use Virtual Router for L3 services deployment. This work will be done when I call L3 services execution from OvsElement to VirtualRouterManager. With Xen hosts, VirtualRouterElement execute L3 services via xapi plugin calls. I make a flow which describes more detail about the process below - - - - - l3_services.png: Layer 3 services implementation in Ovs plugin. - - In Xen, all of L3 services are executed via a Xapi plugin naming "vmops". Default, Virtual Routers (VR) control and manage network services. In this case, "vmops" forwards request to network-responsibility shellscripts such as call_firewall.sh or call_loadbalancer.sh. They then parse parameters and call to shellscripts placed in VR via ssh. 
For example, if we define a staticNat rule, the process occurs as follow: - VR Manager (VirtualNetworkApplianceManager) send staticNat command to AgentManager: - - try { - answers = _agentMgr.send(router.getHostId(), cmds); - } catch (OperationTimedoutException e) { - s_logger.warn("Timed Out", e); - throw new AgentUnavailableException("Unable to send commands to virtual router ", router.getHostId(), e); - } - - AgentManager makes a xapi plugin call to host containing the VR - - String result = callHostPlugin(conn, "vmops", "setFirewallRule", "args", args.toString()); - - "vmops" forwards the request to "call_firewall" shellscript - - @echo - def setFirewallRule(session, args): - sargs = args['args'] - cmd = sargs.split(' ') - cmd.insert(0, "/usr/lib/xcp/bin/call_firewall.sh") - cmd.insert(0, "/bin/bash") - try: - txt = util.pread2(cmd) - txt = 'success' - except: - util.SMlog(" set firewall rule failed " ) - txt = '' - - return txt - - "call_firewall" parses the parameters and directly request to a shellscript placed in VR via ssh command - - ssh -p 3922 -q -o StrictHostKeyChecking=no -i $cert root@$domRIp "/root/firewall.sh $*" - - That's all. "firewall" script set some iptable rules for executing the staticNat rule -
-
- Opendaylight Controller - The project needs to add an open source OpenFlow controller, and I decided to choose Opendaylight. - Opendaylight (ODL) is an interesting experience that I have had in GSoC. Before starting the project, I was still confused among the many open source OpenFlow controllers such as POX, NOX, Beacon, Floodlight, Opendaylight... Honestly, I did not have much knowledge of the OpenFlow protocol or of open source SDN controllers at the beginning of the project. When the project was in progress, I chose Floodlight, a safe solution because of its rich functionality and good documentation. However, Sebastien Goasguen, CloudStack GSoC manager, recommended that I try Opendaylight. From the collected information, I found that Opendaylight is getting a lot of attention from the community. - At the moment, ODL has three main projects: - - Opendaylight Controller. - Opendaylight Network Virtualization Platform. - Opendaylight Virtual Tenant Network. - - It also has six incubating projects: - - YANG Tools. - LISP Flow Mapping. - OVSDB Integration. - Openflow Protocol Library. - BGP-LS/PCEP. - Defense4All. - - For integrating Opendaylight to control and manage network services, I chose the ODL Controller project, which is developed by Cisco programmers. The ODL controller is pure software and, as a JVM application, it can be run on any OS as long as it supports Java. The structure of the ODL controller is shown below: - - - - - odl_structure.jpg: Opendaylight Controller architecture. - - The structure is separated into three layers: - - Network Apps and Orchestration: the top layer consists of applications that utilize the network for normal network communications. Also included in this layer are business and network logic applications that control and monitor network behavior. 
- Controller Platform: the middle layer is the framework in which the SDN abstractions can manifest; providing a set of common APIs to the application layer (commonly referred to as the northbound interface), while implementing one or more protocols for command and control of the physical hardware within the network (typically referred to as the southbound interface). - Physical and Virtual Network Devices: The bottom layer consists of the physical and virtual devices, switches, routers, etc., that make up the connective fabric between all endpoints within the network. - - This controller is implemented strictly in software and is contained within its own Java Virtual Machine (JVM). - Source code can be cloned from git: - - git clone https://git.opendaylight.org/gerrit/p/controller.git - - Applications make requests to the ODL Northbound API via HTTP. Currently, ODL does not support many services. All REST APIs can be found here: ODL Controller REST API - For example, we can query the list of existing flows configured on a node in a given container. - - GET http://controller-ip/controller/nb/v2/flow/{containerName}/{nodeType}/{nodeId} - {containername}: name of the container. The container name for the base controller is "default" - {nodeType}: type of the node being programmed - {nodeId}: node identifier - - Or we can add a new flow - - POST http://controller-ip/controller/nb/v2/flow/{containerName}/{nodeType}/{nodeId}/{name} - - with request body in XML or JSON format - - { "actions" : [ "...", ... ], - "nwDst" : "...", - "hardTimeout" : "...", - "installInHw" : "...", - "tosBits" : "...", - "cookie" : "...", - "node" : { "id" : "...", "type" : "..." }, - "dlDst" : "...", - "name" : "...", - "nwSrc" : "...", - "vlanPriority" : "...", - "protocol" : "...", - "priority" : "...", - "vlanId" : "...", - "tpDst" : "...", - "etherType" : "...", - "tpSrc" : "...", - "ingressPort" : "...", - "idleTimeout" : "...", - "dlSrc" : "..." 
} - - The following python client written by Dwcarder describes in more detail how to use the REST API:https://github.com/dwcarder/python-OpenDaylight/blob/master/OpenDaylight.py - In this project, I learned how to make HTTP requests from CloudStack to ODL for controlling and managing network services. However, there is a problem: ODL currently doesn't support L2 configuration, while integrating ODL into CloudStack requires it. I found that an incubating project, led by Brent Salisbury and Evan Zeller from the University of Kentucky, is currently trying to integrate the OpenvSwitch database management protocol into ODL, which will allow ODL to view, modify and delete OpenvSwitch objects such as bridges and ports by way of the OpenvSwitch database. In short, this project mainly creates a module that acts like an OVSDB client and uses JSON-RPC for remote management. I talked to them and jumped into this project. Thus, I'll do extra work in the ODL community to improve the ODL Controller's support for L2 configuration while still integrating ODL into CloudStack by making a new ODL plugin with the same behavior as NiciraNvp and Ovs. - Full information about the incubating project can be found here:https://wiki.opendaylight.org/view/Project_Proposals:OVSDB-Integration - In the next section I will give a short description of XenAPI (also called Xapi), which applications use to interact with virtualization resources in Xen hosts. -
-
- Xen API - There are many tool stacks we can use to manage Xen hosts, such as: XL, Xapi, libvirt or Xend. Of which, Xapi is the default. Xapi (or Xen API) is called from applications to control and manage virtualization resources in Xen hosts via XML-RPC. Xapi is the core component of XCP and XenServer and writen by Ocaml language. - It's possible to talk directly to Xapi using XML-RPC. This is a way to make remote procedure calls using http requests. In fact, it's possible to send and receive messages using telnet but this is not recommended. The XML-RPC calls are the fixed standard, but we also have bindings to that XML-RPC for Python, C and Java. - For example about using XML-RPC calls, I make a simple request written by python to list all VMs on a Xen host. - First thing we need to import XenAPI lib: - - >>> import XenAPI - - Then we have to authenticate to XenServer or XCP addressed from url with user and password - - >>> session = XenAPI.Session('https://url') - >>> session.login_with_password('user','password') - - If this works, we've done the hard bit and established communications with our server. Function bellow will list all Vms on this server. - - >>> session.xenapi.VM.get_all() - - The answer should be something like: - - ['OpaqueRef:7b737e4f-58d8-b493-ea31-31324a2de528', 'OpaqueRef:7237b8af-b80c-c021-fbdc-68146d98d7f5', ........., 'OpaqueRef:c3b752b9-1926-9ceb-f36a-408497c3478b'] - - Which is a list of strings, each of which represents a unique identifier for a particular 'object' on the server. In this case of each 'OpaqueRef' represents a virtual machine. For each VM we can get the name (name_label) - - >>> [session.xenapi.VM.get_name_label(x) for x in session.xenapi.VM.get_all()] - - There are a lot of machines in this list. Some of them however are 'template Vms', frozen copies which can't actually run, but which can be cloned in oder to make real virtual machines. 
We can find out which Vms are templates by calling the VM.get_is_a_template() function. So let's combinate the two in order to produce a list of all the real Vms on my server: - - >>> [session.xenapi.VM.get_name_label(x) for x in session.xenapi.VM.get_all() if not session.xenapi.VM.get_is_a_template(x)] - - The answer should be something like: - - ['Debian Etch 4.0 (2)', 'Debian Etch 4.0 (1)', 'test9', 'test4', 'Control domain on host: ebony', 'Control domain on host: localhost.localdomain', 'test3', 'Debian Sarge 3.1 (1)', 'test2', 'Debian Etch 4.0 (3)', 'test1', 'test3', 'test7', 'test5'] - - Finally it's only polite to log out of the server. This allows it to garbage collect the no-longer active session. - - >>> session.logout() - - Full python script can be found here: Xapi python client - We can find Xapi source code from: https://github.com/xen-org/xen-api - Xapi come with some main classes, each of them refer to a virtual resource object in Xen such as: - - VM: refer to virtual machine. - VIF: refer to virtual NIC. - VDI: refer to virtual volume or hard disk. - ... - - Full information about Xapi source code we can find here. http://docs.vmd.citrix.com/XenServer/6.0.0/1.0/en_gb/api/ Click on each item we can see more detail. - Xapi plugin - Xapi has an extension mechanism that allows one to install a Python script (usually but it can be any executable) on the Xen host, and then call that through the Xapi. Writing a Xapi plugin in Python is simplified by using the XenAPIPlugin module, which is by default installed in dom0 in XCP. In my GsoC project, I have to call some plugin scripts to control and manage virtual switches. For example, I inserted a new function to get network name-label in vmops script. - Then, we can call it directly from XE command line or via XML-RPC. Here is a simple call from XE: - - $xe host-call-plugin host-uuid=host-uuid plugin=vmops fn=getLabel - - If the plugins has some arguments, it should be inserted with "args:" keyword. 
- In ACS, almost plugins are called from CitrixResourceBase.java. With my above function, I inserted a new method into CitrixResourceBase.java and called to the plugin as below: - - private String getLabel() { - Connection conn = getConnection(); - String result = callHostPlugin(conn, "ovstunnel", "getLabel"); - return result; - } - - Of which, Connection class will init a session to Xen host and callHostPlugin method executes a XML-RPC call to plugin. - Note that every Xapi plugin scripts must be placed into /etc/xapi.d/plugins. -
-
- What I've done - In one-and-a-half month, I have understood all of above knowledge and finished two things: - - improve gre controller to support XCP. - re-factor GRE source code by following NiciraNVP plugin design. - - improve gre controller to support XCP - From the understanding of how the native SDN works, a small patch has been made to help it works with Xen Cloud Platform (XCP) version 1.6. Without the patch, this controller can serve XenServer only, the commercial version of XCP. I did try SDN with XCP and debug to find out what errors are and why they occur. After some efforts, I figured out following problems: - - The SDN controller has to know what interface it'll deploy GRE tunnels. To do this check, it looks into network to find out the PIF's interface. It has a network name-label, which user defined in the deploy zone phase. If not, it will be replaced by a default label. However, XCP's network has no user-defined or default name-label. Therefore in this step I have made a trick. I used whatever name-label found in the XCP host to bypass this check. - When creating an OVS bridge, the controller creates a new dom0 vif, plugs to the bridge and immediately unplugs it. This action aims to ask XenServer create the bridge without running ovs-vsctl or brctl script. I saw that it is not very important to XCP hosts and also generates an error from xenopsd daemon, so I ignored this step. - The script playing a direct role to interact with openvswitch is ovstunnel. It requires a lib named cloudstack_pluginlib, which does not exist in XCP. Thus, I inserted this file into copying process from CloudStack to XCP when add-host phase occurs. - The "setup_ovs_bridge" function in ovstunnel takes a look into XenServer version to act a blocking IPv6. However, product_version parameter does not exist on XCP. It uses platform_version parameter instead. So, I decided to ignore this step. - - The patch is already committed to sdnextensions branch. 
It is also the primary branch I have been working on this GSoC period. - re-factor GRE source code by following NiciraNVP plugin design - GRE source code was re-factored with the following changes: - - add Connectivity service checking: All of the L2 configuration methods now have to check whether the Ovs plugin can handle the Connectivity service. - move commands / answers to a new package: com.cloud.agent.api. - add new NetworkProvider: Ovs. - add L3 services to Ovs Capabilities: Ovs Capability is now enabled for such L3 services as SourceNat, StaticNat, PortForwarding, RedundantRouter, Gateway. The L2 service Connectivity is also enabled. - add L3 services prototype code to OvsElement.java - - With the knowledge about CloudStack's network architecture I have learned and represented above, I made a patch which permits guest networks to reach each other via private IP addresses without using VPC mode. The proposal can be found here: Routing between guest networks - In the coming days, I will do the following things: - - implement L3 services with Virtual Router. - improve Ovs to support the KVM hypervisor. - add a new ODL plugin using the ODL controller to control and manage network services. - -
-
diff --git a/docs/en-US/gsoc-midsummer-shiva.xml b/docs/en-US/gsoc-midsummer-shiva.xml deleted file mode 100644 index c26c5a808a5..00000000000 --- a/docs/en-US/gsoc-midsummer-shiva.xml +++ /dev/null @@ -1,283 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Mid-Summer Progress Updates - This section describes Mid-Summer Progress of Shiva Teja - "Create A New Modular UI for Apache CloudStack" -
- Introduction - - The progress on my project has been very smooth so far and I got to learn a lot. I started with learning git and backbone.js and then went on to learn angular.js and eventually made a basic usable UI with angular.js. Sebastien has been guiding me and helping me throughout the period. Both CloudStack and Angular.js communities have been helpful along the way. - - I am happy with the progress so far and it is possible to reach the goals with a slightly faster pace. -
-
- Progress and Experience So Far - - I made a basic UI from which a user can list a bunch of collections, launch VMs (and similar actions), edit configurations, add accounts, search through some of the fields. I've also added a very basic notification service and work is in progress for making a dropdown notification similar to the current UI. - - - I started by learning backbone.js and improving the prototype that I've made with my proposal. Then I looked into the current UI's code and tried to make plugins. There was a lot of repeated DOM manipulation and ajax calls throughout the UI. Then I almost took a week looking into angular.js and experimenting with it. I finally chose angular.js because it does a lot more than backbone and lets you do the same stuff in less and more elegant code, thus, easily maintainable. It was obvious that most of the repetitive DOM manipulation can be removed with angular's directives and the AJAX calls with, obviously, models. This is one of the important reasons I feel that CloudStack should move from just jQuery to an MVC like angular. Apart from code reusability for custom UIs, angular offers much less, more structured and elegant code. Rolling out new features becomes a much easier task. Implementing features like Quick View or UI tooltips that are present in the current UI is just a matter of implementing another directive. - - - Learning the framework and developing the app while following best practices was not easy at the beginning. I had difficulties in deciding things like the structure of the app. Looking into existing apps like angular-app and famous threads on the mailing list helped. - - - Another slightly challenging task was to design the angular.js models for cloudstack. The Angular.js documentation says to just use any Plain Old Javascript Objects. Given that statement, there are so many possible ways of doing it. So deciding the best one was frustrating at the beginning, but turned out to be simple. 
A rule of thumb that I think should be followed throughout the app is to return promises whenever possible. Promises remove unnecessary callbacks and offers a much more elegant structuring of code. All the models and collections in the current UI return promises which allows us to take actions after the specified actions on models and collections takes place. - - - Making complex directives can also be frustrating at the beginning. Videos from egghead.io came handy for understanding directives in depth. I feel that these are the next most powerful things that angular offers after 'the ability to use POJOs for models'. All the DOM manipulations can be put into directives and can be reused easily. - -
-
- Screenshots - I'll try to explain the things that you can do with the UI developed so far with some screenshots and a bit of the code assosciated -
- Instances tab - - - - - - - - instances-screen.png: Instances tab - - - - - Simple confirmation modal when you click start vm button - - - - - - start-vm-screen.png: Start vm screen - - - This is simple directive which launches such modal on click and can perform actions for 'yes' and 'no' clicks.(can be found at static/js/common/directives/confirm.js). In this case it'll call model.start() which will call the requester service to start the vm - - - And the vm is running! - - - - - - vm-running.png: Running vm - - - Labels automatically get updated by watching model changes - - - Async calls - - - - - - async-calls.png: Example Async Calls - - - Async calls are taken care by a service named requester which returns a promise. It resolves the promise when the query-async-job request returns with a result - - -
- -
- Edit Configurations - - - - - - - - configurations-screen.png: Configuration Screen - - - I've moved the description of the configurations from a column in the current UI to a tooltip. These tooltips appear when you hover over the configurations. - - - An input text box like this appears when you click edit - - - - - - edit-configuration.png: Configurations edit screen - - - This is handled by edit-in-place directive that I wrote - - - This shows that the configuration has been updated and the basic notification service that pops up - - - - - - configuration-edit-success.png: Configurations edit success screen - - - It is as simple as calling model.update when the save button is clicked. As it returns a promise, it can be used to call the notification service whenever there are model changes. - - - I tried my best to give an overview on code along with the screenshots. For more on the code, I'd recommend going through it thoroughly, as I'd love to have someone look at my code point out mistakes at this early stage. -
-
-
- RESTful API - I worked on the RESTful API for a while. I read a lot about REST but I could not get an elegant way of designing the API for the non RESTful verbs like start, stop etc. I have finished working the on the verbs that are RESTful(like list, update, delete..etc). The API can also handle sub-entities like listing virtual machines in a domain - Here are some screenshots: - - - List all virtual machines. Anything similar should work - - - - - - list-virtualmachines.png: List All Virtual Machines - - - - - List the properties of a specific vm - - - - - - list-specific-vm.png: List Properties of a specific vm - - - - - List virtual machines of a domain. Anything similar should work - - - - - - list-domain-vms.png: List virtual machines of a domain - - - - - Create an account with a POST request. You can also do update, delete etc. - - - - - - create-account-post.png: Create Account with POST request - - - - -
-
- Miscellaneous - There are lot of other things that I've experimented with along the way which are not shown in screenshots. Although my initial timeline was designed keeping backbone.js in mind, I've been following a similar version of it till now. It has been a bit slow as I had to learn and implement at the same time. I've been rolling out things very fast for the past couple of weeks as I am good to go with most of the angular.js concepts. The project can be finished very easily if I continue the same pace. Here's a list of important things that will be implemented next, in the same order(I have already experimented with most of them.) - - - Authentication handling: This is a slightly tough task. I looked into existing apps and made a basic security service which can be used for this purpose. - - - Infinite scroll directive: I am loading all the data at a time in the current UI. This does not work well with huge production clouds. Again, changes the structure of collections slightly, important thing to be taken care of before doing further development. - - - A modal wizard directive required for adding instances. - - - After finishing those three I'd be equipped with enough UI stuff that can let me concentrate on my models. I'll try to add as many functionalities to the models which can easily used throught this UI, and also reusable in custon UIs. After finishing these, I'll implement a better notification system. - - - Tests: Although I should've done these parallelly while developing the UI, given the lack of experience, it took me some time to realize that tests are important. I have setup a test environment karma and I'll soon write tests for whatever I've written so far. - - -
-
- Experience gained working on OSS and CloudStack - Working on OSS has been very different and offered much more to learn what a university project could offer me. Asking and answering questions is one of the important thing that really helped me and I feel this was the important part of the development so far. Although I was a bit shy to ask questions at the beginning, I really loved the way angular.js community has helped even for silly questions. Soon, I realized the same happens on the CloudStack mailing list or any OSS mailing list for that matter. Solving others problems also helps a lot in building up knowledge. So, answering questions is also one of the important thing about working on Open Source Software. Being nice and polite on the public discussions like this improves personality. I am really glad to be a part of it now and very thankful to Google for such a wonderful program that introduces students to real-world software problems at very early stages of student's experience. - I did not know much about CloudStack itself when I started working on the project. Following the discussions on mailing list, I googled for different terms used, watched a few videos on cloud and I'm really interested in learning more. I really hope to join the real CloudStack development soon. -
-
- Conclusion - You can find a demo of the UI here live in action. - I am really happy with the progress and experience so far. The goals of the project look easily reachable with the experience I have now. I still have RESTful API to be handled at the end. So I'll have to finish most of the project by the end of the august. Each of the task in the next todo list I've mentioned above should not take much time if things go well and models required for the UI should be ready by august last week so that I can take care of any UI specific things and RESTful stuff. - - Here's small list of things that I've learned so far: - - - Git concepts, along with using JIRA and Review Board. - - - Some advanced JS concepts and JS frameworks like jQuery, backbone.js, angular.js. Using Twitter Bootstrap for faster UI development. - - - Basics of designing and structuring RESTful APIs - - - Cloudmonkey's code and usage. I had to look into its code when I was designing the RESTful API. - - - A bit more in depth understanding of Flask web framework - - - Exposure to testing environment like karma and testing the UI in different browsers - - - Code written so far is available here and here - I thank Google and CloudStack for giving me this oppurtunity, Sebastien and Kelcey for helping me along the way. -
-
diff --git a/docs/en-US/gsoc-midsummer.xml b/docs/en-US/gsoc-midsummer.xml deleted file mode 100644 index 74ca62a107e..00000000000 --- a/docs/en-US/gsoc-midsummer.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Mid-Summer Progress Updates - This chapter describes the progress of each &PRODUCT; Google Summer of Code project. - - - - - - - - diff --git a/docs/en-US/gsoc-proposals.xml b/docs/en-US/gsoc-proposals.xml deleted file mode 100644 index 7c4b50c6511..00000000000 --- a/docs/en-US/gsoc-proposals.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Google Summer of Code Proposals - This chapter contains the five proposals awarded to &PRODUCT; for the 2013 Google Summer of Code project. - - - - - - - - diff --git a/docs/en-US/gsoc-shiva.xml b/docs/en-US/gsoc-shiva.xml deleted file mode 100644 index fe36d8ef050..00000000000 --- a/docs/en-US/gsoc-shiva.xml +++ /dev/null @@ -1,70 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Shiva Teja's 2013 GSoC Proposal - This chapter describes Shiva Teja's 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy paste of the submitted proposal. -
- Abstract - - The aim of this project is to create a new modular UI for Apache CloudStack using Bootstrap by Twitter and Backbone.js. To achieve this easily, I'll be creating a RESTful wrapper API on top of the current CloudStack API. I hope this project will make custom UIs for CloudStack very easy. - - Why does CloudStack need a new UI? - - The current UI cannot be reused easily to make a custom UI. The UI I will be making using backbone.js can be reused very easily to make custom UIs. The models, views, routers etc can remain the same in all the UIs. The user interface can be changed just by changing the templates. Check the implementation details below for further details. - - Why does it need a RESTful wrapper API? - - Backbone.js heavily depends on RESTful architecture. Making a new UI with backbone.js using a query based API might not be easy. -
-
- List of deliverables - - A new UI for CloudStack(with almost all features in the current UI and new ones, if any). - A RESTful wrapper API on top of the CloudStack API - Some documentation about using this UI to make a custom UI. - -
-
- Approach - Wrapper API: Backbone.js, by default, uses four HTTP methods(GET, PUT, POST, DELETE) for communicating with the server. It uses GET to fetch a resource from the server, POST to create a resource, PUT to update the resource and DELETE to delete the resource. A query based API can probably be used to make the UI by overriding backbone's default sync function. But it makes more sense to have an API which supports the above mentioned method and is resource based. This RESTful API works on top of the CloudStack API. The main task is to map the combinations of these HTTP methods and the resources to appropriate CloudStack API command. The other task is to decide on how the URLs should look like. Say for starting a virtual machine, for it to be RESTful, we have to use POST as we are creating a resource, or a PUT as we are changing the state of a virtual machine. So the possible options on the URL could be to do a POST /runningvirtualmachines and respond with 201 Created code or a PUT on /virtualmachines/id and respond with 200 OK. If these are decided, the wrapper can be generated or be written manually, which can use defined patters to map to appropriate CloudStack API commands(Similar to what cloudmonkey does. See this prototype. I can use cloudmonkey's code to generate the required API entity verb relationships. Each verb will have a set of rules saying what method should be used in the RESTful API and how should it look like in the URL. Another possible way could be to group entities first manually and write the wrapper manually(something like zone/pods/cluster). Some possibilities have been discussed in this thread. - - UI: It will be a single page app. It'll use client side templating for rendering. This makes it very easy to make a custom UI because it can be achieved just by changing the templates. Backbone views will make use of these templates to render the appropriate models/collections. 
A completely new interface can be written just by changing the templates. Javascript code can completely remain the same. The views will take care of appropriate DOM events. Such event will correspond to appropriate model/collection chages, thus causing appropriate API calls. -
-
- Approximate Schedule - Till June 17 - Decide on what the RESTful API should look like and design algorithms to generate the wrapper. - July 5(soft deadline), July 10(hard deadline) : Wrapper API will be ready. - July 12(soft) - July 15(hard): Make basic wireframes and designs for the website and get them approved. - July 29(mid term evaluation) : All the basic models, views, routes of the UI should be ready along with a few templates. - August 15(hard deadline, shouldn't take much time actually) - A basic usable UI where users can just list all the entities which are present in the current UI's main navigation( Like Instances, Templates, Accounts etc) - September 1(hard) - From this UI, users should be able to launch instances, edit settings of most of the entities. - September 16(Pencil down!) - Fix some design tweaks and finish a completely usable interface with functions similar to current UI. - September 23 - Finish the documentation on how to use this UI to make custom UIs. -
-
- About Me - I am a 2nd year computer science undergrad studying at IIT Mandi, India. I've been using Python for a year and a half now. I've used Django, Flask and Tornado for my small projects. Along with Python, I use C++ for competitive programming. Recently, I fell in love with Haskell. I've always been fascinated about web technologies. -
-
diff --git a/docs/en-US/gsoc-tuna.xml b/docs/en-US/gsoc-tuna.xml deleted file mode 100644 index aa9726f095c..00000000000 --- a/docs/en-US/gsoc-tuna.xml +++ /dev/null @@ -1,231 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Nguyen's 2013 GSoC Proposal - This chapter describes Nguyen 2013 Google Summer of Code project within the &PRODUCT; ASF project. It is a copy paste of the submitted proposal. -
- Add Xen/XCP support for GRE SDN controller - - "This project aims to enhance the current native SDN controller in supporting Xen/XCP and integrate successfully the open source SDN controller (FloodLight) driving Open vSwitch through its interfaces." - -
-
- Abstract - - SDN, standing for Software-Defined Networking, is an approach to building data network equipment and software. It was invented by ONRC, Stanford University. SDN basically decouples the control from physical networking boxes and gives it to a software application called a controller. SDN has three parts: controller, protocols and switch; In which, OpenFlow is an open standard to deploy innovative protocols. Nowadays, more and more datacenters use SDN instead of traditional physical networking boxes. For example, Google announced that it completely built its own switches and SDN controllers for use in its internal backbone network. - - OpenvSwitch, an open source software switch, is widely used as a virtual switch in virtualized server environments. It can currently run on any Linux-based virtualization platform, such as: KVM, Xen (XenServer, XCP, Xen hypervisor), VirtualBox... It also has been ported to a number of different operating systems and hardware platforms: Linux, FreeBSD, Windows and even non-POSIX embedded systems. In cloud computing IaaS, using OpenvSwitch instead of a Linux bridge on compute nodes becomes an inevitable trend because of its powerful features and the ability of OpenFlow integration as well. - - In CloudStack, we already have a native SDN controller. With the KVM hypervisor, developers can easily install the OpenvSwitch module; whereas, Xen even has a built-in one. The combination of SDN controller and OpenvSwitch gives us many advanced things. For example, creating GRE tunnels as an isolation method instead of VLAN is a good try. In this project, we are planning to support GRE tunnels in the Xen/XCP hypervisor with the native SDN controller. When it's done, substituting open-source SDN controllers (floodlight, beacon, pox, nox) for the current one is an amazing next step. -
-
- Design description - - CloudStack currently has a native SDN Controller that is used to build meshes of GRE tunnels between Xen hosts. There consists of 4 parts: OVS tunnel manager, OVS Dao/VO, Command/Answer and Ovs tunnel plugin. The details are as follow: - - - OVS tunnel manager: Consist of OvsElement and OvsTunnelManager. - - - OvsElement is used for controlling Ovs tunnel lifecycle (prepare, release) - - - - prepare(network, nic, vm, dest): create tunnel for vm on network to dest - - - release(network, nic, vm): destroy tunnel for vm on network - - - - OvsTunnelManager drives bridge configuration and tunnel creation via calling respective commands to Agent. - - - - destroyTunnel(vm, network): call OvsDestroyTunnelCommand to destroy tunnel for vm on network - - - createTunnel(vm, network, dest): call OvsCreateTunnelCommand to create tunnel for vm on network to dest - - - - OVS tunnel plugin: These are ovstunnel and ovs-vif-flows.py script, writen as XAPI plugin. The OVS tunnel manager will call them via XML-RPC. - - - Ovstunnel plugin calls corresponding vsctl commands for setting up the OVS bridge, creating GRE tunnels or destroying them. - - - - setup_ovs_bridge() - - - destroy_ovs_bridge() - - - create_tunnel() - - - destroy_tunnel() - - - - Ovs-vif-flow.py clears or applies rule for VIFs every time it is plugged or unplugged from a OVS bridge. - - - - clear_flow() - - - apply_flow() - - - - OVS command/answer: It is designed under the format of requests and answers between Manager and Plugin. These commands will correspondence exactly the mentioned manipulations. 
- - - - OvsSetupBridgeCommand - - - OvsSetupBridgeAnswer - - - OvsDestroyBridgeCommand - - - OvsDestroyBridgeAnswer - - - OvsCreateTunnelCommand - - - OvsCreateTunnelAnswer - - - OvsDestroyTunnelCommand - - - OvsDestroyTunnelAnswer - - - OvsFetchInterfaceCommand - - - OvsFetchInterfaceAnswer - - - - OVS Dao/VO - - - - OvsTunnelInterfaceDao - - - OvsTunnelInterfaceVO - - - OvsTunnelNetworkDao - - - OvsTunnelNetworkVO - - -
-
- Integrate FloodLight as SDN controller - - I think that we maybe deploy FloodLight Server as a new SystemVM. This VM acts like current SystemVMs. One Floodlight SystemVM per Zone, so it can manage for virtual switches under this zone. - -
-
- Deliverables - - GRE has been used as isolation method in CloudStack when deploy with Xen/XCP hosts. - - - - User set sdn.ovs.controller parameter in Global Setting to true. He deploys Advance Networking and chooses GRE as isolation method - - - Make use of Floodlight instead of native SDN controller. - - -
-
- About me - - My name is Nguyen Anh Tu, a young and enthusiastic researcher in Cloud Computing Center - Viettel Research and Development Institute, Vietnam. Since last year, we has built Cloud Platform based on CloudStack, starting with version 3.0.2. As the results, some advanced modules were successfully developed, consists of: - - - - Encrypt Data Volume for VMs. - - - Dynamic Allocate Memory for VMs by changing policy on Squeeze Daemon. - - - AutoScale without using NetScale. - - - Deploy a new SystemVM type for Intrustion Detection System. - - - - Given the working experience and recent researches, I have obtained remarkably the understanding of specific knowledges to carry on this project, details as follow: - - - - Java source code on CloudStack: Design Pattern, Spring framework. - - - Bash, Python programming. - - - XAPI plugin. - - - XML-RPC. - - - OpenVSwitch on Xen. - - - - Other knowledges: - - - - XAPI RRD, XenStore. - - - Ocaml Programming (XAPI functions). - - -
-
diff --git a/docs/en-US/guest-ip-ranges.xml b/docs/en-US/guest-ip-ranges.xml deleted file mode 100644 index c49dc6a76f8..00000000000 --- a/docs/en-US/guest-ip-ranges.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Guest IP Ranges - The IP ranges for guest network traffic are set on a per-account basis by the user. This - allows the users to configure their network in a fashion that will enable VPN linking between - their guest network and their clients. - In shared networks in Basic zone and Security Group-enabled Advanced networks, you will have - the flexibility to add multiple guest IP ranges from different subnets. You can add or remove - one IP range at a time. For more information, see . -
diff --git a/docs/en-US/guest-network.xml b/docs/en-US/guest-network.xml deleted file mode 100644 index 692eb29f525..00000000000 --- a/docs/en-US/guest-network.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Guest Network - In a &PRODUCT; cloud, guest VMs can communicate with each other using shared infrastructure with the security and user perception that the guests have a private LAN. - The &PRODUCT; virtual router is the main component providing networking features for guest traffic. -
diff --git a/docs/en-US/guest-nw-usage-with-traffic-sentinel.xml b/docs/en-US/guest-nw-usage-with-traffic-sentinel.xml deleted file mode 100644 index d6fc10bca52..00000000000 --- a/docs/en-US/guest-nw-usage-with-traffic-sentinel.xml +++ /dev/null @@ -1,72 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Guest Network Usage Integration for Traffic Sentinel - To collect usage data for a guest network, &PRODUCT; needs to pull the data from an external - network statistics collector installed on the network. Metering statistics for guest networks - are available through &PRODUCT;’s integration with inMon Traffic Sentinel. - Traffic Sentinel is a network traffic usage data collection package. &PRODUCT; can feed - statistics from Traffic Sentinel into its own usage records, providing a basis for billing users - of cloud infrastructure. Traffic Sentinel uses the traffic monitoring protocol sFlow. Routers - and switches generate sFlow records and provide them for collection by Traffic Sentinel, then - &PRODUCT; queries the Traffic Sentinel database to obtain this information - To construct the query, &PRODUCT; determines what guest IPs were in use during the current - query interval. This includes both newly assigned IPs and IPs that were assigned in a previous - time period and continued to be in use. &PRODUCT; queries Traffic Sentinel for network - statistics that apply to these IPs during the time period they remained allocated in &PRODUCT;. - The returned data is correlated with the customer account that owned each IP and the timestamps - when IPs were assigned and released in order to create billable metering records in &PRODUCT;. - When the Usage Server runs, it collects this data. - To set up the integration between &PRODUCT; and Traffic Sentinel: - - - On your network infrastructure, install Traffic Sentinel and configure it to gather - traffic data. For installation and configuration steps, see inMon documentation at Traffic Sentinel Documentation. - - - In the Traffic Sentinel UI, configure Traffic Sentinel to accept script querying from - guest users. &PRODUCT; will be the guest user performing the remote queries to gather - network usage for one or more IP addresses. 
- Click File > Users > Access Control > Reports Query, then select Guest from the - drop-down list. - - - On &PRODUCT;, add the Traffic Sentinel host by calling the &PRODUCT; API command - addTrafficMonitor. Pass in the URL of the Traffic Sentinel as protocol + host + port - (optional); for example, http://10.147.28.100:8080. For the addTrafficMonitor command - syntax, see the API Reference at API - Documentation. - For information about how to call the &PRODUCT; API, see the Developer’s Guide at - - &PRODUCT; API Developer's Guide. - - - Log in to the &PRODUCT; UI as administrator. - - - Select Configuration from the Global Settings page, and set the following: - direct.network.stats.interval: How often you want &PRODUCT; to query Traffic - Sentinel. - - -
diff --git a/docs/en-US/guest-traffic.xml b/docs/en-US/guest-traffic.xml deleted file mode 100644 index 943073ebc97..00000000000 --- a/docs/en-US/guest-traffic.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Guest Traffic - A network can carry guest traffic only between VMs within one zone. Virtual machines in different zones cannot communicate with each other using their IP addresses; they must communicate with each other by routing through a public IP address. - See a typical guest traffic setup given below: - - - - - guest-traffic-setup.png: Depicts a guest traffic setup - - Typically, the Management Server automatically creates a virtual router for each network. A - virtual router is a special virtual machine that runs on the hosts. Each virtual router in an - isolated network has three network interfaces. If multiple public VLAN is used, the router will - have multiple public interfaces. Its eth0 interface serves as the gateway for the guest traffic - and has the IP address of 10.1.1.1. Its eth1 interface is used by the system to configure the - virtual router. Its eth2 interface is assigned a public IP address for public traffic. If - multiple public VLAN is used, the router will have multiple public interfaces. - The virtual router provides DHCP and will automatically assign an IP address for each guest VM within the IP range assigned for the network. The user can manually reconfigure guest VMs to assume different IP addresses. - Source NAT is automatically configured in the virtual router to forward outbound traffic for all guest VMs -
diff --git a/docs/en-US/ha-enabled-vm.xml b/docs/en-US/ha-enabled-vm.xml deleted file mode 100644 index 19666a4db27..00000000000 --- a/docs/en-US/ha-enabled-vm.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- HA-Enabled Virtual Machines - The user can specify a virtual machine as HA-enabled. By default, all virtual router VMs and Elastic Load Balancing VMs are automatically configured as HA-enabled. When an HA-enabled VM crashes, &PRODUCT; detects the crash and restarts the VM automatically within the same Availability Zone. HA is never performed across different Availability Zones. &PRODUCT; has a conservative policy towards restarting VMs and ensures that there will never be two instances of the same VM running at the same time. The Management Server attempts to start the VM on another Host in the same cluster. - HA features work with iSCSI or NFS primary storage. HA with local storage is not supported. -
diff --git a/docs/en-US/ha-for-hosts.xml b/docs/en-US/ha-for-hosts.xml deleted file mode 100644 index 15b5fa73f0b..00000000000 --- a/docs/en-US/ha-for-hosts.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- HA for Hosts - The user can specify a virtual machine as HA-enabled. By default, all virtual router VMs and Elastic Load Balancing VMs are automatically configured as HA-enabled. When an HA-enabled VM crashes, &PRODUCT; detects the crash and restarts the VM automatically within the same Availability Zone. HA is never performed across different Availability Zones. &PRODUCT; has a conservative policy towards restarting VMs and ensures that there will never be two instances of the same VM running at the same time. The Management Server attempts to start the VM on another Host in the same cluster. - HA features work with iSCSI or NFS primary storage. HA with local storage is not supported. - -
diff --git a/docs/en-US/ha-management-server.xml b/docs/en-US/ha-management-server.xml deleted file mode 100644 index 1afebce3bf3..00000000000 --- a/docs/en-US/ha-management-server.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- HA for Management Server - The &PRODUCT; Management Server should be deployed in a multi-node configuration such that it is not susceptible to individual server failures. The Management Server itself (as distinct from the MySQL database) is stateless and may be placed behind a load balancer. - Normal operation of Hosts is not impacted by an outage of all Management Serves. All guest VMs will continue to work. - When the Management Server is down, no new VMs can be created, and the end user and admin UI, API, dynamic load distribution, and HA will cease to work. -
diff --git a/docs/en-US/hardware-config-eg.xml b/docs/en-US/hardware-config-eg.xml deleted file mode 100644 index 3174bfa8576..00000000000 --- a/docs/en-US/hardware-config-eg.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Example Hardware Configuration - This section contains an example configuration of specific switch models for zone-level - layer-3 switching. It assumes VLAN management protocols, such as VTP or GVRP, have been - disabled. The example scripts must be changed appropriately if you choose to use VTP or - GVRP. - - -
diff --git a/docs/en-US/hardware-firewall.xml b/docs/en-US/hardware-firewall.xml deleted file mode 100644 index efab3c73806..00000000000 --- a/docs/en-US/hardware-firewall.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Hardware Firewall - All deployments should have a firewall protecting the management server; see Generic - Firewall Provisions. Optionally, some deployments may also have a Juniper SRX firewall that will - be the default gateway for the guest networks; see . - - - - -
diff --git a/docs/en-US/health-checks-for-lb-rules.xml b/docs/en-US/health-checks-for-lb-rules.xml deleted file mode 100644 index 4c7e091c1ce..00000000000 --- a/docs/en-US/health-checks-for-lb-rules.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- - Health Checks for Load Balancer Rules - (NetScaler load balancer only; requires NetScaler version 10.0) - - Health checks are used in load-balanced applications to ensure that requests are forwarded - only to running, available services. - When creating a load balancer rule, you can specify a health check policy. - This is in addition to specifying the - stickiness policy, algorithm, and other load balancer rule options. - You can configure one health check policy per load balancer rule. - Any load balancer rule defined on a NetScaler load balancer in &PRODUCT; can have a health check policy. - The policy consists of a ping path, thresholds to define "healthy" and "unhealthy" states, - health check frequency, and timeout wait interval. - When a health check policy is in effect, - the load balancer will stop forwarding requests to any resources that are found to be unhealthy. - If the resource later becomes available again, the periodic health check - will discover it, and the resource will once again be added to the pool of resources that can - receive requests from the load balancer. - At any given time, the most recent result of the health check is displayed in the UI. - For any VM that is attached to a load balancer rule with a health check configured, - the state will be shown as UP or DOWN in the UI depending on the result of the most recent health check. - You can delete or modify existing health check policies. - To configure how often the health check is performed by default, use the global - configuration setting healthcheck.update.interval (default value is 600 seconds). - You can override this value for an individual health check policy. - For details on how to set a health check policy using the UI, see . -
diff --git a/docs/en-US/host-add-vsphere.xml b/docs/en-US/host-add-vsphere.xml deleted file mode 100644 index b47846448d7..00000000000 --- a/docs/en-US/host-add-vsphere.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding a Host (vSphere) - For vSphere servers, we recommend creating the cluster of hosts in vCenter and then adding the entire cluster to &PRODUCT;. See Add Cluster: vSphere. -
diff --git a/docs/en-US/host-add-xenserver-kvm-ovm.xml b/docs/en-US/host-add-xenserver-kvm-ovm.xml deleted file mode 100644 index 91c36aba7f6..00000000000 --- a/docs/en-US/host-add-xenserver-kvm-ovm.xml +++ /dev/null @@ -1,157 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Adding a Host (XenServer or KVM) - XenServer and KVM hosts can be added to a cluster at any time. -
- Requirements for XenServer and KVM Hosts - - Make sure the hypervisor host does not have any VMs already running before you add it to - &PRODUCT;. - - Configuration requirements: - - - Each cluster must contain only hosts with the identical hypervisor. - - - For XenServer, do not put more than 8 hosts in a cluster. - - - For KVM, do not put more than 16 hosts in a cluster. - - - For hardware requirements, see the installation section for your hypervisor in the - &PRODUCT; Installation Guide. -
- XenServer Host Additional Requirements - If network bonding is in use, the administrator must cable the new host identically to - other hosts in the cluster. - For all additional hosts to be added to the cluster, run the following command. This - will cause the host to join the master in a XenServer pool. - # xe pool-join master-address=[master IP] master-username=root master-password=[your password] - - When copying and pasting a command, be sure the command has pasted as a single line - before executing. Some document viewers may introduce unwanted line breaks in copied - text. - - With all hosts added to the XenServer pool, run the cloud-setup-bond script. This script - will complete the configuration and setup of the bonds on the new hosts in the - cluster. - - - Copy the script from the Management Server in - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/cloud-setup-bonding.sh to the - master host and ensure it is executable. - - - Run the script: - # ./cloud-setup-bonding.sh - - -
-
- KVM Host Additional Requirements - - - If shared mountpoint storage is in use, the administrator should ensure that the new - host has all the same mountpoints (with storage mounted) as the other hosts in the - cluster. - - - Make sure the new host has the same network configuration (guest, private, and - public network) as other hosts in the cluster. - - - If you are using OpenVswitch bridges edit the file agent.properties on the KVM host - and set the parameter network.bridge.type to - openvswitch before adding the host to &PRODUCT; - - -
- -
-
- Adding a XenServer or KVM Host - - - If you have not already done so, install the hypervisor software on the host. You will - need to know which version of the hypervisor software version is supported by &PRODUCT; - and what additional configuration is required to ensure the host will work with &PRODUCT;. - To find these installation details, see the appropriate section for your hypervisor in the - &PRODUCT; Installation Guide. - - - Log in to the &PRODUCT; UI as administrator. - - - In the left navigation, choose Infrastructure. In Zones, click View More, then click - the zone in which you want to add the host. - - - Click the Compute tab. In the Clusters node, click View All. - - - Click the cluster where you want to add the host. - - - Click View Hosts. - - - Click Add Host. - - - Provide the following information. - - - Host Name. The DNS name or IP address of the host. - - - Username. Usually root. - - - Password. This is the password for the user from your XenServer or KVM - install). - - - Host Tags (Optional). Any labels that you use to categorize hosts for ease of - maintenance. For example, you can set to the cloud's HA tag (set in the ha.tag global - configuration parameter) if you want this host to be used only for VMs with the "high - availability" feature enabled. For more information, see HA-Enabled Virtual Machines - as well as HA for Hosts. - - - There may be a slight delay while the host is provisioned. It should automatically - display in the UI. - - - Repeat for additional hosts. - - -
-
diff --git a/docs/en-US/host-add.xml b/docs/en-US/host-add.xml deleted file mode 100644 index 74509d69be7..00000000000 --- a/docs/en-US/host-add.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding a Host - - Before adding a host to the &PRODUCT; configuration, you must first install your chosen hypervisor on the host. &PRODUCT; can manage hosts running VMs under a variety of hypervisors. - The &PRODUCT; Installation Guide provides instructions on how to install each supported hypervisor - and configure it for use with &PRODUCT;. See the appropriate section in the Installation Guide for information about which version of your chosen hypervisor is supported, as well as crucial additional steps to configure the hypervisor hosts for use with &PRODUCT;. - Be sure you have performed the additional &PRODUCT;-specific configuration steps described in the hypervisor installation section for your particular hypervisor. - - Now add the hypervisor host to &PRODUCT;. The technique to use varies depending on the hypervisor. - - - - - - - - -
diff --git a/docs/en-US/host-allocation.xml b/docs/en-US/host-allocation.xml deleted file mode 100644 index dddffd553ac..00000000000 --- a/docs/en-US/host-allocation.xml +++ /dev/null @@ -1,123 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Assigning VMs to Hosts - At any point in time, each virtual machine instance is running on a single host. - How does &PRODUCT; determine which host to place a VM on? There are several ways: - - Automatic default host allocation. &PRODUCT; can automatically pick - the most appropriate host to run each virtual machine. - Instance type preferences. &PRODUCT; administrators can specify that certain hosts should have a preference for particular types of guest instances. - For example, an administrator could state that a host should have a preference to run Windows guests. - The default host allocator will attempt to place guests of that OS type on such hosts first. - If no such host is available, the allocator will place the instance wherever there is sufficient physical capacity. - Vertical and horizontal allocation. - Vertical allocation consumes all the resources of a given host before allocating any guests on a second host. - This reduces power consumption in the cloud. Horizontal allocation places a guest on each host in a round-robin fashion. - This may yield better performance to the guests in some cases. - End user preferences. - Users can not control exactly which host will run a given VM instance, - but they can specify a zone for the VM. - &PRODUCT; is then restricted to allocating the VM only to one of the hosts in that zone. - Host tags. The administrator can assign tags to hosts. These tags can be used to - specify which host a VM should use. - The &PRODUCT; administrator decides whether to define host tags, then create a service offering using those tags and offer it to the user. - - Affinity groups. - By defining affinity groups and assigning VMs to them, the user or administrator can - influence (but not dictate) which VMs should run on separate hosts. - This feature is to let users specify that certain VMs won't be on the same host. - &PRODUCT; also provides a pluggable interface for adding new allocators. 
- These custom allocators can provide any policy the administrator desires. - -
- Affinity Groups - By defining affinity groups and assigning VMs to them, the user or administrator can - influence (but not dictate) which VMs should run on separate hosts. - This feature is to let users specify that VMs with the same “host anti-affinity†type won’t be on the same host. - This serves to increase fault tolerance. - If a host fails, another VM offering the same service (for example, hosting the user's website) is still up and running on another host. - The scope of an affinity group is per user account. - Creating a New Affinity Group - To add an affinity group: - - Log in to the &PRODUCT; UI as an administrator or user. - In the left navigation bar, click Affinity Groups. - Click Add affinity group. In the dialog box, fill in the following fields: - - Name. Give the group a name. - Description. Any desired text to tell more about the purpose of the group. - Type. The only supported type shipped with &PRODUCT; is Host Anti-Affinity. - This indicates that the VMs in this group should avoid being placed on the same VM with each other. - If you see other types in this list, it means that your installation of &PRODUCT; has been extended - with customized affinity group plugins. - - - - Assign a New VM to an Affinity Group - To assign a new VM to an affinity group: - - Create the VM as usual, as described in . - In the Add Instance wizard, there is a new Affinity tab where you can select the affinity group. - - Change Affinity Group for an Existing VM - To assign an existing VM to an affinity group: - - Log in to the &PRODUCT; UI as an administrator or user. - In the left navigation bar, click Instances. - Click the name of the VM you want to work with. - Stop the VM by clicking the Stop button. - Click the Change Affinity button. 
- - - - - change-affinity-button.png: button to assign an affinity group - to a virtual machine - - - - - View Members of an Affinity Group - To see which VMs are currently assigned to a particular affinity group: - - In the left navigation bar, click Affinity Groups. - Click the name of the group you are interested in. - Click View Instances. The members of the group are listed. - From here, you can click the name of any VM in the list to access all its details and controls. - - Delete an Affinity Group - To delete an affinity group: - - In the left navigation bar, click Affinity Groups. - Click the name of the group you are interested in. - Click Delete. - Any VM that is a member of the affinity group will be disassociated from the group. - The former group members will continue to run normally on the current hosts, but if the - VM is restarted, it will no longer follow the host allocation rules from its former - affinity group. - -
-
diff --git a/docs/en-US/hypervisor-host-install-agent.xml b/docs/en-US/hypervisor-host-install-agent.xml deleted file mode 100644 index e339165d0da..00000000000 --- a/docs/en-US/hypervisor-host-install-agent.xml +++ /dev/null @@ -1,79 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Install and configure the Agent - To manage KVM instances on the host &PRODUCT; uses a Agent. This Agent communicates with the Management server and controls all the instances on the host. - First we start by installing the agent: - In RHEL or CentOS: - $ yum install cloudstack-agent - In Ubuntu: - $ apt-get install cloudstack-agent - The host is now ready to be added to a cluster. This is covered in a later section, see . It is recommended that you continue to read the documentation before adding the host! -
- Configure CPU model for KVM guest (Optional) - In additional,the &PRODUCT; Agent allows host administrator to control the guest CPU model which is exposed to KVM instances. By default, the CPU model of KVM instance is likely QEMU Virtual CPU version x.x.x with least CPU features exposed. There are a couple of reasons to specify the CPU model: - - To maximise performance of instances by exposing new host CPU features to the KVM instances; - To ensure a consistent default CPU across all machines,removing reliance of variable QEMU defaults; - - For the most part it will be sufficient for the host administrator to specify the guest CPU config in the per-host configuration file (/etc/cloudstack/agent/agent.properties). This will be achieved by introducing two new configuration parameters: - guest.cpu.mode=custom|host-model|host-passthrough -guest.cpu.model=from /usr/share/libvirt/cpu_map.xml(only valid when guest.cpu.mode=custom) - - There are three choices to fulfill the cpu model changes: - - - custom: you can explicitly specify one of the supported named model in /usr/share/libvirt/cpu_map.xml - - - host-model: libvirt will identify the CPU model in /usr/share/libvirt/cpu_map.xml which most closely matches the host, and then request additional CPU flags to complete the match. This should give close to maximum functionality/performance, which maintaining good reliability/compatibility if the guest is migrated to another host with slightly different host CPUs. - - - host-passthrough: libvirt will tell KVM to passthrough the host CPU with no modifications. The difference to host-model, instead of just matching feature flags, every last detail of the host CPU is matched. This gives absolutely best performance, and can be important to some apps which check low level CPU details, but it comes at a cost with respect to migration: the guest can only be migrated to an exactly matching host CPU. 
- - - Here are some examples: - - - custom - guest.cpu.mode=custom -guest.cpu.model=SandyBridge - - - - host-model - guest.cpu.mode=host-model - - - host-passthrough - guest.cpu.mode=host-passthrough - - - - host-passthrough may lead to migration failure,if you have this problem,you should use host-model or custom - -
- -
diff --git a/docs/en-US/hypervisor-host-install-finish.xml b/docs/en-US/hypervisor-host-install-finish.xml deleted file mode 100644 index ff530c79038..00000000000 --- a/docs/en-US/hypervisor-host-install-finish.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Add the host to CloudStack - The host is now ready to be added to a cluster. This is covered in a later section, see . It is recommended that you continue to read the documentation before adding the host! -
diff --git a/docs/en-US/hypervisor-host-install-firewall.xml b/docs/en-US/hypervisor-host-install-firewall.xml deleted file mode 100644 index c6658731819..00000000000 --- a/docs/en-US/hypervisor-host-install-firewall.xml +++ /dev/null @@ -1,59 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Configuring the firewall - The hypervisor needs to be able to communicate with other hypervisors and the management server needs to be able to reach the hypervisor. - In order to do so we have to open the following TCP ports (if you are using a firewall): - - 22 (SSH) - 1798 - 16509 (libvirt) - 5900 - 6100 (VNC consoles) - 49152 - 49216 (libvirt live migration) - - It depends on the firewall you are using how to open these ports. Below you'll find examples how to open these ports in RHEL/CentOS and Ubuntu. -
- Open ports in RHEL/CentOS - RHEL and CentOS use iptables for firewalling the system, you can open extra ports by executing the following iptable commands: - $ iptables -I INPUT -p tcp -m tcp --dport 22 -j ACCEPT - $ iptables -I INPUT -p tcp -m tcp --dport 1798 -j ACCEPT - $ iptables -I INPUT -p tcp -m tcp --dport 16509 -j ACCEPT - $ iptables -I INPUT -p tcp -m tcp --dport 5900:6100 -j ACCEPT - $ iptables -I INPUT -p tcp -m tcp --dport 49152:49216 -j ACCEPT - These iptable settings are not persistent accross reboots, we have to save them first. - $ iptables-save > /etc/sysconfig/iptables -
-
- Open ports in Ubuntu - The default firewall under Ubuntu is UFW (Uncomplicated FireWall), which is a Python wrapper around iptables. - To open the required ports, execute the following commands: - $ ufw allow proto tcp from any to any port 22 - $ ufw allow proto tcp from any to any port 1798 - $ ufw allow proto tcp from any to any port 16509 - $ ufw allow proto tcp from any to any port 5900:6100 - $ ufw allow proto tcp from any to any port 49152:49216 - By default UFW is not enabled on Ubuntu. Executing these commands with the firewall disabled does not enable the firewall. -
-
diff --git a/docs/en-US/hypervisor-host-install-libvirt.xml b/docs/en-US/hypervisor-host-install-libvirt.xml deleted file mode 100644 index d3d6b9b4e80..00000000000 --- a/docs/en-US/hypervisor-host-install-libvirt.xml +++ /dev/null @@ -1,57 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Install and Configure libvirt - &PRODUCT; uses libvirt for managing virtual machines. Therefore it is vital that libvirt is configured correctly. Libvirt is a dependency of cloudstack-agent and should already be installed. - - - In order to have live migration working libvirt has to listen for unsecured TCP connections. We also need to turn off libvirts attempt to use Multicast DNS advertising. Both of these settings are in /etc/libvirt/libvirtd.conf - Set the following parameters: - listen_tls = 0 - listen_tcp = 1 - tcp_port = "16509" - auth_tcp = "none" - mdns_adv = 0 - - - Turning on "listen_tcp" in libvirtd.conf is not enough, we have to change the parameters as well: - On RHEL or CentOS modify /etc/sysconfig/libvirtd: - Uncomment the following line: - #LIBVIRTD_ARGS="--listen" - On Ubuntu: modify /etc/default/libvirt-bin - Add "-l" to the following line:: - libvirtd_opts="-d" - so it looks like: - libvirtd_opts="-d -l" - - - Restart libvirt - In RHEL or CentOS: - $ service libvirtd restart - In Ubuntu: - $ service libvirt-bin restart - - -
diff --git a/docs/en-US/hypervisor-host-install-network-openvswitch.xml b/docs/en-US/hypervisor-host-install-network-openvswitch.xml deleted file mode 100644 index a16dc8e0e8d..00000000000 --- a/docs/en-US/hypervisor-host-install-network-openvswitch.xml +++ /dev/null @@ -1,116 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Configure the network using OpenVswitch - This is a very important section, please make sure you read this thoroughly. - In order to forward traffic to your instances you will need at least two bridges: public and private. - By default these bridges are called cloudbr0 and cloudbr1, but you do have to make sure they are available on each hypervisor. - The most important factor is that you keep the configuration consistent on all your hypervisors. -
- Preparing - To make sure that the native bridge module will not interfere with openvswitch the bridge module should be added to the blacklist. See the modprobe documentation for your distribution on where to find the blacklist. Make sure the module is not loaded either by rebooting or executing rmmod bridge before executing next steps. - The network configurations below depend on the ifup-ovs and ifdown-ovs scripts which are part of the openvswitch installation. They should be installed in /etc/sysconfig/network-scripts/ -
-
- Network example - There are many ways to configure your network. In the Basic networking mode you should have two (V)LAN's, one for your private network and one for the public network. - We assume that the hypervisor has one NIC (eth0) with three tagged VLAN's: - - VLAN 100 for management of the hypervisor - VLAN 200 for public network of the instances (cloudbr0) - VLAN 300 for private network of the instances (cloudbr1) - - On VLAN 100 we give the Hypervisor the IP-Address 192.168.42.11/24 with the gateway 192.168.42.1 - The Hypervisor and Management server don't have to be in the same subnet! -
-
- Configuring the network bridges - It depends on the distribution you are using how to configure these, below you'll find - examples for RHEL/CentOS. - The goal is to have three bridges called 'mgmt0', 'cloudbr0' and 'cloudbr1' after this - section. This should be used as a guideline only. The exact configuration will - depend on your network layout. -
- Configure OpenVswitch - The network interfaces using OpenVswitch are created using the ovs-vsctl command. This command will configure the interfaces and persist them to the OpenVswitch database. - First we create a main bridge connected to the eth0 interface. Next we create three fake bridges, each connected to a specific vlan tag. - -
-
- Configure in RHEL or CentOS - The required packages were installed when openvswitch and libvirt were installed, - we can proceed to configuring the network. - First we configure eth0 - vi /etc/sysconfig/network-scripts/ifcfg-eth0 - Make sure it looks similar to: - - We have to configure the base bridge with the trunk. - vi /etc/sysconfig/network-scripts/ifcfg-cloudbr - - We now have to configure the three VLAN bridges: - vi /etc/sysconfig/network-scripts/ifcfg-mgmt0 - - vi /etc/sysconfig/network-scripts/ifcfg-cloudbr0 - - vi /etc/sysconfig/network-scripts/ifcfg-cloudbr1 - - With this configuration you should be able to restart the network, although a reboot is recommended to see if everything works properly. - Make sure you have an alternative way like IPMI or ILO to reach the machine in case you made a configuration error and the network stops functioning! -
-
-
diff --git a/docs/en-US/hypervisor-host-install-network.xml b/docs/en-US/hypervisor-host-install-network.xml deleted file mode 100644 index 80156d9b6a9..00000000000 --- a/docs/en-US/hypervisor-host-install-network.xml +++ /dev/null @@ -1,150 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Configure the network bridges - This is a very important section, please make sure you read this thoroughly. - This section details how to configure bridges using the native implementation in Linux. Please refer to the next section if you intend to use OpenVswitch - In order to forward traffic to your instances you will need at least two bridges: public and private. - By default these bridges are called cloudbr0 and cloudbr1, but you do have to make sure they are available on each hypervisor. - The most important factor is that you keep the configuration consistent on all your hypervisors. -
- Network example - There are many ways to configure your network. In the Basic networking mode you should have two (V)LAN's, one for your private network and one for the public network. - We assume that the hypervisor has one NIC (eth0) with three tagged VLAN's: - - VLAN 100 for management of the hypervisor - VLAN 200 for public network of the instances (cloudbr0) - VLAN 300 for private network of the instances (cloudbr1) - - On VLAN 100 we give the Hypervisor the IP-Address 192.168.42.11/24 with the gateway 192.168.42.1 - The Hypervisor and Management server don't have to be in the same subnet! -
-
- Configuring the network bridges - It depends on the distribution you are using how to configure these, below you'll find examples for RHEL/CentOS and Ubuntu. - The goal is to have two bridges called 'cloudbr0' and 'cloudbr1' after this section. This should be used as a guideline only. The exact configuration will depend on your network layout. -
- Configure in RHEL or CentOS - The required packages were installed when libvirt was installed, we can proceed to configuring the network. - First we configure eth0 - vi /etc/sysconfig/network-scripts/ifcfg-eth0 - Make sure it looks similar to: - - We now have to configure the three VLAN interfaces: - vi /etc/sysconfig/network-scripts/ifcfg-eth0.100 - - vi /etc/sysconfig/network-scripts/ifcfg-eth0.200 - - vi /etc/sysconfig/network-scripts/ifcfg-eth0.300 - - Now we have the VLAN interfaces configured we can add the bridges on top of them. - vi /etc/sysconfig/network-scripts/ifcfg-cloudbr0 - Now we just configure it is a plain bridge without an IP-Address - - We do the same for cloudbr1 - vi /etc/sysconfig/network-scripts/ifcfg-cloudbr1 - - With this configuration you should be able to restart the network, although a reboot is recommended to see if everything works properly. - Make sure you have an alternative way like IPMI or ILO to reach the machine in case you made a configuration error and the network stops functioning! -
-
- Configure in Ubuntu - All the required packages were installed when you installed libvirt, so we only have to configure the network. - vi /etc/network/interfaces - Modify the interfaces file to look like this: - - With this configuration you should be able to restart the network, although a reboot is recommended to see if everything works properly. - Make sure you have an alternative way like IPMI or ILO to reach the machine in case you made a configuration error and the network stops functioning! -
-
-
diff --git a/docs/en-US/hypervisor-host-install-overview.xml b/docs/en-US/hypervisor-host-install-overview.xml deleted file mode 100644 index 716b43ddf91..00000000000 --- a/docs/en-US/hypervisor-host-install-overview.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- KVM Installation Overview - If you want to use the Linux Kernel Virtual Machine (KVM) hypervisor to run guest virtual machines, install KVM on the host(s) in your cloud. The material in this section doesn't duplicate KVM installation docs. It provides the &PRODUCT;-specific steps that are needed to prepare a KVM host to work with &PRODUCT;. - Before continuing, make sure that you have applied the latest updates to your host. - It is NOT recommended to run services on this host not controlled by &PRODUCT;. - The procedure for installing a KVM Hypervisor Host is: - - Prepare the Operating System - Install and configure libvirt - Configure Security Policies (AppArmor and SELinux) - Install and configure the Agent - -
\ No newline at end of file diff --git a/docs/en-US/hypervisor-host-install-prepare-os.xml b/docs/en-US/hypervisor-host-install-prepare-os.xml deleted file mode 100644 index 44852f21c2d..00000000000 --- a/docs/en-US/hypervisor-host-install-prepare-os.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Prepare the Operating System - The OS of the Host must be prepared to host the &PRODUCT; Agent and run KVM instances. - - Log in to your OS as root. - - Check for a fully qualified hostname. - $ hostname --fqdn - This should return a fully qualified hostname such as "kvm1.lab.example.org". If it does not, edit /etc/hosts so that it does. - - - Make sure that the machine can reach the Internet. - $ ping www.cloudstack.org - - - Turn on NTP for time synchronization. - NTP is required to synchronize the clocks of the servers in your cloud. Unsynchronized clocks can cause unexpected problems. - - Install NTP - On RHEL or CentOS: - $ yum install ntp - On Ubuntu: - $ apt-get install openntpd - - - - Repeat all of these steps on every hypervisor host. - -
\ No newline at end of file diff --git a/docs/en-US/hypervisor-host-install-security-policies.xml b/docs/en-US/hypervisor-host-install-security-policies.xml deleted file mode 100644 index 03da04b6eb3..00000000000 --- a/docs/en-US/hypervisor-host-install-security-policies.xml +++ /dev/null @@ -1,70 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Configure the Security Policies - &PRODUCT; does various things which can be blocked by security mechanisms like AppArmor and SELinux. These have to be disabled to ensure the Agent has all the required permissions. - - - Configure SELinux (RHEL and CentOS) - - - Check to see whether SELinux is installed on your machine. If not, you can skip this section. - In RHEL or CentOS, SELinux is installed and enabled by default. You can verify this with: - $ rpm -qa | grep selinux - - - Set the SELINUX variable in /etc/selinux/config to "permissive". This ensures that the permissive setting will be maintained after a system reboot. - In RHEL or CentOS: - vi /etc/selinux/config - Change the following line - SELINUX=enforcing - to this - SELINUX=permissive - - - Then set SELinux to permissive starting immediately, without requiring a system reboot. - $ setenforce permissive - - - - - Configure Apparmor (Ubuntu) - - - Check to see whether AppArmor is installed on your machine. If not, you can skip this section. - In Ubuntu AppArmor is installed and enabled by default. You can verify this with: - $ dpkg --list 'apparmor' - - - Disable the AppArmor profiles for libvirt - $ ln -s /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable/ - $ ln -s /etc/apparmor.d/usr.lib.libvirt.virt-aa-helper /etc/apparmor.d/disable/ - $ apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd - $ apparmor_parser -R /etc/apparmor.d/usr.lib.libvirt.virt-aa-helper - - - - -
\ No newline at end of file diff --git a/docs/en-US/hypervisor-installation.xml b/docs/en-US/hypervisor-installation.xml deleted file mode 100644 index 5ee7dea696a..00000000000 --- a/docs/en-US/hypervisor-installation.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Hypervisor Installation - - - - - - diff --git a/docs/en-US/hypervisor-kvm-install-flow.xml b/docs/en-US/hypervisor-kvm-install-flow.xml deleted file mode 100644 index aa19e47be77..00000000000 --- a/docs/en-US/hypervisor-kvm-install-flow.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- KVM Hypervisor Host Installation - - - - - - - - - - - -
diff --git a/docs/en-US/hypervisor-kvm-requirements.xml b/docs/en-US/hypervisor-kvm-requirements.xml deleted file mode 100644 index cdfc808e490..00000000000 --- a/docs/en-US/hypervisor-kvm-requirements.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- System Requirements for KVM Hypervisor Hosts - KVM is included with a variety of Linux-based operating systems. Although you are not required to run these distributions, the following are recommended: - - CentOS / RHEL: 6.3 - Ubuntu: 12.04(.1) - - The main requirement for KVM hypervisors is the libvirt and Qemu version. No matter what - Linux distribution you are using, make sure the following requirements are met: - - libvirt: 0.9.4 or higher - Qemu/KVM: 1.0 or higher - - The default bridge in &PRODUCT; is the Linux native bridge implementation (bridge module). &PRODUCT; includes an option to work with OpenVswitch, the requirements are listed below - - libvirt: 0.9.11 or higher - openvswitch: 1.7.1 or higher - - In addition, the following hardware requirements apply: - - Within a single cluster, the hosts must be of the same distribution version. - All hosts within a cluster must be homogenous. The CPUs must be of the same type, count, and feature flags. - Must support HVM (Intel-VT or AMD-V enabled) - 64-bit x86 CPU (more cores results in better performance) - 4 GB of memory - At least 1 NIC - When you deploy &PRODUCT;, the hypervisor host must not have any VMs already running - -
diff --git a/docs/en-US/hypervisor-support-for-primarystorage.xml b/docs/en-US/hypervisor-support-for-primarystorage.xml deleted file mode 100644 index fdef1f2b6e0..00000000000 --- a/docs/en-US/hypervisor-support-for-primarystorage.xml +++ /dev/null @@ -1,104 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Hypervisor Support for Primary Storage - The following table shows storage options and parameters for different hypervisors. - - - - - - - - - - - VMware vSphere - Citrix XenServer - KVM - - - - - Format for Disks, Templates, and - Snapshots - VMDK - VHD - QCOW2 - - - iSCSI support - VMFS - Clustered LVM - Yes, via Shared Mountpoint - - - Fiber Channel support - VMFS - Yes, via Existing SR - Yes, via Shared Mountpoint - - - NFS support - Y - Y - Y - - - Local storage support - Y - Y - Y - - - Storage over-provisioning - NFS and iSCSI - NFS - NFS - - - - - XenServer uses a clustered LVM system to store VM images on iSCSI and Fiber Channel volumes - and does not support over-provisioning in the hypervisor. The storage server itself, however, - can support thin-provisioning. As a result the &PRODUCT; can still support storage - over-provisioning by running on thin-provisioned storage volumes. - KVM supports "Shared Mountpoint" storage. A shared mountpoint is a file system path local to - each server in a given cluster. The path must be the same across all Hosts in the cluster, for - example /mnt/primary1. This shared mountpoint is assumed to be a clustered filesystem such as - OCFS2. In this case the &PRODUCT; does not attempt to mount or unmount the storage as is done - with NFS. The &PRODUCT; requires that the administrator insure that the storage is - available - - With NFS storage, &PRODUCT; manages the overprovisioning. In this case the global - configuration parameter storage.overprovisioning.factor controls the degree of overprovisioning. - This is independent of hypervisor type. - Local storage is an option for primary storage for vSphere, XenServer, and KVM. When the - local disk option is enabled, a local disk storage pool is automatically created on each host. - To use local storage for the System Virtual Machines (such as the Virtual Router), set - system.vm.use.local.storage to true in global configuration. 
- &PRODUCT; supports multiple primary storage pools in a Cluster. For example, you could - provision 2 NFS servers in primary storage. Or you could provision 1 iSCSI LUN initially and - then add a second iSCSI LUN when the first approaches capacity. -
diff --git a/docs/en-US/images/1000-foot-view.png b/docs/en-US/images/1000-foot-view.png deleted file mode 100644 index 2fe3c1658b3..00000000000 Binary files a/docs/en-US/images/1000-foot-view.png and /dev/null differ diff --git a/docs/en-US/images/DevCloud-hostonly.png b/docs/en-US/images/DevCloud-hostonly.png deleted file mode 100644 index 111f93ac700..00000000000 Binary files a/docs/en-US/images/DevCloud-hostonly.png and /dev/null differ diff --git a/docs/en-US/images/DevCloud.png b/docs/en-US/images/DevCloud.png deleted file mode 100644 index 5e83ca946c7..00000000000 Binary files a/docs/en-US/images/DevCloud.png and /dev/null differ diff --git a/docs/en-US/images/VMSnapshotButton.png b/docs/en-US/images/VMSnapshotButton.png deleted file mode 100644 index 52177402198..00000000000 Binary files a/docs/en-US/images/VMSnapshotButton.png and /dev/null differ diff --git a/docs/en-US/images/Workloads.png b/docs/en-US/images/Workloads.png deleted file mode 100644 index 9282f57b344..00000000000 Binary files a/docs/en-US/images/Workloads.png and /dev/null differ diff --git a/docs/en-US/images/add-account-screen.png b/docs/en-US/images/add-account-screen.png deleted file mode 100644 index aaa798f6766..00000000000 Binary files a/docs/en-US/images/add-account-screen.png and /dev/null differ diff --git a/docs/en-US/images/add-cluster.png b/docs/en-US/images/add-cluster.png deleted file mode 100644 index 26ae3fd298e..00000000000 Binary files a/docs/en-US/images/add-cluster.png and /dev/null differ diff --git a/docs/en-US/images/add-gateway.png b/docs/en-US/images/add-gateway.png deleted file mode 100644 index da8eed955f5..00000000000 Binary files a/docs/en-US/images/add-gateway.png and /dev/null differ diff --git a/docs/en-US/images/add-gslb.png b/docs/en-US/images/add-gslb.png deleted file mode 100644 index 827a913093b..00000000000 Binary files a/docs/en-US/images/add-gslb.png and /dev/null differ diff --git a/docs/en-US/images/add-guest-network.png 
b/docs/en-US/images/add-guest-network.png deleted file mode 100644 index b22181e3b22..00000000000 Binary files a/docs/en-US/images/add-guest-network.png and /dev/null differ diff --git a/docs/en-US/images/add-ip-range.png b/docs/en-US/images/add-ip-range.png deleted file mode 100644 index 9f4d9d48ef9..00000000000 Binary files a/docs/en-US/images/add-ip-range.png and /dev/null differ diff --git a/docs/en-US/images/add-ldap-configuration-ad.png b/docs/en-US/images/add-ldap-configuration-ad.png deleted file mode 100644 index d4d3e789b29..00000000000 Binary files a/docs/en-US/images/add-ldap-configuration-ad.png and /dev/null differ diff --git a/docs/en-US/images/add-ldap-configuration-failure.png b/docs/en-US/images/add-ldap-configuration-failure.png deleted file mode 100644 index 312a1d6d61b..00000000000 Binary files a/docs/en-US/images/add-ldap-configuration-failure.png and /dev/null differ diff --git a/docs/en-US/images/add-ldap-configuration-openldap.png b/docs/en-US/images/add-ldap-configuration-openldap.png deleted file mode 100644 index 70ce579f87c..00000000000 Binary files a/docs/en-US/images/add-ldap-configuration-openldap.png and /dev/null differ diff --git a/docs/en-US/images/add-ldap-configuration.png b/docs/en-US/images/add-ldap-configuration.png deleted file mode 100644 index e43cbafb81c..00000000000 Binary files a/docs/en-US/images/add-ldap-configuration.png and /dev/null differ diff --git a/docs/en-US/images/add-new-gateway-vpc.png b/docs/en-US/images/add-new-gateway-vpc.png deleted file mode 100644 index 5145622a2f4..00000000000 Binary files a/docs/en-US/images/add-new-gateway-vpc.png and /dev/null differ diff --git a/docs/en-US/images/add-tier.png b/docs/en-US/images/add-tier.png deleted file mode 100644 index 0994dbd0a5a..00000000000 Binary files a/docs/en-US/images/add-tier.png and /dev/null differ diff --git a/docs/en-US/images/add-vlan-icon.png b/docs/en-US/images/add-vlan-icon.png deleted file mode 100644 index 04655dc37ad..00000000000 Binary 
files a/docs/en-US/images/add-vlan-icon.png and /dev/null differ diff --git a/docs/en-US/images/add-vm-vpc.png b/docs/en-US/images/add-vm-vpc.png deleted file mode 100644 index b2821a69156..00000000000 Binary files a/docs/en-US/images/add-vm-vpc.png and /dev/null differ diff --git a/docs/en-US/images/add-vpc.png b/docs/en-US/images/add-vpc.png deleted file mode 100644 index f3348623416..00000000000 Binary files a/docs/en-US/images/add-vpc.png and /dev/null differ diff --git a/docs/en-US/images/add-vpn-customer-gateway.png b/docs/en-US/images/add-vpn-customer-gateway.png deleted file mode 100644 index fdc3177e9eb..00000000000 Binary files a/docs/en-US/images/add-vpn-customer-gateway.png and /dev/null differ diff --git a/docs/en-US/images/addAccount-icon.png b/docs/en-US/images/addAccount-icon.png deleted file mode 100644 index 4743dbef2cf..00000000000 Binary files a/docs/en-US/images/addAccount-icon.png and /dev/null differ diff --git a/docs/en-US/images/addvm-tier-sharednw.png b/docs/en-US/images/addvm-tier-sharednw.png deleted file mode 100644 index e60205f7219..00000000000 Binary files a/docs/en-US/images/addvm-tier-sharednw.png and /dev/null differ diff --git a/docs/en-US/images/async-calls.png b/docs/en-US/images/async-calls.png deleted file mode 100644 index e24eee79beb..00000000000 Binary files a/docs/en-US/images/async-calls.png and /dev/null differ diff --git a/docs/en-US/images/attach-disk-icon.png b/docs/en-US/images/attach-disk-icon.png deleted file mode 100644 index 5e81d04fda2..00000000000 Binary files a/docs/en-US/images/attach-disk-icon.png and /dev/null differ diff --git a/docs/en-US/images/autoscale-config.png b/docs/en-US/images/autoscale-config.png deleted file mode 100644 index 735ae961f81..00000000000 Binary files a/docs/en-US/images/autoscale-config.png and /dev/null differ diff --git a/docs/en-US/images/basic-deployment.png b/docs/en-US/images/basic-deployment.png deleted file mode 100644 index 894a05327bf..00000000000 Binary files 
a/docs/en-US/images/basic-deployment.png and /dev/null differ diff --git a/docs/en-US/images/change-admin-password.png b/docs/en-US/images/change-admin-password.png deleted file mode 100644 index 938e8616a35..00000000000 Binary files a/docs/en-US/images/change-admin-password.png and /dev/null differ diff --git a/docs/en-US/images/change-affinity-button.png b/docs/en-US/images/change-affinity-button.png deleted file mode 100644 index c21ef758dc2..00000000000 Binary files a/docs/en-US/images/change-affinity-button.png and /dev/null differ diff --git a/docs/en-US/images/change-password.png b/docs/en-US/images/change-password.png deleted file mode 100644 index fbb203a5e25..00000000000 Binary files a/docs/en-US/images/change-password.png and /dev/null differ diff --git a/docs/en-US/images/change-service-icon.png b/docs/en-US/images/change-service-icon.png deleted file mode 100644 index 780e235f2f5..00000000000 Binary files a/docs/en-US/images/change-service-icon.png and /dev/null differ diff --git a/docs/en-US/images/cluster-overview.png b/docs/en-US/images/cluster-overview.png deleted file mode 100644 index 18a86c39afe..00000000000 Binary files a/docs/en-US/images/cluster-overview.png and /dev/null differ diff --git a/docs/en-US/images/clusterDefinition.png b/docs/en-US/images/clusterDefinition.png deleted file mode 100644 index 6170f9fb6ae..00000000000 Binary files a/docs/en-US/images/clusterDefinition.png and /dev/null differ diff --git a/docs/en-US/images/compute-service-offerings.png b/docs/en-US/images/compute-service-offerings.png deleted file mode 100644 index 88eb6f80597..00000000000 Binary files a/docs/en-US/images/compute-service-offerings.png and /dev/null differ diff --git a/docs/en-US/images/configuration-edit-success.png b/docs/en-US/images/configuration-edit-success.png deleted file mode 100644 index 2e21dc129a4..00000000000 Binary files a/docs/en-US/images/configuration-edit-success.png and /dev/null differ diff --git 
a/docs/en-US/images/configurations-screen.png b/docs/en-US/images/configurations-screen.png deleted file mode 100644 index 54586086c4c..00000000000 Binary files a/docs/en-US/images/configurations-screen.png and /dev/null differ diff --git a/docs/en-US/images/console-icon.png b/docs/en-US/images/console-icon.png deleted file mode 100644 index bf288869745..00000000000 Binary files a/docs/en-US/images/console-icon.png and /dev/null differ diff --git a/docs/en-US/images/create-account-post.png b/docs/en-US/images/create-account-post.png deleted file mode 100644 index ea5ce3feb7d..00000000000 Binary files a/docs/en-US/images/create-account-post.png and /dev/null differ diff --git a/docs/en-US/images/create-account-request.png b/docs/en-US/images/create-account-request.png deleted file mode 100644 index b36d1ff557a..00000000000 Binary files a/docs/en-US/images/create-account-request.png and /dev/null differ diff --git a/docs/en-US/images/create-vpn-connection.png b/docs/en-US/images/create-vpn-connection.png deleted file mode 100644 index cd5515f53c7..00000000000 Binary files a/docs/en-US/images/create-vpn-connection.png and /dev/null differ diff --git a/docs/en-US/images/dedicate-resource-button.png b/docs/en-US/images/dedicate-resource-button.png deleted file mode 100644 index 0ac38e00eca..00000000000 Binary files a/docs/en-US/images/dedicate-resource-button.png and /dev/null differ diff --git a/docs/en-US/images/del-tier.png b/docs/en-US/images/del-tier.png deleted file mode 100644 index aa9846cfd9b..00000000000 Binary files a/docs/en-US/images/del-tier.png and /dev/null differ diff --git a/docs/en-US/images/delete-button.png b/docs/en-US/images/delete-button.png deleted file mode 100644 index 27145cebbc7..00000000000 Binary files a/docs/en-US/images/delete-button.png and /dev/null differ diff --git a/docs/en-US/images/delete-ldap-configuration-failure.png b/docs/en-US/images/delete-ldap-configuration-failure.png deleted file mode 100644 index 2b7bfe525cf..00000000000 
Binary files a/docs/en-US/images/delete-ldap-configuration-failure.png and /dev/null differ diff --git a/docs/en-US/images/delete-ldap-configuration.png b/docs/en-US/images/delete-ldap-configuration.png deleted file mode 100644 index c2f6c4695fb..00000000000 Binary files a/docs/en-US/images/delete-ldap-configuration.png and /dev/null differ diff --git a/docs/en-US/images/delete-ldap.png b/docs/en-US/images/delete-ldap.png deleted file mode 100644 index c97bb4c47c3..00000000000 Binary files a/docs/en-US/images/delete-ldap.png and /dev/null differ diff --git a/docs/en-US/images/destroy-instance.png b/docs/en-US/images/destroy-instance.png deleted file mode 100644 index aa9846cfd9b..00000000000 Binary files a/docs/en-US/images/destroy-instance.png and /dev/null differ diff --git a/docs/en-US/images/detach-disk-icon.png b/docs/en-US/images/detach-disk-icon.png deleted file mode 100644 index 536a4f8d001..00000000000 Binary files a/docs/en-US/images/detach-disk-icon.png and /dev/null differ diff --git a/docs/en-US/images/dvswitch-config.png b/docs/en-US/images/dvswitch-config.png deleted file mode 100644 index edce6e8b90e..00000000000 Binary files a/docs/en-US/images/dvswitch-config.png and /dev/null differ diff --git a/docs/en-US/images/dvswitchconfig.png b/docs/en-US/images/dvswitchconfig.png deleted file mode 100644 index 55b1ef7daf3..00000000000 Binary files a/docs/en-US/images/dvswitchconfig.png and /dev/null differ diff --git a/docs/en-US/images/ec2-s3-configuration.png b/docs/en-US/images/ec2-s3-configuration.png deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/docs/en-US/images/edit-configuration.png b/docs/en-US/images/edit-configuration.png deleted file mode 100644 index 43874bf46e3..00000000000 Binary files a/docs/en-US/images/edit-configuration.png and /dev/null differ diff --git a/docs/en-US/images/edit-icon.png b/docs/en-US/images/edit-icon.png deleted file mode 100644 index 42417e278d3..00000000000 Binary files 
a/docs/en-US/images/edit-icon.png and /dev/null differ diff --git a/docs/en-US/images/edit-traffic-type.png b/docs/en-US/images/edit-traffic-type.png deleted file mode 100644 index 16cda947fdb..00000000000 Binary files a/docs/en-US/images/edit-traffic-type.png and /dev/null differ diff --git a/docs/en-US/images/egress-firewall-rule.png b/docs/en-US/images/egress-firewall-rule.png deleted file mode 100644 index fa1d8ecd0bd..00000000000 Binary files a/docs/en-US/images/egress-firewall-rule.png and /dev/null differ diff --git a/docs/en-US/images/eip-ns-basiczone.png b/docs/en-US/images/eip-ns-basiczone.png deleted file mode 100644 index bc88570531a..00000000000 Binary files a/docs/en-US/images/eip-ns-basiczone.png and /dev/null differ diff --git a/docs/en-US/images/enable-disable-autoscale.png b/docs/en-US/images/enable-disable-autoscale.png deleted file mode 100644 index ee02ef21c69..00000000000 Binary files a/docs/en-US/images/enable-disable-autoscale.png and /dev/null differ diff --git a/docs/en-US/images/enable-disable.png b/docs/en-US/images/enable-disable.png deleted file mode 100644 index cab31ae3d59..00000000000 Binary files a/docs/en-US/images/enable-disable.png and /dev/null differ diff --git a/docs/en-US/images/gslb.png b/docs/en-US/images/gslb.png deleted file mode 100644 index f0a04db45e1..00000000000 Binary files a/docs/en-US/images/gslb.png and /dev/null differ diff --git a/docs/en-US/images/guest-traffic-setup.png b/docs/en-US/images/guest-traffic-setup.png deleted file mode 100644 index 52508194ac1..00000000000 Binary files a/docs/en-US/images/guest-traffic-setup.png and /dev/null differ diff --git a/docs/en-US/images/http-access.png b/docs/en-US/images/http-access.png deleted file mode 100644 index 817f197985a..00000000000 Binary files a/docs/en-US/images/http-access.png and /dev/null differ diff --git a/docs/en-US/images/icon.svg b/docs/en-US/images/icon.svg deleted file mode 100644 index 37f94c06c1b..00000000000 --- a/docs/en-US/images/icon.svg +++ 
/dev/null @@ -1,37 +0,0 @@ - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en-US/images/infrastructure-overview.png b/docs/en-US/images/infrastructure-overview.png deleted file mode 100644 index 24aeecfcd1e..00000000000 Binary files a/docs/en-US/images/infrastructure-overview.png and /dev/null differ diff --git a/docs/en-US/images/installation-complete.png b/docs/en-US/images/installation-complete.png deleted file mode 100644 index 4626f86d133..00000000000 Binary files a/docs/en-US/images/installation-complete.png and /dev/null differ diff --git a/docs/en-US/images/instances-screen.png b/docs/en-US/images/instances-screen.png deleted file mode 100644 index 74a1f08e43d..00000000000 Binary files a/docs/en-US/images/instances-screen.png and /dev/null differ diff --git a/docs/en-US/images/iso-icon.png b/docs/en-US/images/iso-icon.png deleted file mode 100644 index 8d547fb397e..00000000000 Binary files a/docs/en-US/images/iso-icon.png and /dev/null differ diff --git a/docs/en-US/images/jenkins-pipeline.png b/docs/en-US/images/jenkins-pipeline.png deleted file mode 100644 index 0788c26a485..00000000000 Binary files a/docs/en-US/images/jenkins-pipeline.png and /dev/null differ diff --git a/docs/en-US/images/l3_services.png b/docs/en-US/images/l3_services.png deleted file mode 100644 index f68aaf33745..00000000000 Binary files a/docs/en-US/images/l3_services.png and /dev/null differ diff --git a/docs/en-US/images/large-scale-redundant-setup.png b/docs/en-US/images/large-scale-redundant-setup.png deleted file mode 100644 index 5d2581afb43..00000000000 Binary files a/docs/en-US/images/large-scale-redundant-setup.png and /dev/null differ diff --git a/docs/en-US/images/launchHadoopClusterApi.png b/docs/en-US/images/launchHadoopClusterApi.png deleted file mode 100644 index 6f94c744d02..00000000000 Binary files a/docs/en-US/images/launchHadoopClusterApi.png and /dev/null differ diff --git a/docs/en-US/images/launchHadoopClusterCmd.png 
b/docs/en-US/images/launchHadoopClusterCmd.png deleted file mode 100644 index 66a0c75ed64..00000000000 Binary files a/docs/en-US/images/launchHadoopClusterCmd.png and /dev/null differ diff --git a/docs/en-US/images/ldap-account-addition.png b/docs/en-US/images/ldap-account-addition.png deleted file mode 100644 index 0c8573ff9c9..00000000000 Binary files a/docs/en-US/images/ldap-account-addition.png and /dev/null differ diff --git a/docs/en-US/images/ldap-configuration.png b/docs/en-US/images/ldap-configuration.png deleted file mode 100644 index c840e597e1b..00000000000 Binary files a/docs/en-US/images/ldap-configuration.png and /dev/null differ diff --git a/docs/en-US/images/ldap-global-settings.png b/docs/en-US/images/ldap-global-settings.png deleted file mode 100644 index 0567de84374..00000000000 Binary files a/docs/en-US/images/ldap-global-settings.png and /dev/null differ diff --git a/docs/en-US/images/ldap-list-users.png b/docs/en-US/images/ldap-list-users.png deleted file mode 100644 index 8dabbb88663..00000000000 Binary files a/docs/en-US/images/ldap-list-users.png and /dev/null differ diff --git a/docs/en-US/images/list-domain-vms.png b/docs/en-US/images/list-domain-vms.png deleted file mode 100644 index 1717f559e12..00000000000 Binary files a/docs/en-US/images/list-domain-vms.png and /dev/null differ diff --git a/docs/en-US/images/list-ldap-configuration.png b/docs/en-US/images/list-ldap-configuration.png deleted file mode 100644 index 6bf778893dc..00000000000 Binary files a/docs/en-US/images/list-ldap-configuration.png and /dev/null differ diff --git a/docs/en-US/images/list-specific-vm.png b/docs/en-US/images/list-specific-vm.png deleted file mode 100644 index 4fa1da451d5..00000000000 Binary files a/docs/en-US/images/list-specific-vm.png and /dev/null differ diff --git a/docs/en-US/images/list-virtualmachines.png b/docs/en-US/images/list-virtualmachines.png deleted file mode 100644 index cd9401eed5a..00000000000 Binary files 
a/docs/en-US/images/list-virtualmachines.png and /dev/null differ diff --git a/docs/en-US/images/mesos-integration-arch.jpg b/docs/en-US/images/mesos-integration-arch.jpg deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/docs/en-US/images/migrate-instance.png b/docs/en-US/images/migrate-instance.png deleted file mode 100644 index 25ff57245b3..00000000000 Binary files a/docs/en-US/images/migrate-instance.png and /dev/null differ diff --git a/docs/en-US/images/multi-node-management-server.png b/docs/en-US/images/multi-node-management-server.png deleted file mode 100644 index 5cf5ed5456f..00000000000 Binary files a/docs/en-US/images/multi-node-management-server.png and /dev/null differ diff --git a/docs/en-US/images/multi-site-deployment.png b/docs/en-US/images/multi-site-deployment.png deleted file mode 100644 index f3ae5bb6b5c..00000000000 Binary files a/docs/en-US/images/multi-site-deployment.png and /dev/null differ diff --git a/docs/en-US/images/multi-tier-app.png b/docs/en-US/images/multi-tier-app.png deleted file mode 100644 index cec11228e26..00000000000 Binary files a/docs/en-US/images/multi-tier-app.png and /dev/null differ diff --git a/docs/en-US/images/network-acl.png b/docs/en-US/images/network-acl.png deleted file mode 100644 index 5602827f415..00000000000 Binary files a/docs/en-US/images/network-acl.png and /dev/null differ diff --git a/docs/en-US/images/network-setup-zone.png b/docs/en-US/images/network-setup-zone.png deleted file mode 100644 index 8324ff8beaa..00000000000 Binary files a/docs/en-US/images/network-setup-zone.png and /dev/null differ diff --git a/docs/en-US/images/network-singlepod.png b/docs/en-US/images/network-singlepod.png deleted file mode 100644 index e1214ea7f69..00000000000 Binary files a/docs/en-US/images/network-singlepod.png and /dev/null differ diff --git a/docs/en-US/images/network_service.png b/docs/en-US/images/network_service.png deleted file mode 100644 index 95281aa2daa..00000000000 Binary files 
a/docs/en-US/images/network_service.png and /dev/null differ diff --git a/docs/en-US/images/networking-in-a-pod.png b/docs/en-US/images/networking-in-a-pod.png deleted file mode 100644 index bf731712042..00000000000 Binary files a/docs/en-US/images/networking-in-a-pod.png and /dev/null differ diff --git a/docs/en-US/images/networking-in-a-zone.png b/docs/en-US/images/networking-in-a-zone.png deleted file mode 100644 index fb740da448e..00000000000 Binary files a/docs/en-US/images/networking-in-a-zone.png and /dev/null differ diff --git a/docs/en-US/images/nic-bonding-and-multipath-io.png b/docs/en-US/images/nic-bonding-and-multipath-io.png deleted file mode 100644 index 0fe60b66ed6..00000000000 Binary files a/docs/en-US/images/nic-bonding-and-multipath-io.png and /dev/null differ diff --git a/docs/en-US/images/nvp-add-controller.png b/docs/en-US/images/nvp-add-controller.png deleted file mode 100644 index e02d31f0a37..00000000000 Binary files a/docs/en-US/images/nvp-add-controller.png and /dev/null differ diff --git a/docs/en-US/images/nvp-enable-provider.png b/docs/en-US/images/nvp-enable-provider.png deleted file mode 100644 index 0f2d02ddfa9..00000000000 Binary files a/docs/en-US/images/nvp-enable-provider.png and /dev/null differ diff --git a/docs/en-US/images/nvp-network-offering.png b/docs/en-US/images/nvp-network-offering.png deleted file mode 100644 index c2d25c48c19..00000000000 Binary files a/docs/en-US/images/nvp-network-offering.png and /dev/null differ diff --git a/docs/en-US/images/nvp-physical-network-stt.png b/docs/en-US/images/nvp-physical-network-stt.png deleted file mode 100644 index 2ce7853ac54..00000000000 Binary files a/docs/en-US/images/nvp-physical-network-stt.png and /dev/null differ diff --git a/docs/en-US/images/nvp-vpc-offering-edit.png b/docs/en-US/images/nvp-vpc-offering-edit.png deleted file mode 100644 index ff235e24cd6..00000000000 Binary files a/docs/en-US/images/nvp-vpc-offering-edit.png and /dev/null differ diff --git 
a/docs/en-US/images/odl_structure.jpg b/docs/en-US/images/odl_structure.jpg deleted file mode 100644 index 08e0012f56b..00000000000 Binary files a/docs/en-US/images/odl_structure.jpg and /dev/null differ diff --git a/docs/en-US/images/parallel-mode.png b/docs/en-US/images/parallel-mode.png deleted file mode 100644 index 3b67a17af9d..00000000000 Binary files a/docs/en-US/images/parallel-mode.png and /dev/null differ diff --git a/docs/en-US/images/plugin1.jpg b/docs/en-US/images/plugin1.jpg deleted file mode 100644 index 970233d8475..00000000000 Binary files a/docs/en-US/images/plugin1.jpg and /dev/null differ diff --git a/docs/en-US/images/plugin2.jpg b/docs/en-US/images/plugin2.jpg deleted file mode 100644 index 9c8a6107ba9..00000000000 Binary files a/docs/en-US/images/plugin2.jpg and /dev/null differ diff --git a/docs/en-US/images/plugin3.jpg b/docs/en-US/images/plugin3.jpg deleted file mode 100644 index 07fae790e22..00000000000 Binary files a/docs/en-US/images/plugin3.jpg and /dev/null differ diff --git a/docs/en-US/images/plugin4.jpg b/docs/en-US/images/plugin4.jpg deleted file mode 100644 index 2bcec9f773a..00000000000 Binary files a/docs/en-US/images/plugin4.jpg and /dev/null differ diff --git a/docs/en-US/images/plugin_intro.jpg b/docs/en-US/images/plugin_intro.jpg deleted file mode 100644 index 113ffb32781..00000000000 Binary files a/docs/en-US/images/plugin_intro.jpg and /dev/null differ diff --git a/docs/en-US/images/pod-overview.png b/docs/en-US/images/pod-overview.png deleted file mode 100644 index c180060ba48..00000000000 Binary files a/docs/en-US/images/pod-overview.png and /dev/null differ diff --git a/docs/en-US/images/provisioning-overview.png b/docs/en-US/images/provisioning-overview.png deleted file mode 100644 index 25cc97e3557..00000000000 Binary files a/docs/en-US/images/provisioning-overview.png and /dev/null differ diff --git a/docs/en-US/images/region-overview.png b/docs/en-US/images/region-overview.png deleted file mode 100644 index 
528445c9d89..00000000000 Binary files a/docs/en-US/images/region-overview.png and /dev/null differ diff --git a/docs/en-US/images/release-ip-icon.png b/docs/en-US/images/release-ip-icon.png deleted file mode 100644 index aa9846cfd9b..00000000000 Binary files a/docs/en-US/images/release-ip-icon.png and /dev/null differ diff --git a/docs/en-US/images/remove-nic.png b/docs/en-US/images/remove-nic.png deleted file mode 100644 index 27145cebbc7..00000000000 Binary files a/docs/en-US/images/remove-nic.png and /dev/null differ diff --git a/docs/en-US/images/remove-tier.png b/docs/en-US/images/remove-tier.png deleted file mode 100644 index e14d08f8052..00000000000 Binary files a/docs/en-US/images/remove-tier.png and /dev/null differ diff --git a/docs/en-US/images/remove-vpc.png b/docs/en-US/images/remove-vpc.png deleted file mode 100644 index aa9846cfd9b..00000000000 Binary files a/docs/en-US/images/remove-vpc.png and /dev/null differ diff --git a/docs/en-US/images/remove-vpn.png b/docs/en-US/images/remove-vpn.png deleted file mode 100644 index 27145cebbc7..00000000000 Binary files a/docs/en-US/images/remove-vpn.png and /dev/null differ diff --git a/docs/en-US/images/replace-acl-icon.png b/docs/en-US/images/replace-acl-icon.png deleted file mode 100644 index ae953ba2032..00000000000 Binary files a/docs/en-US/images/replace-acl-icon.png and /dev/null differ diff --git a/docs/en-US/images/replace-acl-list.png b/docs/en-US/images/replace-acl-list.png deleted file mode 100644 index 33750173b18..00000000000 Binary files a/docs/en-US/images/replace-acl-list.png and /dev/null differ diff --git a/docs/en-US/images/reset-vpn.png b/docs/en-US/images/reset-vpn.png deleted file mode 100644 index 04655dc37ad..00000000000 Binary files a/docs/en-US/images/reset-vpn.png and /dev/null differ diff --git a/docs/en-US/images/resize-volume-icon.png b/docs/en-US/images/resize-volume-icon.png deleted file mode 100644 index 48499021f06..00000000000 Binary files 
a/docs/en-US/images/resize-volume-icon.png and /dev/null differ diff --git a/docs/en-US/images/resize-volume.png b/docs/en-US/images/resize-volume.png deleted file mode 100644 index 6195623ab49..00000000000 Binary files a/docs/en-US/images/resize-volume.png and /dev/null differ diff --git a/docs/en-US/images/restart-vpc.png b/docs/en-US/images/restart-vpc.png deleted file mode 100644 index 04655dc37ad..00000000000 Binary files a/docs/en-US/images/restart-vpc.png and /dev/null differ diff --git a/docs/en-US/images/revert-vm.png b/docs/en-US/images/revert-vm.png deleted file mode 100644 index 04655dc37ad..00000000000 Binary files a/docs/en-US/images/revert-vm.png and /dev/null differ diff --git a/docs/en-US/images/search-button.png b/docs/en-US/images/search-button.png deleted file mode 100644 index f329aef4a25..00000000000 Binary files a/docs/en-US/images/search-button.png and /dev/null differ diff --git a/docs/en-US/images/select-vm-staticnat-vpc.png b/docs/en-US/images/select-vm-staticnat-vpc.png deleted file mode 100644 index 12fde26d883..00000000000 Binary files a/docs/en-US/images/select-vm-staticnat-vpc.png and /dev/null differ diff --git a/docs/en-US/images/separate-storage-network.png b/docs/en-US/images/separate-storage-network.png deleted file mode 100644 index 24dbbefc5b4..00000000000 Binary files a/docs/en-US/images/separate-storage-network.png and /dev/null differ diff --git a/docs/en-US/images/set-default-nic.png b/docs/en-US/images/set-default-nic.png deleted file mode 100644 index f329aef4a25..00000000000 Binary files a/docs/en-US/images/set-default-nic.png and /dev/null differ diff --git a/docs/en-US/images/small-scale-deployment.png b/docs/en-US/images/small-scale-deployment.png deleted file mode 100644 index 1c88520e7b4..00000000000 Binary files a/docs/en-US/images/small-scale-deployment.png and /dev/null differ diff --git a/docs/en-US/images/software-license.png b/docs/en-US/images/software-license.png deleted file mode 100644 index 
67aa2555341..00000000000 Binary files a/docs/en-US/images/software-license.png and /dev/null differ diff --git a/docs/en-US/images/start-vm-screen.png b/docs/en-US/images/start-vm-screen.png deleted file mode 100644 index 75a604a7a0e..00000000000 Binary files a/docs/en-US/images/start-vm-screen.png and /dev/null differ diff --git a/docs/en-US/images/stop-instance-icon.png b/docs/en-US/images/stop-instance-icon.png deleted file mode 100644 index 209afce5086..00000000000 Binary files a/docs/en-US/images/stop-instance-icon.png and /dev/null differ diff --git a/docs/en-US/images/suspend-icon.png b/docs/en-US/images/suspend-icon.png deleted file mode 100644 index cab31ae3d59..00000000000 Binary files a/docs/en-US/images/suspend-icon.png and /dev/null differ diff --git a/docs/en-US/images/sysmanager.png b/docs/en-US/images/sysmanager.png deleted file mode 100644 index 5b9df347a60..00000000000 Binary files a/docs/en-US/images/sysmanager.png and /dev/null differ diff --git a/docs/en-US/images/traffic-label.png b/docs/en-US/images/traffic-label.png deleted file mode 100644 index f161c89ce19..00000000000 Binary files a/docs/en-US/images/traffic-label.png and /dev/null differ diff --git a/docs/en-US/images/traffic-type.png b/docs/en-US/images/traffic-type.png deleted file mode 100644 index 10d5ddb25ed..00000000000 Binary files a/docs/en-US/images/traffic-type.png and /dev/null differ diff --git a/docs/en-US/images/vds-name.png b/docs/en-US/images/vds-name.png deleted file mode 100644 index bf5b4fcf35c..00000000000 Binary files a/docs/en-US/images/vds-name.png and /dev/null differ diff --git a/docs/en-US/images/view-console-button.png b/docs/en-US/images/view-console-button.png deleted file mode 100644 index b321ceadefe..00000000000 Binary files a/docs/en-US/images/view-console-button.png and /dev/null differ diff --git a/docs/en-US/images/view-systemvm-details.png b/docs/en-US/images/view-systemvm-details.png deleted file mode 100755 index bce270bf258..00000000000 Binary 
files a/docs/en-US/images/view-systemvm-details.png and /dev/null differ diff --git a/docs/en-US/images/vm-lifecycle.png b/docs/en-US/images/vm-lifecycle.png deleted file mode 100644 index 97823fc568a..00000000000 Binary files a/docs/en-US/images/vm-lifecycle.png and /dev/null differ diff --git a/docs/en-US/images/vm-running.png b/docs/en-US/images/vm-running.png deleted file mode 100644 index e50cd16c7b2..00000000000 Binary files a/docs/en-US/images/vm-running.png and /dev/null differ diff --git a/docs/en-US/images/vmware-increase-ports.png b/docs/en-US/images/vmware-increase-ports.png deleted file mode 100644 index fe968153262..00000000000 Binary files a/docs/en-US/images/vmware-increase-ports.png and /dev/null differ diff --git a/docs/en-US/images/vmware-iscsi-datastore.png b/docs/en-US/images/vmware-iscsi-datastore.png deleted file mode 100644 index 9f6b33f01ed..00000000000 Binary files a/docs/en-US/images/vmware-iscsi-datastore.png and /dev/null differ diff --git a/docs/en-US/images/vmware-iscsi-general.png b/docs/en-US/images/vmware-iscsi-general.png deleted file mode 100644 index 863602b9eb7..00000000000 Binary files a/docs/en-US/images/vmware-iscsi-general.png and /dev/null differ diff --git a/docs/en-US/images/vmware-iscsi-initiator-properties.png b/docs/en-US/images/vmware-iscsi-initiator-properties.png deleted file mode 100644 index 1fab03143b1..00000000000 Binary files a/docs/en-US/images/vmware-iscsi-initiator-properties.png and /dev/null differ diff --git a/docs/en-US/images/vmware-iscsi-initiator.png b/docs/en-US/images/vmware-iscsi-initiator.png deleted file mode 100644 index a9a8301d74d..00000000000 Binary files a/docs/en-US/images/vmware-iscsi-initiator.png and /dev/null differ diff --git a/docs/en-US/images/vmware-iscsi-target-add.png b/docs/en-US/images/vmware-iscsi-target-add.png deleted file mode 100644 index f016da7956d..00000000000 Binary files a/docs/en-US/images/vmware-iscsi-target-add.png and /dev/null differ diff --git 
a/docs/en-US/images/vmware-mgt-network-properties.png b/docs/en-US/images/vmware-mgt-network-properties.png deleted file mode 100644 index 9141af9c42f..00000000000 Binary files a/docs/en-US/images/vmware-mgt-network-properties.png and /dev/null differ diff --git a/docs/en-US/images/vmware-nexus-add-cluster.png b/docs/en-US/images/vmware-nexus-add-cluster.png deleted file mode 100644 index 7c1dd73f775..00000000000 Binary files a/docs/en-US/images/vmware-nexus-add-cluster.png and /dev/null differ diff --git a/docs/en-US/images/vmware-nexus-port-profile.png b/docs/en-US/images/vmware-nexus-port-profile.png deleted file mode 100644 index 19b264f7a0a..00000000000 Binary files a/docs/en-US/images/vmware-nexus-port-profile.png and /dev/null differ diff --git a/docs/en-US/images/vmware-physical-network.png b/docs/en-US/images/vmware-physical-network.png deleted file mode 100644 index a7495c77b14..00000000000 Binary files a/docs/en-US/images/vmware-physical-network.png and /dev/null differ diff --git a/docs/en-US/images/vmware-vswitch-properties.png b/docs/en-US/images/vmware-vswitch-properties.png deleted file mode 100644 index bc247d276d6..00000000000 Binary files a/docs/en-US/images/vmware-vswitch-properties.png and /dev/null differ diff --git a/docs/en-US/images/vpc-lb.png b/docs/en-US/images/vpc-lb.png deleted file mode 100644 index 4269e8b9f9e..00000000000 Binary files a/docs/en-US/images/vpc-lb.png and /dev/null differ diff --git a/docs/en-US/images/vpc-setting.png b/docs/en-US/images/vpc-setting.png deleted file mode 100644 index 782299e9f54..00000000000 Binary files a/docs/en-US/images/vpc-setting.png and /dev/null differ diff --git a/docs/en-US/images/vpn-icon.png b/docs/en-US/images/vpn-icon.png deleted file mode 100644 index 2ac12f77c40..00000000000 Binary files a/docs/en-US/images/vpn-icon.png and /dev/null differ diff --git a/docs/en-US/images/vsphere-client.png b/docs/en-US/images/vsphere-client.png deleted file mode 100644 index 2acc8b802ad..00000000000 
Binary files a/docs/en-US/images/vsphere-client.png and /dev/null differ diff --git a/docs/en-US/images/whirrDependency.png b/docs/en-US/images/whirrDependency.png deleted file mode 100644 index acdec78e5ac..00000000000 Binary files a/docs/en-US/images/whirrDependency.png and /dev/null differ diff --git a/docs/en-US/images/whirrOutput.png b/docs/en-US/images/whirrOutput.png deleted file mode 100644 index 7c3b51297e5..00000000000 Binary files a/docs/en-US/images/whirrOutput.png and /dev/null differ diff --git a/docs/en-US/images/zone-overview.png b/docs/en-US/images/zone-overview.png deleted file mode 100644 index 24aeecfcd1e..00000000000 Binary files a/docs/en-US/images/zone-overview.png and /dev/null differ diff --git a/docs/en-US/import-ami.xml b/docs/en-US/import-ami.xml deleted file mode 100644 index 16fe78a1579..00000000000 --- a/docs/en-US/import-ami.xml +++ /dev/null @@ -1,114 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Importing Amazon Machine Images - The following procedures describe how to import an Amazon Machine Image (AMI) into &PRODUCT; when using the XenServer hypervisor. - Assume you have an AMI file and this file is called CentOS_6.2_x64. Assume further that you are working on a CentOS host. If the AMI is a Fedora image, you need to be working on a Fedora host initially. - You need to have a XenServer host with a file-based storage repository (either a local ext3 SR or an NFS SR) to convert to a VHD once the image file has been customized on the Centos/Fedora host. - When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text. - - - To import an AMI: - - Set up loopback on image file:# mkdir -p /mnt/loop/centos62 -# mount -o loop CentOS_6.2_x64 /mnt/loop/centos54 - - Install the kernel-xen package into the image. This downloads the PV kernel and ramdisk to the image.# yum -c /mnt/loop/centos54/etc/yum.conf --installroot=/mnt/loop/centos62/ -y install kernel-xen - Create a grub entry in /boot/grub/grub.conf.# mkdir -p /mnt/loop/centos62/boot/grub -# touch /mnt/loop/centos62/boot/grub/grub.conf -# echo "" > /mnt/loop/centos62/boot/grub/grub.conf - - Determine the name of the PV kernel that has been installed into the image. - # cd /mnt/loop/centos62 -# ls lib/modules/ -2.6.16.33-xenU 2.6.16-xenU 2.6.18-164.15.1.el5xen 2.6.18-164.6.1.el5.centos.plus 2.6.18-xenU-ec2-v1.0 2.6.21.7-2.fc8xen 2.6.31-302-ec2 -# ls boot/initrd* -boot/initrd-2.6.18-164.6.1.el5.centos.plus.img boot/initrd-2.6.18-164.15.1.el5xen.img -# ls boot/vmlinuz* -boot/vmlinuz-2.6.18-164.15.1.el5xen boot/vmlinuz-2.6.18-164.6.1.el5.centos.plus boot/vmlinuz-2.6.18-xenU-ec2-v1.0 boot/vmlinuz-2.6.21-2952.fc8xen - - Xen kernels/ramdisk always end with "xen". 
For the kernel version you choose, there has to be an entry for that version under lib/modules, there has to be an initrd and vmlinuz corresponding to that. Above, the only kernel that satisfies this condition is 2.6.18-164.15.1.el5xen. - Based on your findings, create an entry in the grub.conf file. Below is an example entry.default=0 -timeout=5 -hiddenmenu -title CentOS (2.6.18-164.15.1.el5xen) - root (hd0,0) - kernel /boot/vmlinuz-2.6.18-164.15.1.el5xen ro root=/dev/xvda - initrd /boot/initrd-2.6.18-164.15.1.el5xen.img - - Edit etc/fstab, changing “sda1†to “xvda†and changing “sdb†to “xvdbâ€. - # cat etc/fstab -/dev/xvda / ext3 defaults 1 1 -/dev/xvdb /mnt ext3 defaults 0 0 -none /dev/pts devpts gid=5,mode=620 0 0 -none /proc proc defaults 0 0 -none /sys sysfs defaults 0 0 - - Enable login via the console. The default console device in a XenServer system is xvc0. Ensure that etc/inittab and etc/securetty have the following lines respectively: - # grep xvc0 etc/inittab -co:2345:respawn:/sbin/agetty xvc0 9600 vt100-nav -# grep xvc0 etc/securetty -xvc0 - - Ensure the ramdisk supports PV disk and PV network. Customize this for the kernel version you have determined above. - # chroot /mnt/loop/centos54 -# cd /boot/ -# mv initrd-2.6.18-164.15.1.el5xen.img initrd-2.6.18-164.15.1.el5xen.img.bak -# mkinitrd -f /boot/initrd-2.6.18-164.15.1.el5xen.img --with=xennet --preload=xenblk --omit-scsi-modules 2.6.18-164.15.1.el5xen - - Change the password. - # passwd -Changing password for user root. -New UNIX password: -Retype new UNIX password: -passwd: all authentication tokens updated successfully. - - Exit out of chroot.# exit - Check etc/ssh/sshd_config for lines allowing ssh login using a password. 
- # egrep "PermitRootLogin|PasswordAuthentication" /mnt/loop/centos54/etc/ssh/sshd_config -PermitRootLogin yes -PasswordAuthentication yes - - If you need the template to be enabled to reset passwords from the &PRODUCT; UI or API, - install the password change script into the image at this point. See - . - Unmount and delete loopback mount.# umount /mnt/loop/centos54 -# losetup -d /dev/loop0 - - Copy the image file to your XenServer host's file-based storage repository. In the example below, the Xenserver is "xenhost". This XenServer has an NFS repository whose uuid is a9c5b8c8-536b-a193-a6dc-51af3e5ff799. - # scp CentOS_6.2_x64 xenhost:/var/run/sr-mount/a9c5b8c8-536b-a193-a6dc-51af3e5ff799/ - Log in to the Xenserver and create a VDI the same size as the image. - [root@xenhost ~]# cd /var/run/sr-mount/a9c5b8c8-536b-a193-a6dc-51af3e5ff799 -[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# ls -lh CentOS_6.2_x64 --rw-r--r-- 1 root root 10G Mar 16 16:49 CentOS_6.2_x64 -[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# xe vdi-create virtual-size=10GiB sr-uuid=a9c5b8c8-536b-a193-a6dc-51af3e5ff799 type=user name-label="Centos 6.2 x86_64" -cad7317c-258b-4ef7-b207-cdf0283a7923 - - Import the image file into the VDI. This may take 10–20 minutes.[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# xe vdi-import filename=CentOS_6.2_x64 uuid=cad7317c-258b-4ef7-b207-cdf0283a7923 - Locate a the VHD file. This is the file with the VDI’s UUID as its name. Compress it and upload it to your web server. - [root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# bzip2 -c cad7317c-258b-4ef7-b207-cdf0283a7923.vhd > CentOS_6.2_x64.vhd.bz2 -[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# scp CentOS_6.2_x64.vhd.bz2 webserver:/var/www/html/templates/ - - -
diff --git a/docs/en-US/increase-management-server-max-memory.xml b/docs/en-US/increase-management-server-max-memory.xml deleted file mode 100644 index 8992ad6f16a..00000000000 --- a/docs/en-US/increase-management-server-max-memory.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Increase Management Server Maximum Memory - If the Management Server is subject to high demand, the default maximum JVM memory allocation can be insufficient. To increase the memory: - - Edit the Tomcat configuration file:/etc/cloudstack/management/tomcat6.conf - Change the command-line parameter -XmxNNNm to a higher value of N.For example, if the current value is -Xmx128m, change it to -Xmx1024m or higher. - To put the new setting into effect, restart the Management Server.# service cloudstack-management restart - - For more information about memory issues, see "FAQ: Memory" at Tomcat Wiki. -
- diff --git a/docs/en-US/incremental-snapshots-backup.xml b/docs/en-US/incremental-snapshots-backup.xml deleted file mode 100644 index ade00c90c17..00000000000 --- a/docs/en-US/incremental-snapshots-backup.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Incremental Snapshots and Backup - Snapshots are created on primary storage where a disk resides. After a snapshot is created, it is immediately backed up to secondary storage and removed from primary storage for optimal utilization of space on primary storage. - &PRODUCT; does incremental backups for some hypervisors. When incremental backups are supported, every N backup is a full backup. - - - - - - - VMware vSphere - Citrix XenServer - KVM - - - - - Support incremental backup - N - Y - N - - - - - -
diff --git a/docs/en-US/initial-setup-of-external-firewalls-loadbalancers.xml b/docs/en-US/initial-setup-of-external-firewalls-loadbalancers.xml deleted file mode 100644 index 332afa04ebb..00000000000 --- a/docs/en-US/initial-setup-of-external-firewalls-loadbalancers.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Initial Setup of External Firewalls and Load Balancers - When the first VM is created for a new account, &PRODUCT; programs the external firewall and load balancer to work with the VM. The following objects are created on the firewall: - - A new logical interface to connect to the account's private VLAN. The interface IP is always the first IP of the account's private subnet (e.g. 10.1.1.1). - A source NAT rule that forwards all outgoing traffic from the account's private VLAN to the public Internet, using the account's public IP address as the source address - A firewall filter counter that measures the number of bytes of outgoing traffic for the account - - The following objects are created on the load balancer: - - A new VLAN that matches the account's provisioned Zone VLAN - A self IP for the VLAN. This is always the second IP of the account's private subnet (e.g. 10.1.1.2). - -
diff --git a/docs/en-US/initialize-and-test.xml b/docs/en-US/initialize-and-test.xml deleted file mode 100644 index 2dd6e259176..00000000000 --- a/docs/en-US/initialize-and-test.xml +++ /dev/null @@ -1,77 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Initialize and Test - After everything is configured, &PRODUCT; will perform its initialization. This can take 30 minutes or more, depending on the speed of your network. When the initialization has completed successfully, the administrator's Dashboard should be displayed in the &PRODUCT; UI. - - - - Verify that the system is ready. In the left navigation bar, select Templates. Click on the CentOS 5.5 (64bit) no Gui (KVM) template. Check to be sure that the status is "Download Complete." Do not proceed to the next step until this status is displayed. - - Go to the Instances tab, and filter by My Instances. - - Click Add Instance and follow the steps in the wizard. - - - - Choose the zone you just added. - - In the template selection, choose the template to use in the VM. If this is a fresh installation, likely only the provided CentOS template is available. - - Select a service offering. Be sure that the hardware you have allows starting the selected service offering. - - In data disk offering, if desired, add another data disk. This is a second volume that will be available to but not mounted in the guest. For example, in Linux on XenServer you will see /dev/xvdb in the guest after rebooting the VM. A reboot is not required if you have a PV-enabled OS kernel in use. - - In default network, choose the primary network for the guest. In a trial installation, you would have only one option here. - Optionally give your VM a name and a group. Use any descriptive text you would like. - - Click Launch VM. Your VM will be created and started. It might take some time to download the template and complete the VM startup. You can watch the VM’s progress in the Instances screen. - - - - - - - - To use the VM, click the View Console button. 
- - - - - - ConsoleButton.png: button to launch a console - - - - - - For more information about using VMs, including instructions for how to allow incoming network traffic to the VM, start, stop, and delete VMs, and move a VM from one host to another, see Working With Virtual Machines in the Administrator’s Guide. - - - - - Congratulations! You have successfully completed a &PRODUCT; Installation. - - If you decide to grow your deployment, you can add more hosts, primary storage, zones, pods, and clusters. -
diff --git a/docs/en-US/install-usage-server.xml b/docs/en-US/install-usage-server.xml deleted file mode 100644 index ffd748d758e..00000000000 --- a/docs/en-US/install-usage-server.xml +++ /dev/null @@ -1,61 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Installing the Usage Server (Optional) - You can optionally install the Usage Server once the Management Server is configured properly. The Usage Server takes data from the events in the system and enables usage-based billing for accounts. - When multiple Management Servers are present, the Usage Server may be installed on any number of them. The Usage Servers will coordinate usage processing. A site that is concerned about availability should install Usage Servers on at least two Management Servers. -
- Requirements for Installing the Usage Server - - The Management Server must be running when the Usage Server is installed. - The Usage Server must be installed on the same server as a Management Server. - -
-
- Steps to Install the Usage Server - - - Run ./install.sh. - -# ./install.sh - - You should see a few messages as the installer prepares, followed by a list of choices. - - - Choose "S" to install the Usage Server. - - > S - - - - Once installed, start the Usage Server with the following command. - -# service cloudstack-usage start - - - - The Administration Guide discusses further configuration of the Usage Server. -
-
diff --git a/docs/en-US/installation-complete.xml b/docs/en-US/installation-complete.xml deleted file mode 100644 index b39040ba0cf..00000000000 --- a/docs/en-US/installation-complete.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Installation Complete! Next Steps - Congratulations! You have now installed &PRODUCT; Management Server and the database it uses to persist system data. - - - - - installation-complete.png: Finished installs with single Management Server and multiple Management Servers - - What should you do next? - - Even without adding any cloud infrastructure, you can run the UI to get a feel for what's offered and how you will interact with &PRODUCT; on an ongoing basis. See . - When you're ready, add the cloud infrastructure and try running some virtual machines on it, so you can watch how &PRODUCT; manages the infrastructure. See . - -
diff --git a/docs/en-US/installation-steps-overview.xml b/docs/en-US/installation-steps-overview.xml deleted file mode 100644 index ea00057bab3..00000000000 --- a/docs/en-US/installation-steps-overview.xml +++ /dev/null @@ -1,67 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Overview of Installation Steps - For anything more than a simple trial installation, you will need guidance for a variety of configuration choices. It is strongly recommended that you read the following: - - Choosing a Deployment Architecture - Choosing a Hypervisor: Supported Features - Network Setup - Storage Setup - Best Practices - - - - Make sure you have the required hardware ready. See - - - Install the Management Server (choose single-node or multi-node). See - - - Log in to the UI. See - - - Add a zone. Includes the first pod, cluster, and host. See - - - Add more pods (optional). See - - - Add more clusters (optional). See - - - Add more hosts (optional). See - - - Add more primary storage (optional). See - - - Add more secondary storage (optional). See - - - Try using the cloud. See - - -
diff --git a/docs/en-US/installation.xml b/docs/en-US/installation.xml deleted file mode 100644 index 5fc550edad6..00000000000 --- a/docs/en-US/installation.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Installation - - - - - - diff --git a/docs/en-US/installation_steps_overview.xml b/docs/en-US/installation_steps_overview.xml deleted file mode 100644 index 2632a4d6243..00000000000 --- a/docs/en-US/installation_steps_overview.xml +++ /dev/null @@ -1,84 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Overview of Installation Steps - For anything more than a simple trial installation, you will need - guidance for a variety of configuration choices. It is strongly - recommended that you read the following: - - Choosing a Deployment Architecture - Choosing a Hypervisor: Supported Features - Network Setup - Storage Setup - Best Practices - - - - - Prepare - - Make sure you have the required hardware ready - - - (Optional) Fill out the preparation checklists - - - Install the &PRODUCT; software - - - Install the Management Server (choose single-node or multi-node) - - - Log in to the UI - - - Provision your cloud infrastructure - - - Add a zone. Includes the first pod, cluster, and host - - - Add more pods - - - Add more clusters - - - Add more hosts - - - Add more primary storage - - - Add more secondary storage - - - Try using the cloud - - - Initialization and testing - - -
diff --git a/docs/en-US/installing-publican.xml b/docs/en-US/installing-publican.xml deleted file mode 100644 index 9f180aad375..00000000000 --- a/docs/en-US/installing-publican.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Installing Publican - &PRODUCT; documentation is built using publican. This section describes how to install publican on your own machine so that you can build the documentation guides. - - The &PRODUCT; documentation source code is located under /docs - Publican documentation itself is also very useful. - - On RHEL and RHEL derivatives, install publican with the following command: - yum install publican publican-doc - On Ubuntu, install publican with the following command: - apt-get install publican publican-doc - For other distributions, refer to the publican documentation listed above. For the latest versions of OSX you may have to install from source and tweak it to your own setup. - Once publican is installed, you need to set up the so-called &PRODUCT; brand defined in the docs/publican-&PRODUCT; directory. - To do so, enter the following commands: - - sudo cp -R publican-cloudstack /usr/share/publican/Common_Content/cloudstack - - If this fails or you later face errors related to the brand files, see the publican documentation. - With publican installed and the &PRODUCT; brand files in place, you should be able to build any documentation guide. - - -
diff --git a/docs/en-US/inter-vlan-routing.xml b/docs/en-US/inter-vlan-routing.xml deleted file mode 100644 index 59115deb581..00000000000 --- a/docs/en-US/inter-vlan-routing.xml +++ /dev/null @@ -1,107 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- About Inter-VLAN Routing (nTier Apps) - Inter-VLAN Routing (nTier Apps) is the capability to route network traffic between VLANs. - This feature enables you to build Virtual Private Clouds (VPC), an isolated segment of your - cloud, that can hold multi-tier applications. These tiers are deployed on different VLANs that - can communicate with each other. You provision VLANs to the tiers you create, and VMs can be - deployed on different tiers. The VLANs are connected to a virtual router, which facilitates - communication between the VMs. In effect, you can segment VMs by means of VLANs into different - networks that can host multi-tier applications, such as Web, Application, or Database. Such - segmentation by means of VLANs logically separates application VMs for higher security and lower - broadcasts, while remaining physically connected to the same device. - This feature is supported on XenServer, KVM, and VMware hypervisors. - The major advantages are: - - - The administrator can deploy a set of VLANs and allow users to deploy VMs on these - VLANs. A guest VLAN is randomly allotted to an account from a pre-specified set of guest - VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that - account. - - A VLAN allocated for an account cannot be shared between multiple accounts. - - - - The administrator can allow users to create their own VPC and deploy the application. In - this scenario, the VMs that belong to the account are deployed on the VLANs allotted to that - account. - - - Both administrators and users can create multiple VPCs. The guest network NIC is plugged - to the VPC virtual router when the first VM is deployed in a tier. - - - The administrator can create the following gateways to send traffic to or receive traffic from - the VMs: - - - VPN Gateway: For more information, see . - - - Public Gateway: The public gateway for a VPC is - added to the virtual router when the virtual router is created for VPC. 
The public - gateway is not exposed to the end users. You are not allowed to list it, nor allowed to - create any static routes. - - - Private Gateway: For more information, see . - - - - - Both administrators and users can create various possible destinations-gateway - combinations. However, only one gateway of each type can be used in a deployment. - For example: - - - VLANs and Public Gateway: For example, an - application is deployed in the cloud, and the Web application VMs communicate with the - Internet. - - - VLANs, VPN Gateway, and Public Gateway: For - example, an application is deployed in the cloud; the Web application VMs communicate - with the Internet; and the database VMs communicate with the on-premise devices. - - - - - The administrator can define Network Access Control List (ACL) on the virtual router to - filter the traffic among the VLANs or between the Internet and a VLAN. You can define ACL - based on CIDR, port range, protocol, type code (if ICMP protocol is selected) and - Ingress/Egress type. - - - The following figure shows the possible deployment scenarios of a Inter-VLAN setup: - - - - - - mutltier.png: a multi-tier setup. - - - To set up a multi-tier Inter-VLAN deployment, see . -
diff --git a/docs/en-US/introduction.xml b/docs/en-US/introduction.xml deleted file mode 100644 index 9aca8bdfc93..00000000000 --- a/docs/en-US/introduction.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Introduction - - - -
diff --git a/docs/en-US/ip-forwarding-firewalling.xml b/docs/en-US/ip-forwarding-firewalling.xml deleted file mode 100644 index d1beb2eb0f2..00000000000 --- a/docs/en-US/ip-forwarding-firewalling.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- IP Forwarding and Firewalling - By default, all incoming traffic to the public IP address is rejected. All outgoing traffic - from the guests is also blocked by default. - To allow outgoing traffic, follow the procedure in . - To allow incoming traffic, users may set up firewall rules and/or port forwarding rules. For - example, you can use a firewall rule to open a range of ports on the public IP address, such as - 33 through 44. Then use port forwarding rules to direct traffic from individual ports within - that range to specific ports on user VMs. For example, one port forwarding rule could route - incoming traffic on the public IP's port 33 to port 100 on one user VM's private IP. - - - -
diff --git a/docs/en-US/ip-load-balancing.xml b/docs/en-US/ip-load-balancing.xml deleted file mode 100644 index ae569e7d969..00000000000 --- a/docs/en-US/ip-load-balancing.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- IP Load Balancing - The user may choose to associate the same public IP for multiple guests. &PRODUCT; implements a TCP-level load balancer with the following policies. - - Round-robin - Least connection - Source IP - - This is similar to port forwarding but the destination may be multiple IP addresses. -
diff --git a/docs/en-US/ip-vlan-tenant.xml b/docs/en-US/ip-vlan-tenant.xml deleted file mode 100644 index d58d49be63a..00000000000 --- a/docs/en-US/ip-vlan-tenant.xml +++ /dev/null @@ -1,212 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Reserving Public IP Addresses and VLANs for Accounts - &PRODUCT; provides you the ability to reserve a set of public IP addresses and VLANs - exclusively for an account. During zone creation, you can continue defining a set of VLANs and - multiple public IP ranges. This feature extends the functionality to enable you to dedicate a - fixed set of VLANs and guest IP addresses for a tenant. - Note that if an account has consumed all the VLANs and IPs dedicated to it, the account can - acquire two more resources from the system. &PRODUCT; provides the root admin with two - configuration parameters to modify this default behavior—use.system.public.ips and - use.system.guest.vlans. These global parameters enable the root admin to disallow an account - from acquiring public IPs and guest VLANs from the system, if the account has dedicated - resources and these dedicated resources have all been consumed. Both these configurations are - configurable at the account level. - This feature provides you the following capabilities: - - - Reserve a VLAN range and public IP address range from an Advanced zone and assign it to - an account - - - Disassociate a VLAN and public IP address range from an account - - - View the number of public IP addresses allocated to an account - - - Check whether the required range is available and conforms to account limits. - The maximum IPs per account limit cannot be superseded. - - -
- Dedicating IP Address Ranges to an Account - - - Log in to the &PRODUCT; UI as administrator. - - - In the left navigation bar, click Infrastructure. - - - In Zones, click View All. - - - Choose the zone you want to work with. - - - Click the Physical Network tab. - - - In the Public node of the diagram, click Configure. - - - Click the IP Ranges tab. - You can either assign an existing IP range to an account, or create a new IP range and - assign to an account. - - - To assign an existing IP range to an account, perform the following: - - - Locate the IP range you want to work with. - - - Click Add Account - - - - - addAccount-icon.png: button to assign an IP range to an account. - - button. - The Add Account dialog is displayed. - - - Specify the following: - - - Account: The account to which you want to - assign the IP address range. - - - Domain: The domain associated with the - account. - - - To create a new IP range and assign an account, perform the following: - - - Specify the following: - - - Gateway - - - Netmask - - - VLAN - - - Start IP - - - End IP - - - Account: Perform the following: - - - Click Account. - The Add Account page is displayed. - - - Specify the following: - - - Account: The account to which you want to - assign an IP address range. - - - Domain: The domain associated with the - account. - - - - - Click OK. - - - - - - - Click Add. - - - - - - -
-
- Dedicating VLAN Ranges to an Account - - - After the &PRODUCT; Management Server is installed, log in to the &PRODUCT; UI as - administrator. - - - In the left navigation bar, click Infrastructure. - - - In Zones, click View All. - - - Choose the zone you want to work with. - - - Click the Physical Network tab. - - - In the Guest node of the diagram, click Configure. - - - Select the Dedicated VLAN Ranges tab. - - - Click Dedicate VLAN Range. - The Dedicate VLAN Range dialog is displayed. - - - Specify the following: - - - VLAN Range: The - VLAN range that you want to assign to an account. - - - Account: The - account to which you want to assign the selected VLAN range. - - - Domain: The - domain associated with the account. - - - - -
-
diff --git a/docs/en-US/ipaddress-usage-record-format.xml b/docs/en-US/ipaddress-usage-record-format.xml deleted file mode 100644 index 1a0385b999e..00000000000 --- a/docs/en-US/ipaddress-usage-record-format.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- IP Address Usage Record Format - For IP address usage the following fields exist in a usage record. - - account - name of the account - accountid - ID of the account - domainid - ID of the domain in which this account resides - zoneid - Zone where the usage occurred - description - A string describing what the usage record is tracking - usage - String representation of the usage, including the units of usage - usagetype - A number representing the usage type (see Usage Types) - rawusage - A number representing the actual usage in hours - usageid - IP address ID - startdate, enddate - The range of time for which the usage is aggregated; see Dates in the Usage Record - issourcenat - Whether source NAT is enabled for the IP address - iselastic - True if the IP address is elastic. - -
diff --git a/docs/en-US/ipv6-support.xml b/docs/en-US/ipv6-support.xml deleted file mode 100644 index bc14c8eab0e..00000000000 --- a/docs/en-US/ipv6-support.xml +++ /dev/null @@ -1,191 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- IPv6 Support in &PRODUCT; - &PRODUCT; supports Internet Protocol version 6 (IPv6), the recent version of the Internet - Protocol (IP) that defines routing the network traffic. IPv6 uses a 128-bit address that - exponentially expands the current address space that is available to the users. IPv6 addresses - consist of eight groups of four hexadecimal digits separated by colons, for example, - 2001:0db8:83a3:1012:1000:8a2e:0870:7454. &PRODUCT; supports IPv6 for public IPs in shared - networks. With IPv6 support, VMs in shared networks can obtain both IPv4 and IPv6 addresses from - the DHCP server. You can deploy VMs either in an IPv6 or IPv4 network, or in a dual network - environment. If an IPv6 network is used, the VM generates a link-local IPv6 address by itself, and - receives a stateful IPv6 address from the DHCPv6 server. - IPv6 is supported only on KVM and XenServer hypervisors. The IPv6 support is only an - experimental feature. - Here's the sequence of events when IPv6 is used: - - - The administrator creates an IPv6 shared network in an advanced zone. - - - The user deploys a VM in an IPv6 shared network. - - - The user VM generates an IPv6 link local address by itself, and gets an IPv6 global or - site local address through DHCPv6. - For information on API changes, see . - - -
- Prerequisites and Guidelines - Consider the following: - - - CIDR size must be 64 for IPv6 networks. - - - The DHCP client of the guest VMs should support generating DUID based on Link-layer - Address (DUID- LL). DUID-LL derives from the MAC address of guest VMs, and therefore the - user VM can be identified by using DUID. See Dynamic Host Configuration Protocol for IPv6 - for more information. - - - The gateway of the guest network generates Router Advisement and Response messages to - Router Solicitation. The M (Managed Address Configuration) flag of Router Advisement - should enable stateful IP address configuration. Set the M flag to where the end nodes - receive their IPv6 addresses from the DHCPv6 server as opposed to the router or - switch. - - The M flag is the 1-bit Managed Address Configuration flag for Router Advisement. - When set, Dynamic Host Configuration Protocol (DHCPv6) is available for address - configuration in addition to any IPs set by using stateless address - auto-configuration. - - - - Use the System VM template exclusively designed to support IPv6. Download the System - VM template from http://cloudstack.apt-get.eu/systemvm/. - - - The concept of Default Network applies to IPv6 networks. However, unlike IPv4 - &PRODUCT; does not control the routing information of IPv6 in shared network; the choice - of Default Network will not affect the routing in the user VM. - - - In a multiple shared network, the default route is set by the rack router, rather than - the DHCP server, which is out of &PRODUCT; control. Therefore, in order for the user VM to - get only the default route from the default NIC, modify the configuration of the user VM, - and set non-default NIC's accept_ra to 0 explicitly. The - accept_ra parameter accepts Router Advertisements and auto-configure - /proc/sys/net/ipv6/conf/interface with received data. - - -
-
- Limitations of IPv6 in &PRODUCT; - The following are not yet supported: - - - Security groups - - - Userdata and metadata - - - Passwords - - -
-
- Guest VM Configuration for DHCPv6 - For the guest VMs to get IPv6 address, run dhclient command manually on each of the VMs. - Use DUID-LL to set up dhclient. - The IPv6 address is lost when a VM is stopped and started. Therefore, use the same procedure - to get an IPv6 address when a VM is stopped and started. - - - Set up dhclient by using DUID-LL. - Perform the following for DHCP Client 4.2 and above: - - - Run the following command on the selected VM to get the dhcpv6 offer from - VR: - dhclient -6 -D LL <dev> - - - Perform the following for DHCP Client 4.1: - - - Open the following to the dhclient configuration file: - vi /etc/dhcp/dhclient.conf - - - Add the following to the dhclient configuration file: - send dhcp6.client-id = concat(00:03:00, hardware); - - - - - Get IPv6 address from DHCP server as part of the system or network restart. - Based on the operating systems, perform the following: - On CentOS 6.2: - - - Open the Ethernet interface configuration file: - vi /etc/sysconfig/network-scripts/ifcfg-eth0 - The ifcfg-eth0 file controls the first NIC in a system. - - - Make the necessary configuration changes, as given below: - DEVICE=eth0 -HWADDR=06:A0:F0:00:00:38 -NM_CONTROLLED=no -ONBOOT=yes -BOOTPROTO=dhcp6 -TYPE=Ethernet -USERCTL=no -PEERDNS=yes -IPV6INIT=yes -DHCPV6C=yes - - - Open the following: - vi /etc/sysconfig/network - - - Make the necessary configuration changes, as given below: - NETWORKING=yes -HOSTNAME=centos62mgmt.lab.vmops.com -NETWORKING_IPV6=yes -IPV6_AUTOCONF=no - - - On Ubuntu 12.10 - - - Open the following: - etc/network/interfaces: - - - Make the necessary configuration changes, as given below: - iface eth0 inet6 dhcp -autoconf 0 -accept_ra 1 - - - - -
-
diff --git a/docs/en-US/isolated-networks.xml b/docs/en-US/isolated-networks.xml deleted file mode 100644 index c8560445d2f..00000000000 --- a/docs/en-US/isolated-networks.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Isolated Networks - An isolated network can be accessed only by virtual machines of a single account. Isolated - networks have the following properties. - - - Resources such as VLAN are allocated and garbage collected dynamically - - - There is one network offering for the entire network - - - The network offering can be upgraded or downgraded but it is for the entire - network - - - For more information, see . -
diff --git a/docs/en-US/job-status.xml b/docs/en-US/job-status.xml deleted file mode 100644 index da0f76c5dff..00000000000 --- a/docs/en-US/job-status.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Job Status - The key to using an asynchronous command is the job ID that is returned immediately once the command has been executed. With the job ID, you can periodically check the job status by making calls to queryAsyncJobResult command. The command will return three possible job status integer values: - - 0 - Job is still in progress. Continue to periodically poll for any status changes. - 1 - Job has successfully completed. The job will return any successful response values associated with command that was originally executed. - 2 - Job has failed to complete. Please check the "jobresultcode" tag for failure reason code and "jobresult" for the failure reason. - -
- diff --git a/docs/en-US/kvm-topology-req.xml b/docs/en-US/kvm-topology-req.xml deleted file mode 100644 index 0dff491b364..00000000000 --- a/docs/en-US/kvm-topology-req.xml +++ /dev/null @@ -1,24 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- KVM Topology Requirements - The Management Servers communicate with KVM hosts on port 22 (ssh). -
diff --git a/docs/en-US/large_scale_redundant_setup.xml b/docs/en-US/large_scale_redundant_setup.xml deleted file mode 100644 index 427a42d9182..00000000000 --- a/docs/en-US/large_scale_redundant_setup.xml +++ /dev/null @@ -1,42 +0,0 @@ - -%BOOK_ENTITIES; -]> - - -
- Large-Scale Redundant Setup - - - - - Large-Scale Redundant Setup - - This diagram illustrates the network architecture of a large-scale &PRODUCT; deployment. - - A layer-3 switching layer is at the core of the data center. A router redundancy protocol like VRRP should be deployed. Typically high-end core switches also include firewall modules. Separate firewall appliances may also be used if the layer-3 switch does not have integrated firewall capabilities. The firewalls are configured in NAT mode. The firewalls provide the following functions: - - Forwards HTTP requests and API calls from the Internet to the Management Server. The Management Server resides on the management network. - When the cloud spans multiple zones, the firewalls should enable site-to-site VPN such that servers in different zones can directly reach each other. - - - A layer-2 access switch layer is established for each pod. Multiple switches can be stacked to increase port count. In either case, redundant pairs of layer-2 switches should be deployed. - The Management Server cluster (including front-end load balancers, Management Server nodes, and the MySQL database) is connected to the management network through a pair of load balancers. - Secondary storage servers are connected to the management network. - Each pod contains storage and computing servers. Each storage and computing server should have redundant NICs connected to separate layer-2 access switches. - -
diff --git a/docs/en-US/layer2-switch.xml b/docs/en-US/layer2-switch.xml deleted file mode 100644 index acef5a7c207..00000000000 --- a/docs/en-US/layer2-switch.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Layer-2 Switch - The layer-2 switch is the access switching layer inside the pod. - - - It should trunk all VLANs into every computing host. - - - It should switch traffic for the management network containing computing and storage - hosts. The layer-3 switch will serve as the gateway for the management network. - - - - Example Configurations - This section contains example configurations for specific switch models for pod-level - layer-2 switching. It assumes VLAN management protocols such as VTP or GVRP have been - disabled. The scripts must be changed appropriately if you choose to use VTP or GVRP. - - - -
diff --git a/docs/en-US/lb-policy-pfwd-rule-usage-record-format.xml b/docs/en-US/lb-policy-pfwd-rule-usage-record-format.xml deleted file mode 100644 index e27a49d6b96..00000000000 --- a/docs/en-US/lb-policy-pfwd-rule-usage-record-format.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Load Balancer Policy or Port Forwarding Rule Usage Record Format - - account - name of the account - accountid - ID of the account - domainid - ID of the domain in which this account resides - zoneid - Zone where the usage occurred - description - A string describing what the usage record is tracking - usage - String representation of the usage, including the units of usage (e.g. 'Hrs' for hours) - usagetype - A number representing the usage type (see Usage Types) - rawusage - A number representing the actual usage in hours - usageid - ID of the load balancer policy or port forwarding rule - usagetype - A number representing the usage type (see Usage Types) - startdate, enddate - The range of time for which the usage is aggregated; see Dates in the Usage Record - -
diff --git a/docs/en-US/libcloud-examples.xml b/docs/en-US/libcloud-examples.xml deleted file mode 100644 index d2db5269eb9..00000000000 --- a/docs/en-US/libcloud-examples.xml +++ /dev/null @@ -1,75 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Apache Libcloud - There are many tools available to interface with the &PRODUCT; API. Apache Libcloud is one of those. In this section - we provide a basic example of how to use Libcloud with &PRODUCT;. It assumes that you have access to a &PRODUCT; endpoint and that you have the API access key and secret key of a user. - To install Libcloud refer to the libcloud website. If you are familiar with Pypi simply do: - pip install apache-libcloud - You should see the following output: - -pip install apache-libcloud -Downloading/unpacking apache-libcloud - Downloading apache-libcloud-0.12.4.tar.bz2 (376kB): 376kB downloaded - Running setup.py egg_info for package apache-libcloud - -Installing collected packages: apache-libcloud - Running setup.py install for apache-libcloud - -Successfully installed apache-libcloud -Cleaning up... - - - You can then open a Python interactive shell, create an instance of a &PRODUCT; driver and call the available methods via the libcloud API. - - - >> from libcloud.compute.types import Provider ->>> from libcloud.compute.providers import get_driver ->>> Driver = get_driver(Provider.CLOUDSTACK) ->>> apikey='plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdM-kAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg' ->>> secretkey='VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ' ->>> host='http://localhost:8080' ->>> path='/client/api' ->>> conn=Driver(apikey,secretkey,secure='False',host='localhost:8080',path=path) ->>> conn=Driver(key=apikey,secret=secretkey,secure=False,host='localhost',port='8080',path=path) ->>> conn.list_images() -[] ->>> conn.list_sizes() -[, , ] ->>> images=conn.list_images() ->>> offerings=conn.list_sizes() ->>> node=conn.create_node(name='toto',image=images[0],size=offerings[0]) ->>> help(node) ->>> node.get_uuid() -'b1aa381ba1de7f2d5048e248848993d5a900984f' ->>> node.name -u'toto' -]]> - - - One of the interesting use cases of Libcloud is that you can use multiple Cloud Providers, such as AWS, 
Rackspace, OpenNebula, vCloud and so on. You can then create Driver instances to each of these clouds and create your own multi cloud application. - -
diff --git a/docs/en-US/limit-accounts-domains.xml b/docs/en-US/limit-accounts-domains.xml deleted file mode 100644 index 78a642b3a5a..00000000000 --- a/docs/en-US/limit-accounts-domains.xml +++ /dev/null @@ -1,371 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Limiting Resource Usage - &PRODUCT; allows you to control resource usage based on the types of resources, such as CPU, - RAM, Primary storage, and Secondary storage. A new set of resource types has been added to the - existing pool of resources to support the new customization model—need-basis usage, such - as large VM or small VM. The new resource types are now broadly classified as CPU, RAM, Primary - storage, and Secondary storage. The root administrator is able to impose resource usage limit by - the following resource types for Domain, Project, and Accounts. - - - CPUs - - - Memory (RAM) - - - Primary Storage (Volumes) - - - Secondary Storage (Snapshots, Templates, ISOs) - - - To control the behaviour of this feature, the following configuration parameters have been - added: - - - - - Parameter Name - Description - - - - - max.account.cpus - Maximum number of CPU cores that can be used for an account. - Default is 40. - - - max.account.ram (MB) - Maximum RAM that can be used for an account. - Default is 40960. - - - max.account.primary.storage (GB) - Maximum primary storage space that can be used for an account. - Default is 200. - - - - max.account.secondary.storage (GB) - Maximum secondary storage space that can be used for an account. - Default is 400. - - - max.project.cpus - - Maximum number of CPU cores that can be used for an account. - Default is 40. - - - - max.project.ram (MB) - - Maximum RAM that can be used for an account. - Default is 40960. - - - - max.project.primary.storage (GB) - - Maximum primary storage space that can be used for an account. - Default is 200. - - - - max.project.secondary.storage (GB) - - Maximum secondary storage space that can be used for an account. - Default is 400. - - - - - -
- User Permission - The root administrator, domain administrators and users are able to list resources. Ensure - that proper logs are maintained in the vmops.log and - api.log files. - - - The root admin will have the privilege to list and update resource limits. - - - The domain administrators are allowed to list and change these resource limits only - for the sub-domains and accounts under their own domain or the sub-domains. - - - The end users will have the privilege to list resource limits. Use the listResourceLimits - API. - - -
-
- Limit Usage Considerations - - - Primary or Secondary storage space refers to the stated size of the volume and not the - physical size— the actual consumed size on disk in case of thin provisioning. - - - If the admin reduces the resource limit for an account and sets it to less than the - resources that are currently being consumed, the existing VMs/templates/volumes are not - destroyed. Limits are imposed only if the user under that account tries to execute a new - operation using any of these resources. For example, the existing behaviors in the case of - a VM are: - - - migrateVirtualMachine: The users under that account will be able to migrate the - running VM into any other host without facing any limit issue. - - - recoverVirtualMachine: Destroyed VMs cannot be recovered. - - - - - For any resource type, if a domain has limit X, sub-domains or accounts under that - domain can have their own limits. However, the sum of resources allocated to a sub-domain - or accounts under the domain at any point of time should not exceed the value X. - For example, if a domain has the CPU limit of 40 and the sub-domain D1 and account A1 - can have limits of 30 each, but at any point of time the resource allocated to D1 and A1 - should not exceed the limit of 40. - - - If any operation needs to pass through two or more resource limit checks, then the - lower of 2 limits will be enforced. For example: if an account has the VM limit of 10 and - CPU limit of 20, and a user under that account requests 5 VMs of 4 CPUs each. The user - can deploy 5 more VMs because VM limit is 10. However, the user cannot deploy any more - instances because the CPU limit has been exhausted. - - -
-
- Limiting Resource Usage in a Domain - &PRODUCT; allows the configuration of limits on a domain basis. With a domain limit in - place, all users still have their account limits. They are additionally limited, as a group, - to not exceed the resource limits set on their domain. Domain limits aggregate the usage of - all accounts in the domain as well as all the accounts in all the sub-domains of that domain. - Limits set at the root domain level apply to the sum of resource usage by the accounts in all - the domains and sub-domains below that root domain. - To set a domain limit: - - - Log in to the &PRODUCT; UI. - - - In the left navigation tree, click Domains. - - - Select the domain you want to modify. The current domain limits are displayed. - A value of -1 shows that there is no limit in place. - - - Click the Edit button - - - - - editbutton.png: edits the settings. - - - - - Edit the following as per your requirement: - - - - - Parameter Name - Description - - - - - Instance Limits - The number of instances that can be used in a domain. - - - Public IP Limits - - The number of public IP addresses that can be used in a - domain. - - - Volume Limits - The number of disk volumes that can be created in a domain. - - - - Snapshot Limits - The number of snapshots that can be created in a domain. - - - Template Limits - The number of templates that can be registered in a - domain. - - - VPC limits - The number of VPCs that can be created in a domain. - - - CPU limits - - The number of CPU cores that can be used for a domain. - - - - Memory limits (MB) - - The number of RAM that can be used for a domain. - - - - Primary Storage limits (GB) - - The primary storage space that can be used for a domain. - - - - Secondary Storage limits (GB) - - The secondary storage space that can be used for a domain. - - - - - - - - Click Apply. - - -
-
- Default Account Resource Limits - You can limit resource use by accounts. The default limits are set by using Global - configuration parameters, and they affect all accounts within a cloud. The relevant parameters - are those beginning with max.account, for example: max.account.snapshots. - To override a default limit for a particular account, set a per-account resource - limit. - - - Log in to the &PRODUCT; UI. - - - In the left navigation tree, click Accounts. - - - Select the account you want to modify. The current limits are displayed. - A value of -1 shows that there is no limit in place. - - - Click the Edit button. - - - - - editbutton.png: edits the settings - - - - - Edit the following as per your requirement: - - - - - Parameter Name - Description - - - - - Instance Limits - The number of instances that can be used in an account. - The default is 20. - - - Public IP Limits - - The number of public IP addresses that can be used in an account. - The default is 20. - - - Volume Limits - The number of disk volumes that can be created in an account. - The default is 20. - - - Snapshot Limits - The number of snapshots that can be created in an account. - The default is 20. - - - Template Limits - The number of templates that can be registered in an account. - The default is 20. - - - VPC limits - The number of VPCs that can be created in an account. - The default is 20. - - - CPU limits - - The number of CPU cores that can be used for an account. - The default is 40. - - - Memory limits (MB) - - The number of RAM that can be used for an account. - The default is 40960. - - - Primary Storage limits (GB) - - The primary storage space that can be used for an account. - The default is 200. - - - Secondary Storage limits (GB) - - The secondary storage space that can be used for an account. - The default is 400. - - - - - - - Click Apply. - - -
-
diff --git a/docs/en-US/linux-installation.xml b/docs/en-US/linux-installation.xml deleted file mode 100644 index 28be32dad72..00000000000 --- a/docs/en-US/linux-installation.xml +++ /dev/null @@ -1,86 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Linux OS Installation - Use the following steps to begin the Linux OS installation: - - - Download the script file cloud-set-guest-password: - - - Linux: - - - - Windows: - - - - - - Copy this file to /etc/init.d. - On some Linux distributions, copy the file to - /etc/rc.d/init.d. - - - Run the following command to make the script executable: - chmod +x /etc/init.d/cloud-set-guest-password - - - Depending on the Linux distribution, continue with the appropriate step. - - - On Fedora, CentOS/RHEL, and Debian, run: - chkconfig --add cloud-set-guest-password - - - On Ubuntu with VMware tools, link the script file to the - /etc/network/if-up and /etc/network/if-down - folders, and run the script: - #ln -s /etc/init.d/cloud-set-guest-password /etc/network/if-up/cloud-set-guest-password -#ln -s /etc/init.d/cloud-set-guest-password /etc/network/if-down/cloud-set-guest-password - - - If you are using Ubuntu 11.04, create a directory called - /var/lib/dhcp3 on your Ubuntu machine. - This is to work around a known issue with this version of - Ubuntu. - Run the following command: - sudo update-rc.d cloud-set-guest-password defaults 98 - - - On all Ubuntu versions, run: - sudo update-rc.d cloud-set-guest-password defaults 98 - To test, run mkpasswd and check whether a - new password is generated. If the mkpasswd command does not exist, - run sudo apt-get install whois or sudo apt-get install - mkpasswd, depending on your Ubuntu version. - - - - -
diff --git a/docs/en-US/load-balancer-rules.xml b/docs/en-US/load-balancer-rules.xml deleted file mode 100644 index 884647c6f8b..00000000000 --- a/docs/en-US/load-balancer-rules.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Load Balancer Rules - A &PRODUCT; user or administrator may create load balancing rules that balance traffic - received at a public IP to one or more VMs. A user creates a rule, specifies an algorithm, and - assigns the rule to a set of VMs. - - If you create load balancing rules while using a network service offering that includes an - external load balancer device such as NetScaler, and later change the network service offering - to one that uses the &PRODUCT; virtual router, you must create a firewall rule on the virtual - router for each of your existing load balancing rules so that they continue to - function. - - - - - -
diff --git a/docs/en-US/log-in-root-admin.xml b/docs/en-US/log-in-root-admin.xml deleted file mode 100644 index 0243bd645fe..00000000000 --- a/docs/en-US/log-in-root-admin.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Logging In as the Root Administrator - After the Management Server software is installed and running, you can run the &PRODUCT; user interface. This UI is there to help you provision, view, and manage your cloud infrastructure. - - Open your favorite Web browser and go to this URL. Substitute the IP address of your own Management Server: - http://<management-server-ip-address>:8080/client - After logging into a fresh Management Server installation, a guided tour splash screen appears. On later visits, you’ll be taken directly into the Dashboard. - - If you see the first-time splash screen, choose one of the following. - - Continue with basic setup. Choose this if you're just trying &PRODUCT;, and you want a guided walkthrough of the simplest possible configuration so that you can get started right away. We'll help you set up a cloud with the following features: a single machine that runs &PRODUCT; software and uses NFS to provide storage; a single machine running VMs under the XenServer or KVM hypervisor; and a shared public network. - The prompts in this guided tour should give you all the information you need, but if you want just a bit more detail, you can follow along in the Trial Installation Guide. - - I have used &PRODUCT; before. Choose this if you have already gone through a design phase and planned a more sophisticated deployment, or you are ready to start scaling up a trial cloud that you set up earlier with the basic setup screens. In the Administrator UI, you can start using the more powerful features of &PRODUCT;, such as advanced VLAN networking, high availability, additional network elements such as load balancers and firewalls, and support for multiple hypervisors including Citrix XenServer, KVM, and VMware vSphere. - The root administrator Dashboard appears. - - - - You should set a new root administrator password. If you chose basic setup, you’ll be prompted to create a new password right away. If you chose experienced user, use the steps in . 
- - You are logging in as the root administrator. This account manages the &PRODUCT; deployment, including physical infrastructure. The root administrator can modify configuration settings to change basic functionality, create or delete user accounts, and take many actions that should be performed only by an authorized person. Please change the default password to a new, unique password. - -
diff --git a/docs/en-US/log-in.xml b/docs/en-US/log-in.xml deleted file mode 100644 index 84328ce4d45..00000000000 --- a/docs/en-US/log-in.xml +++ /dev/null @@ -1,48 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Log In to the UI - &PRODUCT; provides a web-based UI that can be used by both administrators and end users. The appropriate version of the UI is displayed depending on the credentials used to log in. The UI is available in popular browsers including IE7, IE8, IE9, Firefox 3.5+, Firefox 4, Safari 4, and Safari 5. The URL is: (substitute your own management server IP address) - http://<management-server-ip-address>:8080/client - On a fresh Management Server installation, a guided tour splash screen appears. On later visits, you’ll see a login screen where you specify the following to proceed to your Dashboard: - - Username - The user ID of your account. The default username is admin. - - - Password - The password associated with the user ID. The password for the default username is password. - - - Domain - If you are a root user, leave this field blank. - - If you are a user in the sub-domains, enter the full path to the domain, excluding the root domain. - For example, suppose multiple levels are created under the root domain, such as Comp1/hr. The users in the Comp1 domain should enter Comp1 in the Domain field, whereas the users in the Comp1/sales domain should enter Comp1/sales. - For more guidance about the choices that appear when you log in to this UI, see Logging In as the Root Administrator. - - - - -
diff --git a/docs/en-US/long-running-job-events.xml b/docs/en-US/long-running-job-events.xml deleted file mode 100644 index cae2b747586..00000000000 --- a/docs/en-US/long-running-job-events.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
 - Long Running Job Events - The events log records three types of standard events. - - INFO. This event is generated when an operation has been successfully performed. - WARN. This event is generated in the following circumstances. - - When a network is disconnected while monitoring a template download. - When a template download is abandoned. - When an issue on the storage server causes the volumes to fail over to the mirror storage server. - - - ERROR. This event is generated when an operation has not been successfully performed. - -
- diff --git a/docs/en-US/lxc-install.xml b/docs/en-US/lxc-install.xml deleted file mode 100644 index 40f6a0aaa69..00000000000 --- a/docs/en-US/lxc-install.xml +++ /dev/null @@ -1,110 +0,0 @@ - - - %BOOK_ENTITIES; - ]> - - - -
- LXC Installation and Configuration -
- System Requirements for LXC Hosts - LXC requires the Linux kernel cgroups functionality which is available starting 2.6.24. Although you are not required to run these distributions, the following are recommended: - - CentOS / RHEL: 6.3 - Ubuntu: 12.04(.1) - - The main requirement for LXC hypervisors is the libvirt and Qemu version. No matter what - Linux distribution you are using, make sure the following requirements are met: - - libvirt: 1.0.0 or higher - Qemu/KVM: 1.0 or higher - - The default bridge in &PRODUCT; is the Linux native bridge implementation (bridge module). &PRODUCT; includes an option to work with OpenVswitch, the requirements are listed below - - libvirt: 1.0.0 or higher - openvswitch: 1.7.1 or higher - - In addition, the following hardware requirements apply: - - Within a single cluster, the hosts must be of the same distribution version. - All hosts within a cluster must be homogenous. The CPUs must be of the same type, count, and feature flags. - Must support HVM (Intel-VT or AMD-V enabled) - 64-bit x86 CPU (more cores results in better performance) - 4 GB of memory - At least 1 NIC - When you deploy &PRODUCT;, the hypervisor host must not have any VMs already running - -
-
- LXC Installation Overview - LXC does not have any native system VMs, instead KVM will be used to run system VMs. This means that your host will need to support both LXC and KVM, thus most of the installation and configuration will be identical to the KVM installation. The material in this section doesn't duplicate KVM installation docs. It provides the &PRODUCT;-specific steps that are needed to prepare a KVM host to work with &PRODUCT;. - Before continuing, make sure that you have applied the latest updates to your host. - It is NOT recommended to run services on this host not controlled by &PRODUCT;. - The procedure for installing an LXC Host is: - - Prepare the Operating System - Install and configure libvirt - Configure Security Policies (AppArmor and SELinux) - Install and configure the Agent - -
-
- -
-
- Install and configure the Agent - To manage LXC instances on the host &PRODUCT; uses a Agent. This Agent communicates with the Management server and controls all the instances on the host. - First we start by installing the agent: - In RHEL or CentOS: - $ yum install cloudstack-agent - In Ubuntu: - $ apt-get install cloudstack-agent - Next step is to update the Agent configuration setttings. The settings are in /etc/cloudstack/agent/agent.properties - - - Set the Agent to run in LXC mode: - hypervisor.type=lxc - - - Optional: If you would like to use direct networking (instead of the default bridge networking), configure these lines: - libvirt.vif.driver=com.cloud.hypervisor.kvm.resource.DirectVifDriver - network.direct.source.mode=private - network.direct.device=eth0 - - - The host is now ready to be added to a cluster. This is covered in a later section, see . It is recommended that you continue to read the documentation before adding the host! -
-
- -
-
- -
-
- -
-
- -
-
- -
-
diff --git a/docs/en-US/lxc-topology-req.xml b/docs/en-US/lxc-topology-req.xml deleted file mode 100644 index 315863dd34c..00000000000 --- a/docs/en-US/lxc-topology-req.xml +++ /dev/null @@ -1,24 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- LXC Topology Requirements - The Management Servers communicate with LXC hosts on port 22 (ssh). -
diff --git a/docs/en-US/maintain-hypervisors-on-hosts.xml b/docs/en-US/maintain-hypervisors-on-hosts.xml deleted file mode 100644 index 43f3f790733..00000000000 --- a/docs/en-US/maintain-hypervisors-on-hosts.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
 - Maintaining Hypervisors on Hosts - When running hypervisor software on hosts, be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor's support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches. - The lack of up-to-date hotfixes can lead to data corruption and lost VMs. - (XenServer) For more information, see Highly Recommended Hotfixes for XenServer in the &PRODUCT; Knowledge Base. -
diff --git a/docs/en-US/maintenance-mode-for-primary-storage.xml b/docs/en-US/maintenance-mode-for-primary-storage.xml deleted file mode 100644 index 54c3a0d8901..00000000000 --- a/docs/en-US/maintenance-mode-for-primary-storage.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Maintenance Mode for Primary Storage - Primary storage may be placed into maintenance mode. This is useful, for example, to replace faulty RAM in a storage device. Maintenance mode for a storage device will first stop any new guests from being provisioned on the storage device. Then it will stop all guests that have any volume on that storage device. When all such guests are stopped the storage device is in maintenance mode and may be shut down. When the storage device is online again you may cancel maintenance mode for the device. The &PRODUCT; will bring the device back online and attempt to start all guests that were running at the time of the entry into maintenance mode. -
diff --git a/docs/en-US/making-api-request.xml b/docs/en-US/making-api-request.xml deleted file mode 100644 index 49ea158bb21..00000000000 --- a/docs/en-US/making-api-request.xml +++ /dev/null @@ -1,54 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Making API Requests - All &PRODUCT; API requests are submitted in the form of a HTTP GET/POST with an associated command and any parameters. A request is composed of the following whether in HTTP or HTTPS: - - - &PRODUCT; API URL: This is the web services API entry point(for example, http://www.cloud.com:8080/client/api) - Command: The web services command you wish to execute, such as start a virtual machine or create a disk volume - Parameters: Any additional required or optional parameters for the command - - A sample API GET request looks like the following: - http://localhost:8080/client/api?command=deployVirtualMachine&serviceOfferingId=1&diskOfferingId=1&templateId=2&zoneId=4&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D - - Or in a more readable format: - -1. http://localhost:8080/client/api -2. ?command=deployVirtualMachine -3. &serviceOfferingId=1 -4. &diskOfferingId=1 -5. &templateId=2 -6. &zoneId=4 -7. &apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXqjB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ -8. &signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D - - The first line is the &PRODUCT; API URL. This is the Cloud instance you wish to interact with. - The second line refers to the command you wish to execute. In our example, we are attempting to deploy a fresh new virtual machine. It is preceded by a (?) to separate itself from the &PRODUCT; API URL. - Lines 3-6 are the parameters for this given command. To see the command and its request parameters, please refer to the appropriate section in the &PRODUCT; API documentation. Each parameter field-value pair (field=value) is preceded by an ampersand character (&). - Line 7 is the user API Key that uniquely identifies the account. See Signing API Requests on page 7. - Line 8 is the signature hash created to authenticate the user account executing the API command. See Signing API Requests on page 7. -
- diff --git a/docs/en-US/manage-cloud.xml b/docs/en-US/manage-cloud.xml deleted file mode 100644 index 6bc45e21de2..00000000000 --- a/docs/en-US/manage-cloud.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Managing the Cloud - - - - - - - diff --git a/docs/en-US/management-server-install-client.xml b/docs/en-US/management-server-install-client.xml deleted file mode 100644 index 2c5ded76352..00000000000 --- a/docs/en-US/management-server-install-client.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Install the Management Server on the First Host - The first step in installation, whether you are installing the Management Server on one host - or many, is to install the software on a single node. - - If you are planning to install the Management Server on multiple nodes for high - availability, do not proceed to the additional nodes yet. That step will come later. - - The &PRODUCT; Management server can be installed using either RPM or DEB packages. These - packages will depend on everything you need to run the Management server. -
- Install on CentOS/RHEL - We start by installing the required packages: - yum install cloudstack-management -
-
 - Install on Ubuntu - apt-get install cloudstack-management -
- -
- Downloading vhd-util - This procedure is required only for installations where XenServer is installed on the - hypervisor hosts. - Before setting up the Management Server, download vhd-util from vhd-util. - If the Management Server is RHEL or CentOS, copy vhd-util to - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver. - If the Management Server is Ubuntu, copy vhd-util to - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver. -
-
diff --git a/docs/en-US/management-server-install-complete.xml b/docs/en-US/management-server-install-complete.xml deleted file mode 100644 index 8f4aa6f68de..00000000000 --- a/docs/en-US/management-server-install-complete.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Installation Complete! Next Steps - Congratulations! You have now installed &PRODUCT; Management Server and the database it uses to persist system data. - - - - - installation-complete.png: Finished installs with single Management Server and multiple Management Servers - - What should you do next? - - Even without adding any cloud infrastructure, you can run the UI to get a feel for what's offered and how you will interact with &PRODUCT; on an ongoing basis. See Log In to the UI. - When you're ready, add the cloud infrastructure and try running some virtual machines on it, so you can watch how &PRODUCT; manages the infrastructure. See Provision Your Cloud Infrastructure. - -
diff --git a/docs/en-US/management-server-install-db-external.xml b/docs/en-US/management-server-install-db-external.xml deleted file mode 100644 index 29507209fbf..00000000000 --- a/docs/en-US/management-server-install-db-external.xml +++ /dev/null @@ -1,145 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Install the Database on a Separate Node - This section describes how to install MySQL on a standalone machine, separate from the - Management Server. This technique is intended for a deployment that includes several Management - Server nodes. If you have a single-node Management Server deployment, you will typically use the - same node for MySQL. See . - - The management server doesn't require a specific distribution for the MySQL node. You can - use a distribution or Operating System of your choice. Using the same distribution as the - management server is recommended, but not required. See . - - - - Install MySQL from the package repository from your distribution: - On RHEL or CentOS: - yum install mysql-server - On Ubuntu: - apt-get install mysql-server - - - Edit the MySQL configuration (/etc/my.cnf or /etc/mysql/my.cnf, depending on your OS) - and insert the following lines in the [mysqld] section. You can put these lines below the - datadir line. The max_connections parameter should be set to 350 multiplied by the number of - Management Servers you are deploying. This example assumes two Management Servers. - - On Ubuntu, you can also create /etc/mysql/conf.d/cloudstack.cnf file and add these - directives there. Don't forget to add [mysqld] on the first line of the file. - - innodb_rollback_on_timeout=1 -innodb_lock_wait_timeout=600 -max_connections=700 -log-bin=mysql-bin -binlog-format = 'ROW' -bind-address = 0.0.0.0 - - - Start or restart MySQL to put the new configuration into effect. - On RHEL/CentOS, MySQL doesn't automatically start after installation. Start it - manually. - service mysqld start - On Ubuntu, restart MySQL. - service mysqld restart - - - (CentOS and RHEL only; not required on Ubuntu) - - On RHEL and CentOS, MySQL does not set a root password by default. It is very strongly - recommended that you set a root password as a security precaution. - - Run the following command to secure your installation. 
You can answer "Y" to all - questions except "Disallow root login remotely?". Remote root login is required to set up - the databases. - mysql_secure_installation - - - If a firewall is present on the system, open TCP port 3306 so external MySQL connections - can be established. - On Ubuntu, UFW is the default firewall. Open the port with this command: - ufw allow mysql - On RHEL/CentOS: - - - Edit the /etc/sysconfig/iptables file and add the following line at the beginning of - the INPUT chain. - -A INPUT -p tcp --dport 3306 -j ACCEPT - - - Now reload the iptables rules. - service iptables restart - - - - - Return to the root shell on your first Management Server. - - - Set up the database. The following command creates the cloud user on the - database. - - - In dbpassword, specify the password to be assigned to the cloud user. You can choose - to provide no password. - - - In deploy-as, specify the username and password of the user deploying the database. - In the following command, it is assumed the root user is deploying the database and - creating the cloud user. - - - (Optional) For encryption_type, use file or web to indicate the technique used to - pass in the database encryption password. Default: file. See . - - - (Optional) For management_server_key, substitute the default key that is used to - encrypt confidential parameters in the &PRODUCT; properties file. Default: password. It - is highly recommended that you replace this with a more secure value. See About Password - and Key Encryption. - - - (Optional) For database_key, substitute the default key that is used to encrypt - confidential parameters in the &PRODUCT; database. Default: password. It is highly - recommended that you replace this with a more secure value. See . - - - (Optional) For management_server_ip, you may explicitly specify cluster management - server node IP. If not specified, the local IP address will be used. 
 - - - cloudstack-setup-databases cloud:<dbpassword>@<ip address mysql server> \ ---deploy-as=root:<password> \ --e <encryption_type> \ --m <management_server_key> \ --k <database_key> \ --i <management_server_ip> - When this script is finished, you should see a message like "Successfully initialized - the database." - -
diff --git a/docs/en-US/management-server-install-db-local.xml b/docs/en-US/management-server-install-db-local.xml deleted file mode 100644 index ff5ab60b91f..00000000000 --- a/docs/en-US/management-server-install-db-local.xml +++ /dev/null @@ -1,167 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Install the Database on the Management Server Node - This section describes how to install MySQL on the same machine with the Management Server. - This technique is intended for a simple deployment that has a single Management Server node. If - you have a multi-node Management Server deployment, you will typically use a separate node for - MySQL. See . - - - Install MySQL from the package repository of your distribution: - On RHEL or CentOS: - yum install mysql-server - On Ubuntu: - apt-get install mysql-server - - - Open the MySQL configuration file. The configuration file is /etc/my.cnf or - /etc/mysql/my.cnf, depending on your OS. - - - Insert the following lines in the [mysqld] section. - You can put these lines below the datadir line. The max_connections parameter should be - set to 350 multiplied by the number of Management Servers you are deploying. This example - assumes one Management Server. - - On Ubuntu, you can also create a file /etc/mysql/conf.d/cloudstack.cnf and add these - directives there. Don't forget to add [mysqld] on the first line of the file. - - innodb_rollback_on_timeout=1 -innodb_lock_wait_timeout=600 -max_connections=350 -log-bin=mysql-bin -binlog-format = 'ROW' - - - Start or restart MySQL to put the new configuration into effect. - On RHEL/CentOS, MySQL doesn't automatically start after installation. Start it - manually. - service mysqld start - On Ubuntu, restart MySQL. - service mysqld restart - - - (CentOS and RHEL only; not required on Ubuntu) - - On RHEL and CentOS, MySQL does not set a root password by default. It is very strongly - recommended that you set a root password as a security precaution. - - Run the following command to secure your installation. You can answer "Y" to all - questions. - mysql_secure_installation - - - &PRODUCT; can be blocked by security mechanisms, such as SELinux. Disable SELinux to - ensure + that the Agent has all the required permissions. 
- Configure SELinux (RHEL and CentOS): - - - Check whether SELinux is installed on your machine. If not, you can skip this - section. - In RHEL or CentOS, SELinux is installed and enabled by default. You can verify this - with: - $ rpm -qa | grep selinux - - - Set the SELINUX variable in /etc/selinux/config to - "permissive". This ensures that the permissive setting will be maintained after a system - reboot. - In RHEL or CentOS: - vi /etc/selinux/config - Change the following line - SELINUX=enforcing - to this: - SELINUX=permissive - - - Set SELinux to permissive starting immediately, without requiring a system - reboot. - $ setenforce permissive - - - - - Set up the database. The following command creates the "cloud" user on the - database. - - - In dbpassword, specify the password to be assigned to the "cloud" user. You can - choose to provide no password although that is not recommended. - - - In deploy-as, specify the username and password of the user deploying the database. - In the following command, it is assumed the root user is deploying the database and - creating the "cloud" user. - - - (Optional) For encryption_type, use file or web to indicate the technique used to - pass in the database encryption password. Default: file. See . - - - (Optional) For management_server_key, substitute the default key that is used to - encrypt confidential parameters in the &PRODUCT; properties file. Default: password. It - is highly recommended that you replace this with a more secure value. See . - - - (Optional) For database_key, substitute the default key that is used to encrypt - confidential parameters in the &PRODUCT; database. Default: password. It is highly - recommended that you replace this with a more secure value. See . - - - (Optional) For management_server_ip, you may explicitly specify cluster management - server node IP. If not specified, the local IP address will be used. 
- - - cloudstack-setup-databases cloud:<dbpassword>@localhost \ ---deploy-as=root:<password> \ --e <encryption_type> \ --m <management_server_key> \ --k <database_key> \ --i <management_server_ip> - When this script is finished, you should see a message like “Successfully initialized - the database.†- - If the script is unable to connect to the MySQL database, check - the "localhost" loopback address in /etc/hosts. It should - be pointing to the IPv4 loopback address "127.0.0.1" and not the IPv6 loopback - address ::1. Alternatively, reconfigure MySQL to bind to the IPv6 loopback - interface. - - - - - If you are running the KVM hypervisor on the same machine with the Management Server, - edit /etc/sudoers and add the following line: - Defaults:cloud !requiretty - - - Now that the database is set up, you can finish configuring the OS for the Management - Server. This command will set up iptables, sudoers, and start the Management Server. - # cloudstack-setup-management - You should see the message “&PRODUCT; Management Server setup is done.†- - -
diff --git a/docs/en-US/management-server-install-db.xml b/docs/en-US/management-server-install-db.xml deleted file mode 100644 index 9d41af2562b..00000000000 --- a/docs/en-US/management-server-install-db.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Install the database server - The &PRODUCT; management server uses a MySQL database server to store its data. - When you are installing the management server on a single node, you can install the MySQL server locally. - For an installation that has multiple management server nodes, we assume the MySQL database also runs on a separate node. - - &PRODUCT; has been tested with MySQL 5.1 and 5.5. These versions are included in RHEL/CentOS and Ubuntu. - - -
diff --git a/docs/en-US/management-server-install-flow.xml b/docs/en-US/management-server-install-flow.xml deleted file mode 100644 index cd73c69e587..00000000000 --- a/docs/en-US/management-server-install-flow.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Management Server Installation - - - - - - - - - - -
diff --git a/docs/en-US/management-server-install-multi-node.xml b/docs/en-US/management-server-install-multi-node.xml deleted file mode 100644 index 480d84ea94f..00000000000 --- a/docs/en-US/management-server-install-multi-node.xml +++ /dev/null @@ -1,69 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Prepare and Start Additional Management Servers - For your second and subsequent Management Servers, you will install the Management Server - software, connect it to the database, and set up the OS for the Management Server. - - - Perform the steps in and or as - appropriate. - - - This step is required only for installations where XenServer is installed on the hypervisor hosts. - Download vhd-util from vhd-util - Copy vhd-util to - /usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver. - - - Ensure that necessary services are started and set to start on boot. - # service rpcbind start -# service nfs start -# chkconfig nfs on -# chkconfig rpcbind on - - - - - Configure the database client. Note the absence of the --deploy-as argument in this - case. (For more details about the arguments to this command, see .) - # cloudstack-setup-databases cloud:dbpassword@dbhost -e encryption_type -m management_server_key -k database_key -i management_server_ip - - - - Configure the OS and start the Management Server: - # cloudstack-setup-management - The Management Server on this node should now be running. - - - Repeat these steps on each additional Management Server. - - - Be sure to configure a load balancer for the Management Servers. See . - - -
diff --git a/docs/en-US/management-server-install-nfs-shares.xml b/docs/en-US/management-server-install-nfs-shares.xml deleted file mode 100644 index a12e09c3eca..00000000000 --- a/docs/en-US/management-server-install-nfs-shares.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Prepare NFS Shares - &PRODUCT; needs a place to keep primary and secondary storage (see Cloud Infrastructure Overview). Both of these can be NFS shares. This section tells how to set up the NFS shares before adding the storage to &PRODUCT;. - Alternative Storage - NFS is not the only option for primary or secondary storage. For example, you may use Ceph RBD, GlusterFS, iSCSI, and others. The choice of storage system will depend on the choice of hypervisor and whether you are dealing with primary or secondary storage. - - The requirements for primary and secondary storage are described in: - - - - - A production installation typically uses a separate NFS server. See . - You can also use the Management Server node as the NFS server. This is more typical of a trial installation, but is technically possible in a larger deployment. See . - - -
diff --git a/docs/en-US/management-server-install-overview.xml b/docs/en-US/management-server-install-overview.xml deleted file mode 100644 index 5f46b0099bd..00000000000 --- a/docs/en-US/management-server-install-overview.xml +++ /dev/null @@ -1,48 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Management Server Installation Overview - This section describes installing the Management Server. There are two slightly different installation flows, depending on how many Management Server nodes will be in your cloud: - - A single Management Server node, with MySQL on the same node. - Multiple Management Server nodes, with MySQL on a node separate from the Management Servers. - - In either case, each machine must meet the system requirements described in System Requirements. - For the sake of security, be sure the public Internet can not access port 8096 or port 8250 on the Management Server. - The procedure for installing the Management Server is: - - - Prepare the Operating System - - - (XenServer only) Download and install vhd-util. - - Install the First Management Server - Install and Configure the MySQL database - Prepare NFS Shares - Prepare and Start Additional Management Servers (optional) - Prepare the System VM Template - -
diff --git a/docs/en-US/management-server-install-prepare-os.xml b/docs/en-US/management-server-install-prepare-os.xml deleted file mode 100644 index 02453a0b207..00000000000 --- a/docs/en-US/management-server-install-prepare-os.xml +++ /dev/null @@ -1,54 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Prepare the Operating System - The OS must be prepared to host the Management Server using the following steps. These steps must be performed on each Management Server node. - - Log in to your OS as root. - - Check for a fully qualified hostname. - hostname --fqdn - This should return a fully qualified hostname such as "management1.lab.example.org". If it does not, edit /etc/hosts so that it does. - - - Make sure that the machine can reach the Internet. - ping www.cloudstack.org - - - Turn on NTP for time synchronization. - NTP is required to synchronize the clocks of the servers in your cloud. - - - Install NTP. - On RHEL or CentOS: - yum install ntp - On Ubuntu: - apt-get install openntpd - - - - Repeat all of these steps on every host where the Management Server will be installed. - -
diff --git a/docs/en-US/management-server-install-systemvm.xml b/docs/en-US/management-server-install-systemvm.xml deleted file mode 100644 index 0d930ad62e0..00000000000 --- a/docs/en-US/management-server-install-systemvm.xml +++ /dev/null @@ -1,76 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Prepare the System VM Template - Secondary storage must be seeded with a template that is used for &PRODUCT; system - VMs. - - When copying and pasting a command, be sure the command has pasted as a single line before - executing. Some document viewers may introduce unwanted line breaks in copied text. - - - - On the Management Server, run one or more of the following cloud-install-sys-tmplt - commands to retrieve and decompress the system VM template. Run the command for each - hypervisor type that you expect end users to run in this Zone. - If your secondary storage mount point is not named /mnt/secondary, substitute your own - mount point name. - If you set the &PRODUCT; database encryption type to "web" when you set up the database, - you must now add the parameter -s <management-server-secret-key>. See . - This process will require approximately 5 GB of free space on the local file system and - up to 30 minutes each time it runs. - - - For XenServer: - # /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2 -h xenserver -s <optional-management-server-secret-key> -F - - - For vSphere: - # /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova -h vmware -s <optional-management-server-secret-key> -F - - - For KVM: - # /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -s <optional-management-server-secret-key> -F - - - For LXC: - # /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h lxc -s <optional-management-server-secret-key> -F - - - On Ubuntu, use the following path instead: - # 
/usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt - - - If you are using a separate NFS server, perform this step. If you are using the - Management Server as the NFS server, you MUST NOT perform this step. - When the script has finished, unmount secondary storage and remove the created - directory. - # umount /mnt/secondary -# rmdir /mnt/secondary - - - Repeat these steps for each secondary storage server. - - -
diff --git a/docs/en-US/management-server-lb.xml b/docs/en-US/management-server-lb.xml deleted file mode 100644 index 13f87560e10..00000000000 --- a/docs/en-US/management-server-lb.xml +++ /dev/null @@ -1,66 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Management Server Load Balancing - &PRODUCT; can use a load balancer to provide a virtual IP for multiple Management - Servers. The administrator is responsible for creating the load balancer rules for the - Management Servers. The application requires persistence or stickiness across multiple sessions. - The following chart lists the ports that should be load balanced and whether or not persistence - is required. - Even if persistence is not required, enabling it is permitted. - - - - - Source Port - Destination Port - Protocol - Persistence Required? - - - - - 80 or 443 - 8080 (or 20400 with AJP) - HTTP (or AJP) - Yes - - - 8250 - 8250 - TCP - Yes - - - 8096 - 8096 - HTTP - No - - - - - In addition to above settings, the administrator is responsible for setting the 'host' global - config value from the management server IP to load balancer virtual IP address. - If the 'host' value is not set to the VIP for Port 8250 and one of your management servers crashes, - the UI is still available but the system VMs will not be able to contact the management server. - -
diff --git a/docs/en-US/management-server-overview.xml b/docs/en-US/management-server-overview.xml deleted file mode 100644 index b8e2d53f052..00000000000 --- a/docs/en-US/management-server-overview.xml +++ /dev/null @@ -1,76 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Management Server Overview - - The Management Server is the &PRODUCT; software that manages cloud - resources. By interacting with the Management Server through its UI or - API, you can configure and manage your cloud infrastructure. - - - The Management Server runs on a dedicated server or VM. It controls - allocation of virtual machines to hosts and assigns storage and IP - addresses to the virtual machine instances. The Management Server - runs in a Tomcat container and requires a MySQL database for persistence. - - - The machine must meet the system requirements described in System - Requirements. - - The Management Server: - - - - - Provides the web user interface for the administrator and a - reference user interface for end users. - - - - Provides the APIs for &PRODUCT;. - - - Manages the assignment of guest VMs to particular hosts. - - - - Manages the assignment of public and private IP addresses to - particular accounts. - - - - Manages the allocation of storage to guests as virtual disks. - - - - Manages snapshots, templates, and ISO images, possibly - replicating them across data centers. - - - - Provides a single point of configuration for the cloud. - - -
diff --git a/docs/en-US/manual-live-migration.xml b/docs/en-US/manual-live-migration.xml deleted file mode 100644 index 1daa6d3d937..00000000000 --- a/docs/en-US/manual-live-migration.xml +++ /dev/null @@ -1,56 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Moving VMs Between Hosts (Manual Live Migration) - The &PRODUCT; administrator can move a running VM from one host to another without interrupting service to users or going into maintenance mode. This is called manual live migration, and can be done under the following conditions: - - The root administrator is logged in. Domain admins and users can not perform manual live migration of VMs. - The VM is running. Stopped VMs can not be live migrated. - The destination host must have enough available capacity. If not, the VM will remain in the "migrating" state until memory becomes available. - (KVM) The VM must not be using local disk storage. (On XenServer and VMware, VM live migration - with local disk is enabled by &PRODUCT; support for XenMotion and vMotion.) - (KVM) The destination host must be in the same cluster as the original host. - (On XenServer and VMware, VM live migration from one cluster to another is enabled by &PRODUCT; support for XenMotion and vMotion.) - - - To manually live migrate a virtual machine - - Log in to the &PRODUCT; UI as a user or admin. - In the left navigation, click Instances. - Choose the VM that you want to migrate. - Click the Migrate Instance button. - - - - Migrateinstance.png: button to migrate an instance - - - From the list of suitable hosts, choose the one to which you want to move the VM. - If the VM's storage has to be migrated along with the VM, this will be noted in the host - list. &PRODUCT; will take care of the storage migration for you. - - Click OK. - -
- diff --git a/docs/en-US/marvin.xml b/docs/en-US/marvin.xml deleted file mode 100644 index 8fd2c96fe3f..00000000000 --- a/docs/en-US/marvin.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Marvin - Marvin is the &PRODUCT; automation framework. It originated as a tool for integration testing but is now also used to build DevCloud as well as to provide a Python &PRODUCT; API binding. - - Marvin's complete documentation is on the wiki at https://cwiki.apache.org/CLOUDSTACK/testing-with-python.html - The source code is located at tools/marvin - - -
diff --git a/docs/en-US/max-result-page-returned.xml b/docs/en-US/max-result-page-returned.xml deleted file mode 100644 index fdbf63962d4..00000000000 --- a/docs/en-US/max-result-page-returned.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Maximum Result Pages Returned - - For each cloud, there is a default upper limit on the number of results that any API command will return in a single page. This is to help prevent overloading the cloud servers and prevent DOS attacks. For example, if the page size limit is 500 and a command returns 10,000 results, the command will return 20 pages. - - The default page size limit can be different for each cloud. It is set in the global configuration parameter default.page.size. If your cloud has many users with lots of VMs, you might need to increase the value of this parameter. At the same time, be careful not to set it so high that your site can be taken down by an enormous return from an API call. For more information about how to set global configuration parameters, see "Describe Your Deployment" in the Installation Guide. - To decrease the page size limit for an individual API command, override the global setting with the page and pagesize parameters, which are available in any list* command (listCapabilities, listDiskOfferings, etc.). - - Both parameters must be specified together. - The value of the pagesize parameter must be smaller than the value of default.page.size. That is, you can not increase the number of possible items in a result page, only decrease it. - - For syntax information on the list* commands, see the API Reference. -
- diff --git a/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml b/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml deleted file mode 100644 index 1ed6bbd7cd3..00000000000 --- a/docs/en-US/migrate-datadisk-volume-new-storage-pool.xml +++ /dev/null @@ -1,78 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Migrating a Data Volume to a New Storage Pool - There are two situations when you might want to migrate a disk: - - Move the disk to new storage, but leave it attached to the same running VM. - Detach the disk from its current VM, move it to new storage, and attach it to a new VM. - -
- Migrating Storage For a Running VM - (Supported on XenServer and VMware) - - Log in to the &PRODUCT; UI as a user or admin. - In the left navigation bar, click Instances, click the VM name, and click View Volumes. - Click the volume you want to migrate. - Detach the disk from the VM. - See but skip the “reattach” step at the end. You - will do that after migrating to new storage. - Click the Migrate Volume button - - - - - Migrateinstance.png: button to migrate a volume - - - and choose the destination from the dropdown list. - Watch for the volume status to change to Migrating, then back to Ready. - 
-
- Migrating Storage and Attaching to a Different VM - - Log in to the &PRODUCT; UI as a user or admin. - Detach the disk from the VM. - See but skip the “reattach” step at the end. You - will do that after migrating to new storage. - Click the Migrate Volume button - - - - - Migrateinstance.png: button to migrate a volume - - - and choose the destination from the dropdown list. - Watch for the volume status to change to Migrating, then back to Ready. You can find the - volume by clicking Storage in the left navigation bar. Make sure that Volumes is - displayed at the top of the window, in the Select View dropdown. - Attach the volume to any desired VM running in the same cluster as the new storage server. See - - - 
-
diff --git a/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml b/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml deleted file mode 100644 index 3bcaff53c63..00000000000 --- a/docs/en-US/migrate-vm-rootvolume-volume-new-storage-pool.xml +++ /dev/null @@ -1,47 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Migrating a VM Root Volume to a New Storage Pool - (XenServer, VMware) You can live migrate a VM's root disk from one storage pool to another, without stopping the VM first. - (KVM) When migrating the root disk volume, the VM must first be stopped, and users can not access the VM. After migration is complete, the VM can be restarted. - - Log in to the &PRODUCT; UI as a user or admin. - In the left navigation bar, click Instances, and click the VM name. - (KVM only) Stop the VM. - Click the Migrate button - - - - - Migrateinstance.png: button to migrate a VM or volume - - - and choose the destination from the dropdown list. - If the VM's storage has to be migrated along with the VM, this will be noted in the host - list. &PRODUCT; will take care of the storage migration for you. - Watch for the volume status to change to Migrating, then back to Running (or Stopped, in the case of KVM). This - can take some time. - (KVM only) Restart the VM. - -
\ No newline at end of file diff --git a/docs/en-US/minimum-system-requirements.xml b/docs/en-US/minimum-system-requirements.xml deleted file mode 100644 index 870ef68eae4..00000000000 --- a/docs/en-US/minimum-system-requirements.xml +++ /dev/null @@ -1,74 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Minimum System Requirements -
- Management Server, Database, and Storage System Requirements - - The machines that will run the Management Server and MySQL database must meet the following requirements. - The same machines can also be used to provide primary and secondary storage, such as via localdisk or NFS. - The Management Server may be placed on a virtual machine. - - - Operating system: - - Preferred: CentOS/RHEL 6.3+ or Ubuntu 12.04(.1) - - - 64-bit x86 CPU (more cores results in better performance) - 4 GB of memory - 250 GB of local disk (more results in better capability; 500 GB recommended) - At least 1 NIC - Statically allocated IP address - Fully qualified domain name as returned by the hostname command - -
-
- Host/Hypervisor System Requirements - The host is where the cloud services run in the form of guest virtual machines. Each host is one machine that meets the following requirements: - - Must support HVM (Intel-VT or AMD-V enabled). - 64-bit x86 CPU (more cores results in better performance) - Hardware virtualization support required - 4 GB of memory - 36 GB of local disk - At least 1 NIC - If DHCP is used for hosts, ensure that no conflict occurs between DHCP server used for these hosts and the DHCP router created by &PRODUCT;. - Latest hotfixes applied to hypervisor software - When you deploy &PRODUCT;, the hypervisor host must not have any VMs already running - All hosts within a cluster must be homogeneous. The CPUs must be of the same type, count, and feature flags. - - Hosts have additional requirements depending on the hypervisor. See the requirements listed at the top of the Installation section for your chosen hypervisor: - - Be sure you fulfill the additional hypervisor requirements and installation steps provided in this Guide. Hypervisor hosts must be properly prepared to work with CloudStack. For example, the requirements for XenServer are listed under Citrix XenServer Installation. - - - - - - - - -
-
diff --git a/docs/en-US/modify-delete-service-offerings.xml b/docs/en-US/modify-delete-service-offerings.xml deleted file mode 100644 index b917af48252..00000000000 --- a/docs/en-US/modify-delete-service-offerings.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Modifying or Deleting a Service Offering - Service offerings cannot be changed once created. This applies to both compute offerings and disk offerings. - A service offering can be deleted. If it is no longer in use, it is deleted immediately and permanently. If the service offering is still in use, it will remain in the database until all the virtual machines referencing it have been deleted. After deletion by the administrator, a service offering will not be available to end users that are creating new instances. -
diff --git a/docs/en-US/multi_node_management_server.xml b/docs/en-US/multi_node_management_server.xml deleted file mode 100644 index 1ff713dbd16..00000000000 --- a/docs/en-US/multi_node_management_server.xml +++ /dev/null @@ -1,36 +0,0 @@ - -%BOOK_ENTITIES; -]> - - -
- Multi-Node Management Server - The &PRODUCT; Management Server is deployed on one or more front-end servers connected to a single MySQL database. Optionally a pair of hardware load balancers distributes requests from the web. A backup management server set may be deployed using MySQL replication at a remote site to add DR capabilities. - - - - - Multi-Node Management Server - - The administrator must decide the following. - - Whether or not load balancers will be used. - How many Management Servers will be deployed. - Whether MySQL replication will be deployed to enable disaster recovery. - -
diff --git a/docs/en-US/multi_node_overview.xml b/docs/en-US/multi_node_overview.xml deleted file mode 100644 index 1eee0377ba9..00000000000 --- a/docs/en-US/multi_node_overview.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Management Server Multi-Node Installation Overview - - This section describes installing multiple Management Servers and installing MySQL on a node separate from the Management Servers. The machines must meet the system requirements described in System Requirements. - - For the sake of security, be sure the public Internet can not access port 8096 or port 8250 on the Management Server. - - The procedure for a multi-node installation is: - - - Prepare the Operating System - Install the First Management Server - Install and Configure the Database - Prepare NFS Shares - Prepare and Start Additional Management Servers - Prepare the System VM Template - -
- diff --git a/docs/en-US/multi_site_deployment.xml b/docs/en-US/multi_site_deployment.xml deleted file mode 100644 index 8ad94aa2a70..00000000000 --- a/docs/en-US/multi_site_deployment.xml +++ /dev/null @@ -1,50 +0,0 @@ - -%BOOK_ENTITIES; -]> - - -
- Multi-Site Deployment - The &PRODUCT; platform scales well into multiple sites through the use of zones. The following diagram shows an example of a multi-site deployment. - - - - - Example Of A Multi-Site Deployment - - Data Center 1 houses the primary Management Server as well as zone 1. The MySQL database is replicated in real time to the secondary Management Server installation in Data Center 2. - - - - - Separate Storage Network - - This diagram illustrates a setup with a separate storage network. Each server has four NICs, two connected to pod-level network switches and two connected to storage network switches. - There are two ways to configure the storage network: - - Bonded NIC and redundant switches can be deployed for NFS. In NFS deployments, redundant switches and bonded NICs still result in one network (one CIDR block+ default gateway address). - iSCSI can take advantage of two separate storage networks (two CIDR blocks each with its own default gateway). Multipath iSCSI client can failover and load balance between separate storage networks. - - - - - - NIC Bonding And Multipath I/O - - This diagram illustrates the differences between NIC bonding and Multipath I/O (MPIO). NIC bonding configuration involves only one network. MPIO involves two separate networks. -
diff --git a/docs/en-US/multiple-ip-nic.xml b/docs/en-US/multiple-ip-nic.xml deleted file mode 100644 index 344dc8df16f..00000000000 --- a/docs/en-US/multiple-ip-nic.xml +++ /dev/null @@ -1,95 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Configuring Multiple IP Addresses on a Single NIC - &PRODUCT; provides you the ability to associate multiple private IP addresses per guest VM - NIC. In addition to the primary IP, you can assign additional IPs to the guest VM NIC. This - feature is supported on all the network configurations—Basic, Advanced, and VPC. Security - Groups, Static NAT and Port forwarding services are supported on these additional IPs. - As always, you can specify an IP from the guest subnet; if not specified, an IP is - automatically picked up from the guest VM subnet. You can view the IPs associated with each - guest VM NIC on the UI. You can apply NAT on these additional guest IPs by using network - configuration option in the &PRODUCT; UI. You must specify the NIC to which the IP should be - associated. - This feature is supported on XenServer, KVM, and VMware hypervisors. Note that Basic zone - security groups are not supported on VMware. -
- Use Cases - Some of the use cases are described below: - - - Network devices, such as firewalls and load balancers, generally work best when they - have access to multiple IP addresses on the network interface. - - - Moving private IP addresses between interfaces or instances. Applications that are - bound to specific IP addresses can be moved between instances. - - - Hosting multiple SSL Websites on a single instance. You can install multiple SSL - certificates on a single instance, each associated with a distinct IP address. - - -
-
- Guidelines - To prevent IP conflict, configure different subnets when multiple networks are connected - to the same VM. -
-
- Assigning Additional IPs to a VM - - - Log in to the &PRODUCT; UI. - - - In the left navigation bar, click Instances. - - - Click the name of the instance you want to work with. - - - In the Details tab, click NICs. - - - Click View Secondary IPs. - - - Click Acquire New Secondary IP, and click Yes in the confirmation dialog. - You need to configure the IP on the guest VM NIC manually. &PRODUCT; will not - automatically configure the acquired IP address on the VM. Ensure that the IP address - configuration persist on VM reboot. - Within a few moments, the new IP address should appear with the state Allocated. You - can now use the IP address in Port Forwarding or StaticNAT rules. - - -
-
- Port Forwarding and StaticNAT Services Changes - Because multiple IPs can be associated per NIC, you are allowed to select a desired IP for - the Port Forwarding and StaticNAT services. The default is the primary IP. To enable this - functionality, an extra optional parameter 'vmguestip' is added to the Port forwarding and - StaticNAT APIs (enableStaticNat, createIpForwardingRule) to indicate on what IP address NAT - needs to be configured. If vmguestip is passed, NAT is configured on the specified private IP - of the VM. If not passed, NAT is configured on the primary IP of the VM. -
-
diff --git a/docs/en-US/multiple-ip-range.xml b/docs/en-US/multiple-ip-range.xml deleted file mode 100644 index 42e0c2a9555..00000000000 --- a/docs/en-US/multiple-ip-range.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- About Multiple IP Ranges - - The feature can only be implemented on IPv4 addresses. - - &PRODUCT; provides you with the flexibility to add guest IP ranges from different subnets in - Basic zones and security groups-enabled Advanced zones. For security groups-enabled Advanced - zones, it implies multiple subnets can be added to the same VLAN. With the addition of this - feature, you will be able to add IP address ranges from the same subnet or from a different one - when IP addresses are exhausted. This in turn allows you to employ a higher number of subnets - and thus reduce the address management overhead. To support this feature, the capability of - createVlanIpRange API is extended to add IP ranges also from a different - subnet. - Ensure that you manually configure the gateway of the new subnet before adding the IP range. - Note that &PRODUCT; supports only one gateway for a subnet; overlapping subnets are not - currently supported. - Use the deleteVlanRange API to delete IP ranges. This operation fails if an IP - from the remove range is in use. If the remove range contains the IP address on which the DHCP - server is running, &PRODUCT; acquires a new IP from the same subnet. If no IP is available in - the subnet, the remove operation fails. - This feature is supported on KVM, XenServer, and VMware hypervisors. -
diff --git a/docs/en-US/multiple-system-vm-vmware.xml b/docs/en-US/multiple-system-vm-vmware.xml deleted file mode 100644 index 014dfa1f329..00000000000 --- a/docs/en-US/multiple-system-vm-vmware.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Multiple System VM Support for VMware - Every &PRODUCT; zone has single System VM for template processing tasks such as downloading templates, uploading templates, and uploading ISOs. In a zone where VMware is being used, additional System VMs can be launched to process VMware-specific tasks such as taking snapshots and creating private templates. The &PRODUCT; management server launches additional System VMs for VMware-specific tasks as the load increases. The management server monitors and weights all commands sent to these System VMs and performs dynamic load balancing and scaling-up of more System VMs. -
diff --git a/docs/en-US/network-offering-usage-record-format.xml b/docs/en-US/network-offering-usage-record-format.xml deleted file mode 100644 index a1b0da96221..00000000000 --- a/docs/en-US/network-offering-usage-record-format.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Network Offering Usage Record Format - - account – name of the account - accountid – ID of the account - domainid – ID of the domain in which this account resides - zoneid – Zone where the usage occurred - description – A string describing what the usage record is tracking - usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours) - usagetype – A number representing the usage type (see Usage Types) - rawusage – A number representing the actual usage in hours - usageid – ID of the network offering - offeringid – Network offering ID - virtualMachineId – The ID of the virtual machine - startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record -
diff --git a/docs/en-US/network-offerings.xml b/docs/en-US/network-offerings.xml deleted file mode 100644 index 8c685bfc903..00000000000 --- a/docs/en-US/network-offerings.xml +++ /dev/null @@ -1,87 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Network Offerings - - For the most up-to-date list of supported network services, see the &PRODUCT; UI or call - listNetworkServices. - - A network offering is a named set of network services, such as: - - - DHCP - - - DNS - - - Source NAT - - - Static NAT - - - Port Forwarding - - - Load Balancing - - - Firewall - - - VPN - - - (Optional) Name one of several available providers to use for a given service, such as - Juniper for the firewall - - - (Optional) Network tag to specify which physical network to use - - - When creating a new VM, the user chooses one of the available network offerings, and that - determines which network services the VM can use. - The &PRODUCT; administrator can create any number of custom network offerings, in addition - to the default network offerings provided by &PRODUCT;. By creating multiple custom network - offerings, you can set up your cloud to offer different classes of service on a single - multi-tenant physical network. For example, while the underlying physical wiring may be the same - for two tenants, tenant A may only need simple firewall protection for their website, while - tenant B may be running a web server farm and require a scalable firewall solution, load - balancing solution, and alternate networks for accessing the database backend. - - If you create load balancing rules while using a network service offering that includes an - external load balancer device such as NetScaler, and later change the network service offering - to one that uses the &PRODUCT; virtual router, you must create a firewall rule on the virtual - router for each of your existing load balancing rules so that they continue to - function. - - When creating a new virtual network, the &PRODUCT; administrator chooses which network - offering to enable for that network. Each virtual network is associated with one network - offering. A virtual network can be upgraded or downgraded by changing its associated network - offering. 
If you do this, be sure to reprogram the physical network to match. - &PRODUCT; also has internal network offerings for use by &PRODUCT; system VMs. These network - offerings are not visible to users but can be modified by administrators. - -
diff --git a/docs/en-US/network-rate.xml b/docs/en-US/network-rate.xml deleted file mode 100644 index 56fe25c04a5..00000000000 --- a/docs/en-US/network-rate.xml +++ /dev/null @@ -1,144 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Network Throttling - Network throttling is the process of controlling the network access and bandwidth usage - based on certain rules. &PRODUCT; controls this behaviour of the guest networks in the cloud by - using the network rate parameter. This parameter is defined as the default data transfer rate in - Mbps (Megabits Per Second) allowed in a guest network. It defines the upper limits for network - utilization. If the current utilization is below the allowed upper limits, access is granted, - else revoked. - You can throttle the network bandwidth either to control the usage above a certain limit for - some accounts, or to control network congestion in a large cloud environment. The network rate - for your cloud can be configured on the following: - - - Network Offering - - - Service Offering - - - Global parameter - - - If network rate is set to NULL in service offering, the value provided in the - vm.network.throttling.rate global parameter is applied. If the value is set to NULL for network - offering, the value provided in the network.throttling.rate global parameter is - considered. - For the default public, storage, and management networks, network rate is set to 0. This - implies that the public, storage, and management networks will have unlimited bandwidth by - default. For default guest networks, network rate is set to NULL. In this case, network rate is - defaulted to the global parameter value. - The following table gives you an overview of how network rate is applied on different types - of networks in &PRODUCT;. 
- - - - - - - Networks - Network Rate Is Taken from - - - - - Guest network of Virtual Router - Guest Network Offering - - - Public network of Virtual Router - Guest Network Offering - - - Storage network of Secondary Storage VM - System Network Offering - - - Management network of Secondary Storage VM - System Network Offering - - - Storage network of Console Proxy VM - System Network Offering - - - Management network of Console Proxy VM - System Network Offering - - - Storage network of Virtual Router - System Network Offering - - - Management network of Virtual Router - System Network Offering - - - Public network of Secondary Storage VM - System Network Offering - - - Public network of Console Proxy VM - System Network Offering - - - Default network of a guest VM - Compute Offering - - - Additional networks of a guest VM - Corresponding Network Offerings - - - - - A guest VM must have a default network, and can also have many additional networks. - Depending on various parameters, such as the host and virtual switch used, you can observe a - difference in the network rate in your cloud. For example, on a VMware host the actual network - rate varies based on where they are configured (compute offering, network offering, or both); - the network type (shared or isolated); and traffic direction (ingress or egress). - The network rate set for a network offering used by a particular network in &PRODUCT; is - used for the traffic shaping policy of a port group, for example: port group A, for that - network: a particular subnet or VLAN on the actual network. The virtual routers for that network - connects to the port group A, and by default instances in that network connects to this port - group. 
However, if an instance is deployed with a compute offering with the network rate set, - and if this rate is used for the traffic shaping policy of another port group for the network, - for example port group B, then instances using this compute offering are connected to the port - group B, instead of connecting to port group A. - The traffic shaping policy on standard port groups in VMware only applies to the egress - traffic, and the net effect depends on the type of network used in &PRODUCT;. In shared - networks, ingress traffic is unlimited for &PRODUCT;, and egress traffic is limited to the rate - that applies to the port group used by the instance if any. If the compute offering has a - network rate configured, this rate applies to the egress traffic, otherwise the network rate set - for the network offering applies. For isolated networks, the network rate set for the network - offering, if any, effectively applies to the ingress traffic. This is mainly because the network - rate set for the network offering applies to the egress traffic from the virtual router to the - instance. The egress traffic is limited by the rate that applies to the port group used by the - instance if any, similar to shared networks. - For example: - Network rate of network offering = 10 Mbps - Network rate of compute offering = 200 Mbps - In shared networks, ingress traffic will not be limited for &PRODUCT;, while egress traffic - will be limited to 200 Mbps. In an isolated network, ingress traffic will be limited to 10 Mbps - and egress to 200 Mbps. -
diff --git a/docs/en-US/network-service-providers.xml b/docs/en-US/network-service-providers.xml deleted file mode 100644 index 32f36ae3d47..00000000000 --- a/docs/en-US/network-service-providers.xml +++ /dev/null @@ -1,151 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Network Service Providers - - For the most up-to-date list of supported network service providers, see the &PRODUCT; UI - or call listNetworkServiceProviders. - - A service provider (also called a network element) is hardware or virtual appliance that - makes a network service possible; for example, a firewall appliance can be installed in the - cloud to provide firewall service. On a single network, multiple providers can provide the same - network service. For example, a firewall service may be provided by Cisco or Juniper devices in - the same physical network. - You can have multiple instances of the same service provider in a network (say, more than - one Juniper SRX device). - If different providers are set up to provide the same service on the network, the - administrator can create network offerings so users can specify which network service provider - they prefer (along with the other choices offered in network offerings). Otherwise, &PRODUCT; - will choose which provider to use whenever the service is called for. - - Supported Network Service Providers - &PRODUCT; ships with an internal list of the supported service providers, and you can - choose from this list when creating a network offering. - - - - - - - - - - - - - - Virtual Router - Citrix NetScaler - Juniper SRX - F5 BigIP - Host based (KVM/Xen) - Cisco VNMC - - - - - Remote Access VPN - Yes - No - No - No - No - No - - - DNS/DHCP/User Data - Yes - No - No - No - No - No - - - Firewall - Yes - No - Yes - No - No - Yes - - - Load Balancing - Yes - Yes - No - Yes - No - No - - - Elastic IP - No - Yes - No - No - No - No - - - Elastic LB - No - Yes - No - No - No - No - - - Source NAT - Yes - No - Yes - No - No - Yes - - - Static NAT - Yes - Yes - Yes - No - No - Yes - - - Port Forwarding - Yes - No - Yes - No - No - Yes - - - - -
diff --git a/docs/en-US/network-setup.xml b/docs/en-US/network-setup.xml deleted file mode 100644 index ceee190d4ca..00000000000 --- a/docs/en-US/network-setup.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - Network Setup - Achieving the correct networking setup is crucial to a successful &PRODUCT; - installation. This section contains information to help you make decisions and follow the right - procedures to get your network set up correctly. - - - - - - - - - - diff --git a/docs/en-US/network-usage-record-format.xml b/docs/en-US/network-usage-record-format.xml deleted file mode 100644 index 34b8f2d4955..00000000000 --- a/docs/en-US/network-usage-record-format.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Network Usage Record Format - For network usage (bytes sent/received), the following fields exist in a usage record. - - account – name of the account - accountid – ID of the account - domainid – ID of the domain in which this account resides - zoneid – Zone where the usage occurred - description – A string describing what the usage record is tracking - usagetype – A number representing the usage type (see Usage Types) - rawusage – A number representing the actual usage in hours - usageid – Device ID (virtual router ID or external device ID) - type – Device type (domain router, external load balancer, etc.) - startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record - -
diff --git a/docs/en-US/networking-in-a-pod.xml b/docs/en-US/networking-in-a-pod.xml deleted file mode 100644 index 5a569bf4d1f..00000000000 --- a/docs/en-US/networking-in-a-pod.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Networking in a Pod - The figure below illustrates network setup within a single pod. The hosts are connected to a - pod-level switch. At a minimum, the hosts should have one physical uplink to each switch. - Bonded NICs are supported as well. The pod-level switch is a pair of redundant gigabit - switches with 10 G uplinks. - - - - - - networksinglepod.png: diagram showing logical view of network in a pod - - - Servers are connected as follows: - - Storage devices are connected to only the network that carries management traffic. - Hosts are connected to networks for both management traffic and public traffic. - Hosts are also connected to one or more networks carrying guest traffic. - - We recommend the use of multiple physical Ethernet cards to implement each network interface as well as redundant switch fabric in order to maximize throughput and improve reliability. - -
diff --git a/docs/en-US/networking-in-a-zone.xml b/docs/en-US/networking-in-a-zone.xml deleted file mode 100644 index e50efbac9ab..00000000000 --- a/docs/en-US/networking-in-a-zone.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Networking in a Zone - The following figure illustrates the network setup within a single zone. - - - - - - networksetupzone.png: Depicts network setup in a single zone - - - A firewall for management traffic operates in the NAT mode. The network typically is assigned IP addresses in the 192.168.0.0/16 Class B private address space. Each pod is assigned IP addresses in the 192.168.*.0/24 Class C private address space. - Each zone has its own set of public IP addresses. Public IP addresses from different zones do not overlap. - -
diff --git a/docs/en-US/networking-overview.xml b/docs/en-US/networking-overview.xml deleted file mode 100644 index a71fe95a864..00000000000 --- a/docs/en-US/networking-overview.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Networking Overview - &PRODUCT; offers two types of networking scenarios: - - - Basic. For AWS-style networking. Provides a single network where guest isolation can be provided through layer-3 means such as security groups (IP address source filtering). - Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks. - - For more details, see Network Setup. -
- diff --git a/docs/en-US/networking_overview.xml b/docs/en-US/networking_overview.xml deleted file mode 100644 index a5f27c31402..00000000000 --- a/docs/en-US/networking_overview.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Networking Overview - - CloudStack offers two types of networking scenarios: - - - Basic. For AWS-style networking. Provides a single network where guest isolation can be provided through layer-3 means such as security groups (IP address source filtering). - Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks. - - For more details, see Network Setup. -
- diff --git a/docs/en-US/networks-for-users-overview.xml b/docs/en-US/networks-for-users-overview.xml deleted file mode 100644 index 19602c48b2a..00000000000 --- a/docs/en-US/networks-for-users-overview.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Overview of Setting Up Networking for Users - People using cloud infrastructure have a variety of needs and preferences when it comes to the networking services provided by the cloud. As a &PRODUCT; administrator, you can do the following things to set up networking for your users: - - Set up physical networks in zones - Set up several different providers for the same service on a single physical network (for example, both Cisco and Juniper firewalls) - Bundle different types of network services into network offerings, so users can choose the desired network services for any given virtual machine - Add new network offerings as time goes on so end users can upgrade to a better class of service on their network - Provide more ways for a network to be accessed by a user, such as through a project of which the user is a member - -
diff --git a/docs/en-US/networks.xml b/docs/en-US/networks.xml deleted file mode 100644 index b28f985a147..00000000000 --- a/docs/en-US/networks.xml +++ /dev/null @@ -1,58 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - Managing Networks and Traffic - In a &PRODUCT;, guest VMs can communicate with each other using shared infrastructure with - the security and user perception that the guests have a private LAN. The &PRODUCT; virtual - router is the main component providing networking features for guest traffic. - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/docs/en-US/nfs-shares-on-management-server.xml b/docs/en-US/nfs-shares-on-management-server.xml deleted file mode 100644 index 881ca8d7600..00000000000 --- a/docs/en-US/nfs-shares-on-management-server.xml +++ /dev/null @@ -1,117 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using the Management Server as the NFS Server - This section tells how to set up NFS shares for primary and secondary storage on the same node with the Management Server. This is more typical of a trial installation, but is technically possible in a larger deployment. It is assumed that you will have less than 16TB of storage on the host. - The exact commands for the following steps may vary depending on your operating system version. - - On RHEL/CentOS systems, you'll need to install the nfs-utils package: - -$ sudo yum install nfs-utils - - - On the Management Server host, create two directories that you will use for primary and secondary storage. For example: - -# mkdir -p /export/primary -# mkdir -p /export/secondary - - - To configure the new directories as NFS exports, edit /etc/exports. Export the NFS share(s) with rw,async,no_root_squash. For example: - # vi /etc/exports - Insert the following line. - /export *(rw,async,no_root_squash) - - Export the /export directory. - # exportfs -a - - Edit the /etc/sysconfig/nfs file. - # vi /etc/sysconfig/nfs - Uncomment the following lines: - -LOCKD_TCPPORT=32803 -LOCKD_UDPPORT=32769 -MOUNTD_PORT=892 -RQUOTAD_PORT=875 -STATD_PORT=662 -STATD_OUTGOING_PORT=2020 - - - Edit the /etc/sysconfig/iptables file. 
- # vi /etc/sysconfig/iptables - Add the following lines at the beginning of the INPUT chain where <NETWORK> is the network that you'll be using: - --A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 111 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 111 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 2049 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 32803 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 32769 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 892 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 892 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 875 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 875 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 662 -j ACCEPT --A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 662 -j ACCEPT - - - Run the following commands: - -# service iptables restart -# service iptables save - - - If NFS v4 communication is used between client and server, add your domain to /etc/idmapd.conf on both the hypervisor host and Management Server. - # vi /etc/idmapd.conf - Remove the character # from the beginning of the Domain line in idmapd.conf and replace the value in the file with your own domain. In the example below, the domain is company.com. - Domain = company.com - - Reboot the Management Server host. - Two NFS shares called /export/primary and /export/secondary are now set up. - - It is recommended that you test to be sure the previous steps have been successful. - - Log in to the hypervisor host. - Be sure NFS and rpcbind are running. The commands might be different depending on your OS. For example: - -# service rpcbind start -# service nfs start -# chkconfig nfs on -# chkconfig rpcbind on -# reboot - - - Log back in to the hypervisor host and try to mount the /export directories. 
For example (substitute your own management server name): - -# mkdir /primarymount -# mount -t nfs <management-server-name>:/export/primary /primarymount -# umount /primarymount -# mkdir /secondarymount -# mount -t nfs <management-server-name>:/export/secondary /secondarymount -# umount /secondarymount - - - - - -
diff --git a/docs/en-US/nfs-shares-on-separate-server.xml b/docs/en-US/nfs-shares-on-separate-server.xml deleted file mode 100644 index 947106dcd4f..00000000000 --- a/docs/en-US/nfs-shares-on-separate-server.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using a Separate NFS Server - This section tells how to set up NFS shares for secondary and (optionally) primary storage on an NFS server running on a separate node from the Management Server. - The exact commands for the following steps may vary depending on your operating system version. - (KVM only) Ensure that no volume is already mounted at your NFS mount point. - - On the storage server, create an NFS share for secondary storage and, if you are using NFS for primary storage as well, create a second NFS share. For example: - -# mkdir -p /export/primary -# mkdir -p /export/secondary - - - To configure the new directories as NFS exports, edit /etc/exports. Export the NFS share(s) with rw,async,no_root_squash. For example: - # vi /etc/exports - Insert the following line. - /export *(rw,async,no_root_squash) - - Export the /export directory. - # exportfs -a - - On the management server, create a mount point for secondary storage. For example: - # mkdir -p /mnt/secondary - - Mount the secondary storage on your Management Server. Replace the example NFS server name and NFS share paths below with your own. - # mount -t nfs nfsservername:/nfs/share/secondary /mnt/secondary - - -
diff --git a/docs/en-US/non-contiguous-vlan.xml b/docs/en-US/non-contiguous-vlan.xml deleted file mode 100644 index 193b91697c3..00000000000 --- a/docs/en-US/non-contiguous-vlan.xml +++ /dev/null @@ -1,67 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Adding Non Contiguous VLAN Ranges - &PRODUCT; provides you with the flexibility to add non contiguous VLAN ranges to your - network. The administrator can either update an existing VLAN range or add multiple non - contiguous VLAN ranges while creating a zone. You can also use the UpdatephysicalNetwork API to - extend the VLAN range. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - Ensure that the VLAN range does not already exist. - - - In the left navigation, choose Infrastructure. - - - On Zones, click View More, then click the zone you want to work with. - - - Click Physical Network. - - - In the Guest node of the diagram, click Configure. - - - Click Edit - - - - - edit-icon.png: button to edit the VLAN range. - - - The VLAN Ranges field is now editable. - - - Specify the start and end of the VLAN range in a comma-separated list. - Specify all the VLANs you want to use, VLANs not specified will be removed if you are - adding new ranges to the existing list. - - - Click Apply. - - -
diff --git a/docs/en-US/offerings.xml b/docs/en-US/offerings.xml deleted file mode 100644 index c880a9c4810..00000000000 --- a/docs/en-US/offerings.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Service Offerings - In this chapter we discuss compute, disk, and system service offerings. Network offerings - are discussed in the section on setting up networking for users. - - - - - diff --git a/docs/en-US/ongoing-config-of-external-firewalls-lb.xml b/docs/en-US/ongoing-config-of-external-firewalls-lb.xml deleted file mode 100644 index f5864da2b2d..00000000000 --- a/docs/en-US/ongoing-config-of-external-firewalls-lb.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Ongoing Configuration of External Firewalls and Load Balancers - Additional user actions (e.g. setting a port forward) will cause further programming of the - firewall and load balancer. A user may request additional public IP addresses and forward - traffic received at these IPs to specific VMs. This is accomplished by enabling static NAT for a - public IP address, assigning the IP to a VM, and specifying a set of protocols and port ranges - to open. When a static NAT rule is created, &PRODUCT; programs the zone's external firewall with - the following objects: - - - A static NAT rule that maps the public IP address to the private IP address of a - VM. - - - A security policy that allows traffic within the set of protocols and port ranges that - are specified. - - - A firewall filter counter that measures the number of bytes of incoming traffic to the - public IP. - - - The number of incoming and outgoing bytes through source NAT, static NAT, and load balancing - rules is measured and saved on each external element. This data is collected on a regular basis - and stored in the &PRODUCT; database. -
diff --git a/docs/en-US/over-provisioning-service-offering-limits.xml b/docs/en-US/over-provisioning-service-offering-limits.xml deleted file mode 100644 index 5a403a30536..00000000000 --- a/docs/en-US/over-provisioning-service-offering-limits.xml +++ /dev/null @@ -1,161 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Over-Provisioning and Service Offering Limits - (Supported for XenServer, KVM, and VMware) - CPU and memory (RAM) over-provisioning factors can be set for each cluster to change the - number of VMs that can run on each host in the cluster. This helps optimize the use of - resources. By increasing the over-provisioning ratio, more resource capacity will be used. If - the ratio is set to 1, no over-provisioning is done. - The administrator can also set global default over-provisioning ratios - in the cpu.overprovisioning.factor and mem.overprovisioning.factor global configuration variables. - The default value of these variables is 1: over-provisioning is turned off by default. - - Over-provisioning ratios are dynamically substituted in &PRODUCT;'s capacity - calculations. For example: - Capacity = 2 GB - Over-provisioning factor = 2 - Capacity after over-provisioning = 4 GB - With this configuration, suppose you deploy 3 VMs of 1 GB each: - Used = 3 GB - Free = 1 GB - The administrator can specify a memory over-provisioning ratio, and can specify both CPU and - memory over-provisioning ratios on a per-cluster basis. - In any given cloud, the optimum number of VMs for each host is affected by such things as - the hypervisor, storage, and hardware configuration. These may be different for each cluster in - the same cloud. A single global over-provisioning setting can not provide the best utilization - for all the different clusters in the cloud. It has to be set for the lowest common denominator. - The per-cluster setting provides a finer granularity for better utilization of resources, no - matter where the &PRODUCT; placement algorithm decides to place a VM. - The overprovisioning settings can be used along with dedicated resources (assigning a - specific cluster to an account) to effectively offer different levels of service to - different accounts. 
For example, an account paying for a more expensive level of service - could be assigned to a dedicated cluster with an over-provisioning ratio of 1, and a - lower-paying account to a cluster with a ratio of 2. - When a new host is added to a cluster, &PRODUCT; will assume the host has the - capability to perform the CPU and RAM over-provisioning which is configured for that - cluster. It is up to the administrator to be sure the host is actually suitable for the - level of over-provisioning which has been set. -
- Limitations on Over-Provisioning in XenServer and KVM - - In XenServer, due to a constraint of this hypervisor, you can not use an - over-provisioning factor greater than 4. - The KVM hypervisor can not manage memory allocation to VMs dynamically. - &PRODUCT; sets the minimum and maximum amount of memory that a VM can use. - The hypervisor adjusts the memory within the set limits based on the memory contention. - -
-
- Requirements for Over-Provisioning - Several prerequisites are required in order for over-provisioning to function - properly. The feature is dependent on the OS type, hypervisor capabilities, and certain - scripts. It is the administrator's responsibility to ensure that these requirements are - met. -
- Balloon Driver - All VMs should have a balloon driver installed in them. The hypervisor - communicates with the balloon driver to free up and make the memory available to a - VM. - - XenServer - The balloon driver can be found as a part of xen pv or PVHVM drivers. The xen - pvhvm drivers are included in upstream linux kernels 2.6.36+. - - - VMware - The balloon driver can be found as a part of the VMware tools. All the VMs that - are deployed in a over-provisioned cluster should have the VMware tools - installed. - - - KVM - All VMs are required to support the virtio drivers. These drivers are installed - in all Linux kernel versions 2.6.25 and greater. The administrator must set - CONFIG_VIRTIO_BALLOON=y in the virtio configuration. - -
-
- Hypervisor capabilities - The hypervisor must be capable of using the memory ballooning. - - XenServer - The DMC (Dynamic Memory Control) capability of the hypervisor should be enabled. - Only XenServer Advanced and above versions have this feature. - - - VMware, KVM - Memory ballooning is supported by default. - -
-
-
- Setting Over-Provisioning Ratios - There are two ways the root admin can set CPU and RAM over-provisioning ratios. First, the - global configuration settings cpu.overprovisioning.factor and mem.overprovisioning.factor will - be applied when a new cluster is created. Later, the ratios can be modified for an existing - cluster. - Only VMs deployed after the change are affected by the new setting. - If you want VMs deployed before the change to adopt the new over-provisioning ratio, - you must stop and restart the VMs. - When this is done, &PRODUCT; recalculates or scales the used and - reserved capacities based on the new over-provisioning ratios, - to ensure that &PRODUCT; is correctly tracking the amount of free capacity. - It is safer not to deploy additional new VMs while the capacity recalculation is underway, in - case the new values for available capacity are not high enough to accommodate the new VMs. - Just wait for the new used/available values to become available, to be sure there is room - for all the new VMs you want. - To change the over-provisioning ratios for an existing cluster: - - - Log in as administrator to the &PRODUCT; UI. - - - In the left navigation bar, click Infrastructure. - - - Under Clusters, click View All. - - - Select the cluster you want to work with, and click the Edit button. - - - Fill in your desired over-provisioning multipliers in the fields CPU overcommit - ratio and RAM overcommit ratio. The value which is initially shown in these - fields is the default value inherited from the global configuration settings. - - - In XenServer, due to a constraint of this hypervisor, you can not use an - over-provisioning factor greater than 4. - - - -
-
- Service Offering Limits and Over-Provisioning - Service offering limits (e.g. 1 GHz, 1 core) are strictly enforced for core count. For example, a guest with a service offering of one core will have only one core available to it regardless of other activity on the Host. - Service offering limits for gigahertz are enforced only in the presence of contention for CPU resources. For example, suppose that a guest was created with a service offering of 1 GHz on a Host that has 2 GHz cores, and that guest is the only guest running on the Host. The guest will have the full 2 GHz available to it. When multiple guests are attempting to use the CPU a weighting factor is used to schedule CPU resources. The weight is based on the clock speed in the service offering. Guests receive a CPU allocation that is proportionate to the GHz in the service offering. For example, a guest created from a 2 GHz service offering will receive twice the CPU allocation as a guest created from a 1 GHz service offering. &PRODUCT; does not perform memory over-provisioning. -
-
\ No newline at end of file diff --git a/docs/en-US/ovm-install.xml b/docs/en-US/ovm-install.xml deleted file mode 100644 index fa4a86b0776..00000000000 --- a/docs/en-US/ovm-install.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Installing OVM for &PRODUCT; - TODO - -
diff --git a/docs/en-US/ovm-requirements.xml b/docs/en-US/ovm-requirements.xml deleted file mode 100644 index 70a8920a8ac..00000000000 --- a/docs/en-US/ovm-requirements.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- System Requirements for OVM - TODO -
diff --git a/docs/en-US/password-storage-engine.xml b/docs/en-US/password-storage-engine.xml deleted file mode 100644 index 8bbc96fcac2..00000000000 --- a/docs/en-US/password-storage-engine.xml +++ /dev/null @@ -1,74 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Changing the Default Password Encryption - Passwords are encoded when creating or updating users. &PRODUCT; allows you to determine the - default encoding and authentication mechanism for admin and user logins. Two new configurable - lists have been introduced—userPasswordEncoders and userAuthenticators. - userPasswordEncoders allows you to configure the order of preference for encoding passwords, - whereas userAuthenticators allows you to configure the order in which authentication schemes are - invoked to validate user passwords. - Additionally, the plain text user authenticator has been modified not to convert supplied - passwords to their md5 sums before checking them with the database entries. It performs a simple - string comparison between retrieved and supplied login passwords instead of comparing the - retrieved md5 hash of the stored password against the supplied md5 hash of the password because - clients no longer hash the password. The following method determines what encoding scheme is - used to encode the password supplied during user creation or modification. - When a new user is created, the user password is encoded by using the first valid encoder - loaded as per the sequence specified in the UserPasswordEncoders property in the - ComponentContext.xml or nonossComponentContext.xml - files. The order of authentication schemes is determined by the UserAuthenticators - property in the same files. If Non-OSS components, such as VMware environments, are to be - deployed, modify the UserPasswordEncoders and UserAuthenticators lists - in the nonossComponentContext.xml file, for OSS environments, such as - XenServer or KVM, modify the ComponentContext.xml file. It is recommended - to make uniform changes across both the files. When a new authenticator or encoder is added, you - can add them to this list. While doing so, ensure that the new authenticator or encoder is - specified as a bean in both these files. 
The administrator can change the ordering of both these - properties as preferred to change the order of schemes. Modify the following list properties - available in client/tomcatconf/nonossComponentContext.xml.in or - client/tomcatconf/componentContext.xml.in as applicable, to the desired - order: - <property name="UserAuthenticators"> - <list> - <ref bean="SHA256SaltedUserAuthenticator"/> - <ref bean="MD5UserAuthenticator"/> - <ref bean="LDAPUserAuthenticator"/> - <ref bean="PlainTextUserAuthenticator"/> - </list> - </property> - <property name="UserPasswordEncoders"> - <list> - <ref bean="SHA256SaltedUserAuthenticator"/> - <ref bean="MD5UserAuthenticator"/> - <ref bean="LDAPUserAuthenticator"/> - <ref bean="PlainTextUserAuthenticator"/> - </list> - In the above default ordering, SHA256Salt is used first for - UserPasswordEncoders. If the module is found and encoding returns a valid value, - the encoded password is stored in the user table's password column. If it fails for any reason, - the MD5UserAuthenticator will be tried next, and the order continues. For - UserAuthenticators, SHA256Salt authentication is tried first. If it succeeds, the - user is logged into the Management server. If it fails, md5 is tried next, and attempts - continue until any of them succeeds and the user logs in. If none of them works, the user is - returned an invalid credential message. -
diff --git a/docs/en-US/per-domain-limits.xml b/docs/en-US/per-domain-limits.xml deleted file mode 100644 index c20e84d4a58..00000000000 --- a/docs/en-US/per-domain-limits.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Per-Domain Limits - &PRODUCT; allows the configuration of limits on a domain basis. With a domain limit in place, all users still have their account limits. They are additionally limited, as a group, to not exceed the resource limits set on their domain. Domain limits aggregate the usage of all accounts in the domain as well as all accounts in all subdomains of that domain. Limits set at the root domain level apply to the sum of resource usage by the accounts in all domains and sub-domains below that root domain. - To set a domain limit: - - Log in to the &PRODUCT; UI. - In the left navigation tree, click Domains. - Select the domain you want to modify. The current domain limits are displayed. A value of -1 shows that there is no limit in place. - Click the Edit button - - - - editbutton.png: edits the settings. - - -
diff --git a/docs/en-US/performance-monitoring.xml b/docs/en-US/performance-monitoring.xml deleted file mode 100644 index 70efbf783df..00000000000 --- a/docs/en-US/performance-monitoring.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Performance Monitoring - Host and guest performance monitoring is available to end users and administrators. This allows the user to monitor their utilization of resources and determine when it is appropriate to choose a more powerful service offering or larger disk. -
- diff --git a/docs/en-US/persistent-network.xml b/docs/en-US/persistent-network.xml deleted file mode 100644 index 1ccc99c59a6..00000000000 --- a/docs/en-US/persistent-network.xml +++ /dev/null @@ -1,100 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Persistent Networks - The network that you can provision without having to deploy any VMs on it is called a - persistent network. A persistent network can be part of a VPC or a non-VPC environment. - When you create other types of network, a network is only a database entry until the first - VM is created on that network. When the first VM is created, a VLAN ID is assigned and the - network is provisioned. Also, when the last VM is destroyed, the VLAN ID is released and the - network is no longer available. With the addition of persistent network, you will have the - ability to create a network in &PRODUCT; in which physical devices can be deployed without - having to run any VMs. Additionally, you can deploy physical devices on that network. - One of the advantages of having a persistent network is that you can create a VPC with a tier - consisting of only physical devices. For example, you might create a VPC for a three-tier - application, deploy VMs for Web and Application tier, and use physical machines for the - Database tier. Another use case is that if you are providing services by using physical - hardware, you can define the network as persistent and therefore even if all its VMs are - destroyed the services will not be discontinued. -
- Persistent Network Considerations - - - Persistent network is designed for isolated networks. - - - All default network offerings are non-persistent. - - - A network offering cannot be editable because changing it affects the behavior of the - existing networks that were created using this network offering. - - - When you create a guest network, the network offering that you select defines the - network persistence. This in turn depends on whether persistent network is enabled in the - selected network offering. - - - An existing network can be made persistent by changing its network offering to an - offering that has the Persistent option enabled. While setting this property, even if the - network has no running VMs, the network is provisioned. - - - An existing network can be made non-persistent by changing its network offering to an - offering that has the Persistent option disabled. If the network has no running VMs, - during the next network garbage collection run the network is shut down. - - - When the last VM on a network is destroyed, the network garbage collector checks if - the network offering associated with the network is persistent, and shuts down the network - only if it is non-persistent. - - -
-
- Creating a Persistent Guest Network - To create a persistent network, perform the following: - - - Create a network offering with the Persistent option enabled. - See . - See the Administration Guide. - - - Select Network from the left navigation pane. - - - Select the guest network that you want to offer this network service to. - - - Click the Edit button. - - - From the Network Offering drop-down, select the persistent network offering you have - just created. - - - Click OK. - - -
-
diff --git a/docs/en-US/physical-network-configuration-settings.xml b/docs/en-US/physical-network-configuration-settings.xml deleted file mode 100644 index 4ab18b01d30..00000000000 --- a/docs/en-US/physical-network-configuration-settings.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Configurable Characteristics of Physical Networks - &PRODUCT; provides configuration settings you can use to set up a physical network in a zone, including: - - What type of network traffic it carries (guest, public, management, storage) - VLANs - Unique name that the hypervisor can use to find that particular network - Enabled or disabled. When a network is first set up, it is disabled – not in use yet. The administrator sets the physical network to enabled, and it begins to be used. The administrator can later disable the network again, which prevents any new virtual networks from being created on that physical network; the existing network traffic continues even though the state is disabled. - Speed - Tags, so network offerings can be matched to physical networks - Isolation method - -
diff --git a/docs/en-US/plugin-development.xml b/docs/en-US/plugin-development.xml deleted file mode 100644 index 0492877eba4..00000000000 --- a/docs/en-US/plugin-development.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Plugin Development - - diff --git a/docs/en-US/plugin-midonet-about.xml b/docs/en-US/plugin-midonet-about.xml deleted file mode 100644 index dd9b3ad08e0..00000000000 --- a/docs/en-US/plugin-midonet-about.xml +++ /dev/null @@ -1,27 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - The MidoNet Plugin - - - diff --git a/docs/en-US/plugin-midonet-features.xml b/docs/en-US/plugin-midonet-features.xml deleted file mode 100644 index f242d63d0ee..00000000000 --- a/docs/en-US/plugin-midonet-features.xml +++ /dev/null @@ -1,57 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Features of the MidoNet Plugin - - - - In &PRODUCT; 4.2.0 only the KVM hypervisor is supported for use in combination with MidoNet. - - - - In &PRODUCT; release 4.2.0 this plugin supports several services in the Advanced Isolated network mode. - - - - When tenants create new isolated layer 3 networks, instead of spinning up extra Virtual Router VMs, the relevant L3 elements (routers etc) are created in the MidoNet virtual topology by making the appropriate calls to the MidoNet API. Instead of using VLANs, isolation is provided by MidoNet. - - - - Aside from the above service (Connectivity), several extra features are supported in the 4.2.0 release: - - - - DHCP - Firewall (ingress) - Source NAT - Static NAT - Port Forwarding - - - - The plugin has been tested with MidoNet version 12.12. (Caddo). - - - - -
diff --git a/docs/en-US/plugin-midonet-introduction.xml b/docs/en-US/plugin-midonet-introduction.xml deleted file mode 100644 index 7793ecbc884..00000000000 --- a/docs/en-US/plugin-midonet-introduction.xml +++ /dev/null @@ -1,26 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Introduction to the MidoNet Plugin - The MidoNet plugin allows &PRODUCT; to use the MidoNet virtualized networking solution as a provider for &PRODUCT; networks and services. For more information on MidoNet and how it works, see http://www.midokura.com/midonet/. -
diff --git a/docs/en-US/plugin-midonet-preparations.xml b/docs/en-US/plugin-midonet-preparations.xml deleted file mode 100644 index cf78774ec2b..00000000000 --- a/docs/en-US/plugin-midonet-preparations.xml +++ /dev/null @@ -1,90 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Prerequisites - - In order to use the MidoNet plugin, the compute hosts must be running the MidoNet Agent, and the MidoNet API server must be available. Please consult the MidoNet User Guide for more information. The following section describes the &PRODUCT; side setup. - - - - &PRODUCT; needs to have at least one physical network with the isolation method set to "MIDO". This network should be enabled for the Guest and Public traffic types. - - - - Next, we need to set the following &PRODUCT; settings under "Global Settings" in the UI: - -&PRODUCT; settings - - - - Setting Name - Description - Example - - - - - midonet.apiserver.address - Specify the address at which the Midonet API server can be contacted - http://192.168.1.144:8081/midolmanj-mgmt - - - midonet.providerrouter.id - Specifies the UUID of the Midonet provider router - d7c5e6a3-e2f4-426b-b728-b7ce6a0448e5 - - - -
-
- - - - We also want MidoNet to take care of public traffic, so in componentContext.xml we need to replace this line: - - ]]> - - - With this: - - ]]> - - - -
- - - - On the compute host, MidoNet takes advantage of per-traffic type VIF driver support in &PRODUCT; KVM. - - - In agent.properties, we set the following to make MidoNet take care of Guest and Public traffic: - -libvirt.vif.driver.Guest=com.cloud.network.resource.MidoNetVifDriver -libvirt.vif.driver.Public=com.cloud.network.resource.MidoNetVifDriver - - This is explained further in MidoNet User Guide. - - - -
diff --git a/docs/en-US/plugin-midonet-provider.xml b/docs/en-US/plugin-midonet-provider.xml deleted file mode 100644 index 904828caecd..00000000000 --- a/docs/en-US/plugin-midonet-provider.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Enabling the MidoNet service provider via the API - - To enable via the API, use the following API calls: - addNetworkServiceProvider - - name = "MidoNet" - physicalnetworkid = <the uuid of the physical network> - - updateNetworkServiceProvider - - id = <the provider uuid returned by the previous call> - state = "Enabled" - - - -
\ No newline at end of file diff --git a/docs/en-US/plugin-midonet-revisions.xml b/docs/en-US/plugin-midonet-revisions.xml deleted file mode 100644 index 73def2325b5..00000000000 --- a/docs/en-US/plugin-midonet-revisions.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Revision History - - - - 0-0 - Wed Mar 13 2013 - - Dave - Cahill - dcahill@midokura.com - - - - Documentation created for 4.2.0 version of the MidoNet Plugin - - - - - - diff --git a/docs/en-US/plugin-midonet-ui.xml b/docs/en-US/plugin-midonet-ui.xml deleted file mode 100644 index 8ee9850e5a7..00000000000 --- a/docs/en-US/plugin-midonet-ui.xml +++ /dev/null @@ -1,65 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Enabling the MidoNet service provider via the UI - To allow &PRODUCT; to use the MidoNet Plugin the network service provider needs to be enabled on the physical network. - - - - The steps to enable via the UI are as follows: - - - In the left navbar, click Infrastructure - - - - In Zones, click View All - - - - Click the name of the Zone on which you are setting up MidoNet - - - - Click the Physical Network tab - - - - Click the Name of the Network on which you are setting up MidoNet - - - - Click Configure on the Network Service Providers box - - - - Click on the name MidoNet - - - - Click the Enable Provider button in the Network tab - - - - -
diff --git a/docs/en-US/plugin-midonet-usage.xml b/docs/en-US/plugin-midonet-usage.xml deleted file mode 100644 index a314581dcda..00000000000 --- a/docs/en-US/plugin-midonet-usage.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - Using the MidoNet Plugin - - - - - diff --git a/docs/en-US/plugin-niciranvp-about.xml b/docs/en-US/plugin-niciranvp-about.xml deleted file mode 100644 index cfab83c73c3..00000000000 --- a/docs/en-US/plugin-niciranvp-about.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - The Nicira NVP Plugin - - - - diff --git a/docs/en-US/plugin-niciranvp-devicemanagement.xml b/docs/en-US/plugin-niciranvp-devicemanagement.xml deleted file mode 100644 index 761c39f3179..00000000000 --- a/docs/en-US/plugin-niciranvp-devicemanagement.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Device Management - In &PRODUCT; a Nicira NVP setup is considered a "device" that can be added and removed from a physical network. To complete the configuration of the Nicira NVP plugin a device needs to be added to the physical network. Press the "Add NVP Controller" button on the provider panel and enter the configuration details. - - - - - - nvp-physical-network-stt.png: a screenshot of the device configuration popup. - - - -
diff --git a/docs/en-US/plugin-niciranvp-features.xml b/docs/en-US/plugin-niciranvp-features.xml deleted file mode 100644 index e439f1b4923..00000000000 --- a/docs/en-US/plugin-niciranvp-features.xml +++ /dev/null @@ -1,84 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Features of the Nicira NVP Plugin - The following table lists the CloudStack network services provided by the Nicira NVP Plugin. - - Supported Services - - - - Network Service - CloudStack version - NVP version - - - - - Virtual Networking - >= 4.0 - >= 2.2.1 - - - Source NAT - >= 4.1 - >= 3.0.1 - - - Static NAT - >= 4.1 - >= 3.0.1 - - - Port Forwarding - >= 4.1 - >= 3.0.1 - - - -
- The Virtual Networking service was originally called 'Connectivity' in CloudStack 4.0 - The following hypervisors are supported by the Nicira NVP Plugin. - - Supported Hypervisors - - - - Hypervisor - CloudStack version - - - - - XenServer - >= 4.0 - - - KVM - >= 4.1 - - - -
- Please refer to the Nicira NVP configuration guide on how to prepare the hypervisors for Nicira NVP integration. -
diff --git a/docs/en-US/plugin-niciranvp-introduction.xml b/docs/en-US/plugin-niciranvp-introduction.xml deleted file mode 100644 index a06f12317e5..00000000000 --- a/docs/en-US/plugin-niciranvp-introduction.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Introduction to the Nicira NVP Plugin - The Nicira NVP plugin adds Nicira NVP as one of the available SDN implementations in - CloudStack. With the plugin an existing Nicira NVP setup can be used by CloudStack to - implement isolated guest networks and to provide additional services like routing and - NAT. -
diff --git a/docs/en-US/plugin-niciranvp-networkofferings.xml b/docs/en-US/plugin-niciranvp-networkofferings.xml deleted file mode 100644 index b30437e97ba..00000000000 --- a/docs/en-US/plugin-niciranvp-networkofferings.xml +++ /dev/null @@ -1,131 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Network Offerings - Using the Nicira NVP plugin requires a network offering with Virtual Networking enabled and configured to use the NiciraNvp element. Typical use cases combine services from the Virtual Router appliance and the Nicira NVP plugin. - - Isolated network offering with regular services from the Virtual Router. - - - - Service - Provider - - - - - VPN - VirtualRouter - - - DHCP - VirtualRouter - - - DNS - VirtualRouter - - - Firewall - VirtualRouter - - - Load Balancer - VirtualRouter - - - User Data - VirtualRouter - - - Source NAT - VirtualRouter - - - Static NAT - VirtualRouter - - - Post Forwarding - VirtualRouter - - - Virtual Networking - NiciraNVP - - - -
- - - - - - nvp-physical-network-stt.png: a screenshot of a network offering. - - - The tag in the network offering should be set to the name of the physical network with the NVP provider. - Isolated network with network services. The virtual router is still required to provide network services like dns and dhcp. - - Isolated network offering with network services - - - - Service - Provider - - - - - DHCP - VirtualRouter - - - DNS - VirtualRouter - - - User Data - VirtualRouter - - - Source NAT - NiciraNVP - - - Static NAT - NiciraNVP - - - Post Forwarding - NiciraNVP - - - Virtual Networking - NiciraNVP - - - -
- -
diff --git a/docs/en-US/plugin-niciranvp-physicalnet.xml b/docs/en-US/plugin-niciranvp-physicalnet.xml deleted file mode 100644 index d3202905fb1..00000000000 --- a/docs/en-US/plugin-niciranvp-physicalnet.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Zone Configuration - &PRODUCT; needs to have at least one physical network with the isolation method set to "STT". This network should be enabled for the Guest traffic type. - The Guest traffic type should be configured with the traffic label that matches the name of - the Integration Bridge on the hypervisor. See the Nicira NVP User Guide for more details - on how to set this up in XenServer or KVM. - - - - - - nvp-physical-network-stt.png: a screenshot of a physical network with the STT isolation type - - -
diff --git a/docs/en-US/plugin-niciranvp-preparations.xml b/docs/en-US/plugin-niciranvp-preparations.xml deleted file mode 100644 index 60725591fda..00000000000 --- a/docs/en-US/plugin-niciranvp-preparations.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Prerequisites - Before enabling the Nicira NVP plugin the NVP Controller needs to be configured. Please review the NVP User Guide on how to do that. - Make sure you have the following information ready: - - The IP address of the NVP Controller - The username to access the API - The password to access the API - The UUID of the Transport Zone that contains the hypervisors in this Zone - - The UUID of the Gateway Service used to provide router and NAT services. - - - The gateway service uuid is optional and is used for Layer 3 services only (SourceNat, StaticNat and PortForwarding) -
diff --git a/docs/en-US/plugin-niciranvp-provider.xml b/docs/en-US/plugin-niciranvp-provider.xml deleted file mode 100644 index 8694478b483..00000000000 --- a/docs/en-US/plugin-niciranvp-provider.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Enabling the service provider - The Nicira NVP provider is disabled by default. Navigate to the "Network Service Providers" configuration of the physical network with the STT isolation type. Navigate to the Nicira NVP provider and press the "Enable Provider" button. - CloudStack 4.0 does not have the UI interface to configure the Nicira NVP plugin. Configuration needs to be done using the API directly. - - - - - - nvp-physical-network-stt.png: a screenshot of an enabled Nicira NVP provider - - - -
\ No newline at end of file diff --git a/docs/en-US/plugin-niciranvp-revisions.xml b/docs/en-US/plugin-niciranvp-revisions.xml deleted file mode 100644 index b58d3336aba..00000000000 --- a/docs/en-US/plugin-niciranvp-revisions.xml +++ /dev/null @@ -1,59 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Revision History - - - - 0-0 - Wed Oct 03 2012 - - Hugo - Trippaers - hugo@apache.org - - - - Documentation created for 4.0.0-incubating version of the NVP Plugin - - - - - 1-0 - Wed May 22 2013 - - Hugo - Trippaers - hugo@apache.org - - - - Documentation updated for &PRODUCT; 4.1.0 - - - - - - diff --git a/docs/en-US/plugin-niciranvp-tables.xml b/docs/en-US/plugin-niciranvp-tables.xml deleted file mode 100644 index 615f3494c09..00000000000 --- a/docs/en-US/plugin-niciranvp-tables.xml +++ /dev/null @@ -1,106 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Database tables - The following tables are added to the cloud database for the Nicira NVP Plugin - - nicira_nvp_nic_map - - - - id - auto incrementing id - - - logicalswitch - uuid of the logical switch this port is connected to - - - logicalswitchport - uuid of the logical switch port for this nic - - - nic - the &PRODUCT; uuid for this nic, reference to the nics table - - - -
- - - external_nicira_nvp_devices - - - - id - auto incrementing id - - - uuid - UUID identifying this device - - - physical_network_id - the physical network this device is configured on - - - provider_name - NiciraNVP - - - device_name - display name for this device - - - host_id - reference to the host table with the device configuration - - - -
- - - nicira_nvp_router_map - - - - id - auto incrementing id - - - logicalrouter_uuid - uuid of the logical router - - - network_id - id of the network this router is linked to - - - -
- - - nicira_nvp_router_map is only available in &PRODUCT; 4.1 and above - - -
\ No newline at end of file diff --git a/docs/en-US/plugin-niciranvp-troubleshooting.xml b/docs/en-US/plugin-niciranvp-troubleshooting.xml deleted file mode 100644 index 02b06555914..00000000000 --- a/docs/en-US/plugin-niciranvp-troubleshooting.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - Troubleshooting the Nicira NVP Plugin - - - - diff --git a/docs/en-US/plugin-niciranvp-ui.xml b/docs/en-US/plugin-niciranvp-ui.xml deleted file mode 100644 index 8b1bbad8395..00000000000 --- a/docs/en-US/plugin-niciranvp-ui.xml +++ /dev/null @@ -1,26 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Configuring the Nicira NVP plugin from the UI - In CloudStack 4.1.0-incubating the Nicira NVP plugin and its resources can be configured in the infrastructure tab of the UI. Navigate to the physical network with STT isolation and configure the network elements. The NiciraNvp is listed here. -
diff --git a/docs/en-US/plugin-niciranvp-usage.xml b/docs/en-US/plugin-niciranvp-usage.xml deleted file mode 100644 index 9f04c382bd6..00000000000 --- a/docs/en-US/plugin-niciranvp-usage.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - Configuring the Nicira NVP Plugin - - - - - - - diff --git a/docs/en-US/plugin-niciranvp-uuidreferences.xml b/docs/en-US/plugin-niciranvp-uuidreferences.xml deleted file mode 100644 index cb5f1cae834..00000000000 --- a/docs/en-US/plugin-niciranvp-uuidreferences.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- UUID References - The plugin maintains several references in the &PRODUCT; database to items created on the NVP Controller. - Every guest network that is created will have its broadcast type set to Lswitch and if the network is in state "Implemented", the broadcast URI will have the UUID of the Logical Switch that was created for this network on the NVP Controller. - The Nics that are connected to one of the Logical Switches will have their Logical Switch Port UUID listed in the nicira_nvp_nic_map table - All devices created on the NVP Controller will have a tag set to domain-account of the owner of the network, this string can be used to search for items in the NVP Controller. - -
diff --git a/docs/en-US/plugin-niciranvp-vpc.xml b/docs/en-US/plugin-niciranvp-vpc.xml deleted file mode 100644 index a43c5fa85d3..00000000000 --- a/docs/en-US/plugin-niciranvp-vpc.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - - - Using the Nicira NVP plugin with VPC - - - - - - diff --git a/docs/en-US/plugin-niciranvp-vpcfeatures.xml b/docs/en-US/plugin-niciranvp-vpcfeatures.xml deleted file mode 100644 index a8d8194e9ba..00000000000 --- a/docs/en-US/plugin-niciranvp-vpcfeatures.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- Supported VPC features - The Nicira NVP plugin supports &PRODUCT; VPC to a certain extent. Starting with &PRODUCT; version 4.1 VPCs can be deployed using NVP isolated networks. - It is not possible to use a Nicira NVP Logical Router as a VPC Router - It is not possible to connect a private gateway using a Nicira NVP Logical Switch -
diff --git a/docs/en-US/plugin-niciranvp-vpcnetworkoffering.xml b/docs/en-US/plugin-niciranvp-vpcnetworkoffering.xml deleted file mode 100644 index 141006ee350..00000000000 --- a/docs/en-US/plugin-niciranvp-vpcnetworkoffering.xml +++ /dev/null @@ -1,81 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- VPC Network Offerings - The VPC needs specific network offerings with the VPC flag enabled. Otherwise these network offerings are identical to regular network offerings. To allow VPC networks with a Nicira NVP isolated network the offerings need to support the Virtual Networking service with the NiciraNVP provider. - In a typical configuration two network offerings need to be created. One with the loadbalancing service enabled and one without loadbalancing. - - VPC Network Offering with Loadbalancing - - - - Service - Provider - - - - - VPN - VpcVirtualRouter - - - DHCP - VpcVirtualRouter - - - DNS - VpcVirtualRouter - - - Load Balancer - VpcVirtualRouter - - - User Data - VpcVirtualRouter - - - Source NAT - VpcVirtualRouter - - - Static NAT - VpcVirtualRouter - - - Post Forwarding - VpcVirtualRouter - - - NetworkACL - VpcVirtualRouter - - - Virtual Networking - NiciraNVP - - - -
- -
diff --git a/docs/en-US/plugin-niciranvp-vpcoffering.xml b/docs/en-US/plugin-niciranvp-vpcoffering.xml deleted file mode 100644 index 292621e516c..00000000000 --- a/docs/en-US/plugin-niciranvp-vpcoffering.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; - -%xinclude; -]> - -
- VPC Offering with Nicira NVP - To allow a VPC to use the Nicira NVP plugin to provision networks, a new VPC offering needs to be created which allows the Virtual Networking service to be implemented by NiciraNVP. - This is not currently possible with the UI. The API does provide the proper calls to create a VPC offering with Virtual Networking enabled. However due to a limitation in the 4.1 API it is not possible to select the provider for this network service. To configure the VPC offering with the NiciraNVP provider edit the database table 'vpc_offering_service_map' and change the provider to NiciraNvp for the service 'Connectivity' - It is also possible to update the default VPC offering by adding a row to the - 'vpc_offering_service_map' with service 'Connectivity' and provider 'NiciraNvp' - - - - - - nvp-physical-network-stt.png: a screenshot of the mysql table. - - - When creating a new VPC offering please note that the UI does not allow you to select a VPC offering yet. The VPC needs to be created using the API with the offering UUID. -
diff --git a/docs/en-US/pod-add.xml b/docs/en-US/pod-add.xml deleted file mode 100644 index 2a2b08753a9..00000000000 --- a/docs/en-US/pod-add.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding a Pod - When you created a new zone, &PRODUCT; adds the first pod for you. You can add more pods at any time using the procedure in this section. - - Log in to the &PRODUCT; UI. See . - In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone to which you want to add a pod. - Click the Compute and Storage tab. In the Pods node of the diagram, click View All. - Click Add Pod. - Enter the following details in the dialog. - - Name. The name of the pod. - Gateway. The gateway for the hosts in that pod. - Netmask. The network prefix that defines the pod's subnet. Use CIDR notation. - Start/End Reserved System IP. The IP range in the management network that &PRODUCT; uses to manage various system VMs, such as Secondary Storage VMs, Console Proxy VMs, and DHCP. For more information, see System Reserved IP Addresses. - - - Click OK. - -
diff --git a/docs/en-US/port-forwarding.xml b/docs/en-US/port-forwarding.xml deleted file mode 100644 index 1bbba45e3b8..00000000000 --- a/docs/en-US/port-forwarding.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Port Forwarding - A port forward service is a set of port forwarding rules that define a policy. A port forward service is then applied to one or more guest VMs. The guest VM then has its inbound network access managed according to the policy defined by the port forwarding service. You can optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to allow only incoming requests from certain IP addresses to be forwarded. - A guest VM can be in any number of port forward services. Port forward services can be defined but have no members. If a guest VM is part of more than one network, port forwarding rules will function only if they are defined on the default network - You cannot use port forwarding to open ports for an elastic IP address. When elastic IP is used, outside access is instead controlled through the use of security groups. See Security Groups. - To set up port forwarding: - - Log in to the &PRODUCT; UI as an administrator or end user. - If you have not already done so, add a public IP address range to a zone in &PRODUCT;. See Adding a Zone and Pod in the Installation Guide. - Add one or more VM instances to &PRODUCT;. - In the left navigation bar, click Network. - Click the name of the guest network where the VMs are running. - - Choose an existing IP address or acquire a new IP address. See . Click the name of the IP address in the list. - Click the Configuration tab. - In the Port Forwarding node of the diagram, click View All. - Fill in the following: - - Public Port. The port to which public traffic will be - addressed on the IP address you acquired in the previous step. - Private Port. The port on which the instance is listening for - forwarded public traffic. - Protocol. The communication protocol in use between the two - ports - - Click Add. - -
diff --git a/docs/en-US/portable-ip.xml b/docs/en-US/portable-ip.xml deleted file mode 100644 index f9ae395de20..00000000000 --- a/docs/en-US/portable-ip.xml +++ /dev/null @@ -1,145 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Portable IPs -
- About Portable IP - Portable IPs in &PRODUCT; are region-level pool of IPs, which are elastic in nature, that - can be transferred across geographically separated zones. As an administrator, you can - provision a pool of portable public IPs at region level and are available for user - consumption. The users can acquire portable IPs if admin has provisioned portable IPs at the - region level they are part of. These IPs can be used for any service within an advanced zone. - You can also use portable IPs for EIP services in basic zones. -
-
- Configuring Portable IPs - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, click Regions. - - - Choose the Regions that you want to work with. - - - Click View Portable IP. - - - Click Portable IP Range. - The Add Portable IP Range window is displayed. - - - Specify the following: - - - Start IP/ End IP: A range of IP addresses that - are accessible from the Internet and will be allocated to guest VMs. Enter the first - and last IP addresses that define a range that &PRODUCT; can assign to guest - VMs. - - - Gateway: The gateway in use for the Portable IP - addresses you are configuring. - - - Netmask: The netmask associated with the Portable - IP range. - - - VLAN: The VLAN that will be used for public - traffic. - - - - - Click OK. - - -
-
- Acquiring a Portable IP - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - Click the name of the network where you want to work with. - - - Click View IP Addresses. - - - Click Acquire New IP. - The Acquire New IP window is displayed. - - - Specify whether you want cross-zone IP or not. - - - Click Yes in the confirmation dialog. - Within a few moments, the new IP address should appear with the state Allocated. You - can now use the IP address in port forwarding or static NAT rules. - - -
-
- Transferring Portable IP - An IP can be transferred from one network to another only if Static NAT is enabled. - However, when a portable IP is associated with a network, you can use it for any service in - the network. - To transfer a portable IP across the networks, execute the following API: - http://localhost:8096/client/api?command=enableStaticNat&response=json&ipaddressid=a4bc37b2-4b4e-461d-9a62-b66414618e36&virtualmachineid=a242c476-ef37-441e-9c7b-b303e2a9cb4f&networkid=6e7cd8d1-d1ba-4c35-bdaf-333354cbd49810 - Replace the UUID with appropriate UUID. For example, if you want to transfer a portable IP - to network X and VM Y in a network, execute the following: - http://localhost:8096/client/api?command=enableStaticNat&response=json&ipaddressid=a4bc37b2-4b4e-461d-9a62-b66414618e36&virtualmachineid=Y&networkid=X - -
-
diff --git a/docs/en-US/prepare-linux-template.xml b/docs/en-US/prepare-linux-template.xml deleted file mode 100755 index 84c2cdebf90..00000000000 --- a/docs/en-US/prepare-linux-template.xml +++ /dev/null @@ -1,190 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- System preparation for Linux - The following steps will prepare a basic Linux installation for templating. - - - - Installation - It is good practice to name your VM something generic during installation, this will ensure components such as LVM do not appear unique to a machine. It is recommended that the name of "localhost" is used for installation. - For CentOS, it is necessary to take unique identification out of the interface configuration file, for this edit /etc/sysconfig/network-scripts/ifcfg-eth0 and change the content to the following. - - DEVICE=eth0 - TYPE=Ethernet - BOOTPROTO=dhcp - ONBOOT=yes - - - The next steps updates the packages on the Template Master. - - - Ubuntu - - sudo -i - apt-get update - apt-get upgrade -y - apt-get install -y acpid ntp - reboot - - - - CentOS - - ifup eth0 - yum update -y - reboot - - - - - - Password management - If preferred, custom users (such as ones created during the Ubuntu installation) should be removed. First ensure the root user account is enabled by giving it a password and then login as root to continue. - - sudo passwd root - logout - - As root, remove any custom user accounts created during the installation process. - - deluser myuser --remove-home - - - See for instructions to setup the password management script, this will allow &PRODUCT; to change your root password from the web interface. - - - Hostname Management - CentOS configures the hostname by default on boot. Unfortunately Ubuntu does not have this functionality, for Ubuntu installations use the following steps. - - - Ubuntu - The hostname of a Templated VM is set by a custom script in /etc/dhcp/dhclient-exit-hooks.d, this script first checks if the current hostname is localhost, if true, it will get the host-name, domain-name and fixed-ip from the DHCP lease file and use those values to set the hostname and append the /etc/hosts file for local hostname resolution. 
Once this script, or a user has changed the hostname from localhost, it will no longer adjust system files regardless of its new hostname. The script also recreates openssh-server keys, which should have been deleted before templating (shown below). Save the following script to /etc/dhcp/dhclient-exit-hooks.d/sethostname, and adjust the permissions. - - - #!/bin/sh - # dhclient change hostname script for Ubuntu - oldhostname=$(hostname -s) - if [ $oldhostname = 'localhost' ] - then - sleep 10 # Wait for configuration to be written to disk - hostname=$(cat /var/lib/dhcp/dhclient.eth0.leases | awk ' /host-name/ { host = $3 } END { printf host } ' | sed 's/[";]//g' ) - fqdn="$hostname.$(cat /var/lib/dhcp/dhclient.eth0.leases | awk ' /domain-name/ { domain = $3 } END { printf domain } ' | sed 's/[";]//g')" - ip=$(cat /var/lib/dhcp/dhclient.eth0.leases | awk ' /fixed-address/ { lease = $2 } END { printf lease } ' | sed 's/[";]//g') - echo "cloudstack-hostname: Hostname _localhost_ detected. Changing hostname and adding hosts." - echo " Hostname: $hostname \n FQDN: $fqdn \n IP: $ip" - # Update /etc/hosts - awk -v i="$ip" -v f="$fqdn" -v h="$hostname" "/^127/{x=1} !/^127/ && x { x=0; print i,f,h; } { print $0; }" /etc/hosts > /etc/hosts.dhcp.tmp - mv /etc/hosts /etc/hosts.dhcp.bak - mv /etc/hosts.dhcp.tmp /etc/hosts - # Rename Host - echo $hostname > /etc/hostname - hostname $hostname - # Recreate SSH2 - dpkg-reconfigure openssh-server - fi - ### End of Script ### - - chmod 774 /etc/dhcp/dhclient-exit-hooks.d/sethostname - - - - - The following steps should be run when you are ready to template your Template Master. If the Template Master is rebooted during these steps you will have to run all the steps again. At the end of this process the Template Master should be shut down and the template created in order to create and deploy the final template.
- - - Remove the udev persistent device rules - This step removes information unique to your Template Master such as network MAC addresses, lease files and CD block devices, the files are automatically generated on next boot. - - - Ubuntu - - rm -f /etc/udev/rules.d/70* - rm -f /var/lib/dhcp/dhclient.* - - - - CentOS - - rm -f /etc/udev/rules.d/70* - rm -f /var/lib/dhclient/* - - - - - - Remove SSH Keys - This step is to ensure all your Templated VMs do not have the same SSH keys, which would decrease the security of the machines dramatically. - - rm -f /etc/ssh/*key* - - - - Cleaning log files - It is good practice to remove old logs from the Template Master. - - cat /dev/null > /var/log/audit/audit.log 2>/dev/null - cat /dev/null > /var/log/wtmp 2>/dev/null - logrotate -f /etc/logrotate.conf 2>/dev/null - rm -f /var/log/*-* /var/log/*.gz 2>/dev/null - - - - Setting hostname - In order for the Ubuntu DHCP script to function and the CentOS dhclient to set the VM hostname they both require the Template Master's hostname to be "localhost", run the following commands to change the hostname. - - hostname localhost - echo "localhost" > /etc/hostname - - - - Set user password to expire - This step forces the user to change the password of the VM after the template has been deployed. - - passwd --expire root - - - - Clearing User History - The next step clears the bash commands you have just run. - - history -c - unset HISTFILE - - - - Shutdown the VM - You're now ready to shut down your Template Master and create a template! - - halt -p - - - - Create the template! - You are now ready to create the template, for more information see . - - - Templated VMs for both Ubuntu and CentOS may require a reboot after provisioning in order to pick up the hostname. - -
diff --git a/docs/en-US/prepare-system-vm-template.xml b/docs/en-US/prepare-system-vm-template.xml deleted file mode 100644 index 35cc7e979bc..00000000000 --- a/docs/en-US/prepare-system-vm-template.xml +++ /dev/null @@ -1,79 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Prepare the System VM Template - Secondary storage must be seeded with a template that is used for &PRODUCT; system VMs. - Citrix provides you with the necessary binary package of the system VM. - - When copying and pasting a command, ensure that the command has pasted as a single line - before executing. Some document viewers may introduce unwanted line breaks in copied - text. - - - - If you are using a separate NFS server, mount the secondary storage on your Management - Server. Replace the example NFS server name and NFS share paths below with your own. - # mount -t nfs nfsservername:/nfs/share/secondary /mnt/secondary - If your secondary storage mount point is not named /mnt/secondary, substitute your own - mount point name. - - - On the Management Server, run one or more of the following cloud-install-sys-tmplt - commands to retrieve and decompress the system VM template. Run the command for each - hypervisor type that you expect end users to run in this Zone. - If you set the &PRODUCT; database encryption type to "web" when you set up the database, - you must now add the parameter -s <management-server-secret-key>. See . - This process will require approximately 5 GB of free space on the local file system and - up to 30 minutes each time it runs. 
- - - For XenServer: - # /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2 -h xenserver -s <optional-management-server-secret-key> -F - - - For vSphere: - # /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.ova -h vmware -s <optional-management-server-secret-key> -F - - - For KVM: - # /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -s <optional-management-server-secret-key> -F - - - For LXC: - # /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h lxc -s <optional-management-server-secret-key> -F - - - - - When the script has finished, unmount secondary storage and remove the created - directory. - # umount /mnt/secondary -# rmdir /mnt/secondary - - - Repeat these steps for each secondary storage server. - - -
diff --git a/docs/en-US/primary-storage-add.xml b/docs/en-US/primary-storage-add.xml deleted file mode 100644 index d18dece54d9..00000000000 --- a/docs/en-US/primary-storage-add.xml +++ /dev/null @@ -1,163 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Add Primary Storage -
- System Requirements for Primary Storage - Hardware requirements: - - Any standards-compliant iSCSI or NFS server that is supported by the underlying hypervisor. - The storage server should be a machine with a large number of disks. The disks should ideally be managed by a hardware RAID controller. - Minimum required capacity depends on your needs. - - When setting up primary storage, follow these restrictions: - - Primary storage cannot be added until a host has been added to the cluster. - If you do not provision shared primary storage, you must set the global configuration parameter system.vm.local.storage.required to true, or else you will not be able to start VMs. - -
-
- Adding Primary Storage - When you create a new zone, the first primary storage is added as part of that procedure. You can add primary storage servers at any time, such as when adding a new cluster or adding more servers to an existing cluster. - When using preallocated storage for primary storage, be sure there is nothing on the storage (ex. you have an empty SAN volume or an empty NFS share). Adding the storage to &PRODUCT; will destroy any existing data. - Primary storage can also be added at the zone level through the &PRODUCT; API (adding zone-level primary storage is not yet supported through the &PRODUCT; UI).Once primary storage has been added at the zone level, it can be managed through the &PRODUCT; UI. - - Log in to the &PRODUCT; UI (see ). - In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone in which you want to add the primary storage. - Click the Compute tab. - In the Primary Storage node of the diagram, click View All. - Click Add Primary Storage. - - Provide the following information in the dialog. The information required varies depending on your choice in Protocol. - - Scope. Indicate whether the storage is available to all hosts in the zone or only to hosts in a single cluster. - Pod. (Visible only if you choose Cluster in the Scope field.) The pod for the storage device. - Cluster. (Visible only if you choose Cluster in the Scope field.) The cluster for the storage device. - Name. The name of the storage device. - Protocol. For XenServer, choose either NFS, iSCSI, or PreSetup. For KVM, choose NFS or SharedMountPoint. For vSphere choose either VMFS (iSCSI or FiberChannel) or NFS. - Server (for NFS, iSCSI, or PreSetup). The IP address or DNS name of the storage device. - Server (for VMFS). The IP address or DNS name of the vCenter server. - Path (for NFS). In NFS this is the exported path from the server. - Path (for VMFS). In vSphere this is a combination of the datacenter name and the datastore name. 
The format is "/" datacenter name "/" datastore name. For example, "/cloud.dc.VM/cluster1datastore". - Path (for SharedMountPoint). With KVM this is the path on each host that is where this primary storage is mounted. For example, "/mnt/primary". - SR Name-Label (for PreSetup). Enter the name-label of the SR that has been set up outside &PRODUCT;. - Target IQN (for iSCSI). In iSCSI this is the IQN of the target. For example, iqn.1986-03.com.sun:02:01ec9bb549-1271378984. - Lun # (for iSCSI). In iSCSI this is the LUN number. For example, 3. - Tags (optional). The comma-separated list of tags for this storage device. It should be an equivalent set or superset of the tags on your disk offerings.. - - The tag sets on primary storage across clusters in a Zone must be identical. For example, if cluster A provides primary storage that has tags T1 and T2, all other clusters in the Zone must also provide primary storage that has tags T1 and T2. - - Click OK. - -
-
- Configuring a Storage Plug-in - - Primary storage that is based on a custom plug-in (ex. SolidFire) must be added through the &PRODUCT; API (described later in this section). There is no support at this time through the &PRODUCT; UI to add this type of primary storage (although most of its features are available through the &PRODUCT; UI). - - - At this time, a custom storage plug-in, such as the SolidFire storage plug-in, can only be leveraged for data disks (through Disk Offerings). - - - The SolidFire storage plug-in for &PRODUCT; is part of the standard &PRODUCT; install. There is no additional work required to add this component. - - Adding primary storage that is based on the SolidFire plug-in enables &PRODUCT; to provide hard quality-of-service (QoS) guarantees. - When used with Disk Offerings, an administrator is able to build an environment in which a data disk that a user creates leads to the dynamic creation of a SolidFire volume, which has guaranteed performance. Such a SolidFire volume is associated with one (and only ever one) &PRODUCT; volume, so performance of the &PRODUCT; volume does not vary depending on how heavily other tenants are using the system. - The createStoragePool API has been augmented to support plugable storage providers. The following is a list of parameters to use when adding storage to &PRODUCT; that is based on the SolidFire plug-in: - - - command=createStoragePool - - - scope=zone - - - zoneId=[your zone id] - - - name=[name for primary storage] - - - hypervisor=Any - - - provider=SolidFire - - - capacityIops=[whole number of IOPS from the SAN to give to &PRODUCT;] - - - capacityBytes=[whole number of bytes from the SAN to give to &PRODUCT;] - - - The url parameter is somewhat unique in that its value can contain additional key/value pairs. 
- - url=[key/value pairs detailed below (values are URL encoded; for example, '=' is represented as '%3D')] - - MVIP%3D[Management Virtual IP Address] (can be suffixed with :[port number]) - - - SVIP%3D[Storage Virtual IP Address] (can be suffixed with :[port number]) - - - clusterAdminUsername%3D[cluster admin's username] - - - clusterAdminPassword%3D[cluster admin's password] - - - clusterDefaultMinIops%3D[Min IOPS (whole number) to set for a volume; used if Min IOPS is not specified by administrator or user] - - - clusterDefaultMaxIops%3D[Max IOPS (whole number) to set for a volume; used if Max IOPS is not specified by administrator or user] - - - clusterDefaultBurstIopsPercentOfMaxIops%3D[Burst IOPS is determined by (Min IOPS * clusterDefaultBurstIopsPercentOfMaxIops parameter) (can be a decimal value)] - - - - Example URL to add primary storage to &PRODUCT; based on the SolidFire plug-in (note that URL encoding is used with the value of the url key, so '%3A' equals ':','%3B' equals '&' and '%3D' equals '='): - - http://127.0.0.1:8080/client/api?command=createStoragePool - &scope=zone - &zoneId=cf4e6ddf-8ae7-4194-8270-d46733a52b55 - &name=SolidFire_121258566 - &url=MVIP%3D192.168.138.180%3A443 - %3BSVIP%3D192.168.56.7 - %3BclusterAdminUsername%3Dadmin - %3BclusterAdminPassword%3Dpassword - %3BclusterDefaultMinIops%3D200 - %3BclusterDefaultMaxIops%3D300 - %3BclusterDefaultBurstIopsPercentOfMaxIop%3D2.5 - &provider=SolidFire - &tags=SolidFire_SAN_1 - &capacityIops=4000000 - &capacityBytes=2251799813685248 - &hypervisor=Any - &response=json - &apiKey=VrrkiZQWFFgSdA6k3DYtoKLcrgQJjZXoSWzicHXt8rYd9Bl47p8L39p0p8vfDpiljtlcMLn_jatMSqCWv5Cs-Q&signature=wqf8KzcPpY2JmT1Sxk%2F%2BWbgX3l8%3D - -
-
diff --git a/docs/en-US/primary-storage-outage-and-data-loss.xml b/docs/en-US/primary-storage-outage-and-data-loss.xml deleted file mode 100644 index e68d1d98e1b..00000000000 --- a/docs/en-US/primary-storage-outage-and-data-loss.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Primary Storage Outage and Data Loss - When a primary storage outage occurs the hypervisor immediately stops all VMs stored on that storage device. Guests that are marked for HA will be restarted as soon as practical when the primary storage comes back on line. With NFS, the hypervisor may allow the virtual machines to continue running depending on the nature of the issue. For example, an NFS hang will cause the guest VMs to be suspended until storage connectivity is restored.Primary storage is not designed to be backed up. Individual volumes in primary storage can be backed up using snapshots. -
diff --git a/docs/en-US/primary-storage.xml b/docs/en-US/primary-storage.xml deleted file mode 100644 index 4ab37ef6f17..00000000000 --- a/docs/en-US/primary-storage.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Primary Storage - This section gives concepts and technical details about &PRODUCT; primary storage. For information about how to install and configure primary storage through the &PRODUCT; UI, see the Installation Guide. - - - - - - -
diff --git a/docs/en-US/private-public-template.xml b/docs/en-US/private-public-template.xml deleted file mode 100644 index 85565833f03..00000000000 --- a/docs/en-US/private-public-template.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Private and Public Templates - When a user creates a template, it can be designated private or public. - Private templates are only available to the user who created them. By default, an uploaded template is private. - When a user marks a template as “public,” the template becomes available to all users in all accounts in the user's domain, as well as users in any other domains that have access to the Zone where the template is stored. This depends on whether the Zone, in turn, was defined as private or public. A private Zone is assigned to a single domain, and a public Zone is accessible to any domain. If a public template is created in a private Zone, it is available only to users in the domain assigned to that Zone. If a public template is created in a public Zone, it is available to all users in all domains. -
diff --git a/docs/en-US/projects-overview.xml b/docs/en-US/projects-overview.xml deleted file mode 100644 index 4f9a833b5ed..00000000000 --- a/docs/en-US/projects-overview.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Overview of Projects - Projects are used to organize people and resources. &PRODUCT; users within a single domain can group themselves into project teams so they can collaborate and share virtual resources such as VMs, snapshots, templates, data disks, and IP addresses. &PRODUCT; tracks resource usage per project as well as per user, so the usage can be billed to either a user account or a project. For example, a private cloud within a software company might have all members of the QA department assigned to one project, so the company can track the resources used in testing while the project members can more easily isolate their efforts from other users of the same cloud - You can configure &PRODUCT; to allow any user to create a new project, or you can restrict that ability to just &PRODUCT; administrators. Once you have created a project, you become that project’s administrator, and you can add others within your domain to the project. &PRODUCT; can be set up either so that you can add people directly to a project, or so that you have to send an invitation which the recipient must accept. Project members can view and manage all virtual resources created by anyone in the project (for example, share VMs). A user can be a member of any number of projects and can switch views in the &PRODUCT; UI to show only project-related information, such as project VMs, fellow project members, project-related alerts, and so on. - The project administrator can pass on the role to another project member. The project administrator can also add more members, remove members from the project, set new resource limits (as long as they are below the global defaults set by the &PRODUCT; administrator), and delete the project. When the administrator removes a member from the project, resources created by that user, such as VM instances, remain with the project. This brings us to the subject of resource ownership and which resources can be used by a project. 
- Resources created within a project are owned by the project, not by any particular &PRODUCT; account, and they can be used only within the project. A user who belongs to one or more projects can still create resources outside of those projects, and those resources belong to the user’s account; they will not be counted against the project’s usage or resource limits. You can create project-level networks to isolate traffic within the project and provide network services such as port forwarding, load balancing, VPN, and static NAT. A project can also make use of certain types of resources from outside the project, if those resources are shared. For example, a shared network or public template is available to any project in the domain. A project can get access to a private template if the template’s owner will grant permission. A project can use any service offering or disk offering available in its domain; however, you cannot create private service and disk offerings at the project level. -
- diff --git a/docs/en-US/projects.xml b/docs/en-US/projects.xml deleted file mode 100644 index 39ce96bd3bc..00000000000 --- a/docs/en-US/projects.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Using Projects to Organize Users and Resources - - - - - - - - - diff --git a/docs/en-US/provisioning-auth-api.xml b/docs/en-US/provisioning-auth-api.xml deleted file mode 100644 index 0f28b1f3421..00000000000 --- a/docs/en-US/provisioning-auth-api.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Provisioning and Authentication API - &PRODUCT; expects that a customer will have their own user provisioning infrastructure. It provides APIs to integrate with these existing systems where the systems call out to &PRODUCT; to add/remove users. - &PRODUCT; supports pluggable authenticators. By default, &PRODUCT; assumes it is provisioned with the user’s password, and as a result authentication is done locally. However, external authentication is possible as well. For example, see Using an LDAP Server for User Authentication. -
diff --git a/docs/en-US/provisioning-steps-overview.xml b/docs/en-US/provisioning-steps-overview.xml deleted file mode 100644 index 5fb61963b4b..00000000000 --- a/docs/en-US/provisioning-steps-overview.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Overview of Provisioning Steps - After the Management Server is installed and running, you can add the compute resources for it to manage. For an overview of how a &PRODUCT; cloud infrastructure is organized, see . - To provision the cloud infrastructure, or to scale it up at any time, follow these procedures: - - Define regions (optional). See . - Add a zone to the region. See . - Add more pods to the zone (optional). See . - Add more clusters to the pod (optional). See . - Add more hosts to the cluster (optional). See . - Add primary storage to the cluster. See . - Add secondary storage to the zone. See . - Initialize and test the new cloud. See . - - When you have finished these steps, you will have a deployment with the following basic structure: - - - - - provisioning-overview.png: Conceptual overview of a basic deployment - -
diff --git a/docs/en-US/provisioning-steps.xml b/docs/en-US/provisioning-steps.xml deleted file mode 100644 index 04ece13938e..00000000000 --- a/docs/en-US/provisioning-steps.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Steps to Provisioning Your Cloud Infrastructure - This section tells how to add regions, zones, pods, clusters, hosts, storage, and networks to your cloud. If you are unfamiliar with these entities, please begin by looking through . - - - - - - - - - - diff --git a/docs/en-US/pvlan.xml b/docs/en-US/pvlan.xml deleted file mode 100644 index 38b25319faf..00000000000 --- a/docs/en-US/pvlan.xml +++ /dev/null @@ -1,247 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Isolation in Advanced Zone Using Private VLAN - Isolation of guest traffic in shared networks can be achieved by using Private VLANs - (PVLAN). PVLANs provide Layer 2 isolation between ports within the same VLAN. In a PVLAN-enabled - shared network, a user VM cannot reach other user VM though they can reach the DHCP server and - gateway, this would in turn allow users to control traffic within a network and help them deploy - multiple applications without communication between application as well as prevent communication - with other users’ VMs. - - - Isolate VMs in a shared networks by using Private VLANs. - - - Supported on KVM, XenServer, and VMware hypervisors - - - PVLAN-enabled shared network can be a part of multiple networks of a guest VM. - - -
- About Private VLAN - In an Ethernet switch, a VLAN is a broadcast domain where hosts can establish direct - communication with each another at Layer 2. Private VLAN is designed as an extension of VLAN - standard to add further segmentation of the logical broadcast domain. A regular VLAN is a - single broadcast domain, whereas a private VLAN partitions a larger VLAN broadcast domain into - smaller sub-domains. A sub-domain is represented by a pair of VLANs: a Primary VLAN and a - Secondary VLAN. The original VLAN that is being divided into smaller groups is called Primary, - which implies that all VLAN pairs in a private VLAN share the same Primary VLAN. All the - secondary VLANs exist only inside the Primary. Each Secondary VLAN has a specific VLAN ID - associated to it, which differentiates one sub-domain from another. - Three types of ports exist in a private VLAN domain, which essentially determine the - behaviour of the participating hosts. Each ports will have its own unique set of rules, which - regulate a connected host's ability to communicate with other connected host within the same - private VLAN domain. Configure each host that is part of a PVLAN pair can be by using one of - these three port designation: - - - Promiscuous: A promiscuous port can communicate with - all the interfaces, including the community and isolated host ports that belong to the - secondary VLANs. In Promiscuous mode, hosts are connected to promiscuous ports and are - able to communicate directly with resources on both primary and secondary VLAN. Routers, - DHCP servers, and other trusted devices are typically attached to promiscuous - ports. - - - Isolated VLANs: The ports within an isolated VLAN - cannot communicate with each other at the layer-2 level. The hosts that are connected to - Isolated ports can directly communicate only with the Promiscuous resources. If your - customer device needs to have access only to a gateway router, attach it to an isolated - port. 
- - - Community VLANs: The ports within a community VLAN - can communicate with each other and with the promiscuous ports, but they cannot - communicate with the ports in other communities at the layer-2 level. In a Community mode, - direct communication is permitted only with the hosts in the same community and those that - are connected to the Primary PVLAN in promiscuous mode. If your customer has two devices - that need to be isolated from other customers' devices, but to be able to communicate - among themselves, deploy them in community ports. - - - For further reading: - - - Understanding Private VLANs - - - Cisco Systems' Private VLANs: Scalable - Security in a Multi-Client Environment - - - Private VLAN (PVLAN) on vNetwork Distributed Switch - - Concept Overview (1010691) - - -
-
- Prerequisites - - - Use a PVLAN supported switch. - See Private VLAN Catalyst Switch Support Matrixfor more information. - - - All the layer 2 switches, which are PVLAN-aware, are connected to each other, and one - of them is connected to a router. All the ports connected to the host would be configured - in trunk mode. Open Management VLAN, Primary VLAN (public) and Secondary Isolated VLAN - ports. Configure the switch port connected to the router in PVLAN promiscuous trunk mode, - which would translate an isolated VLAN to primary VLAN for the PVLAN-unaware router. - Note that only Cisco Catalyst 4500 has the PVLAN promiscuous trunk mode to connect - both normal VLAN and PVLAN to a PVLAN-unaware switch. For the other Catalyst PVLAN support - switch, connect the switch to upper switch by using cables, one each for a PVLAN - pair. - - - Configure private VLAN on your physical switches out-of-band. - - - Before you use PVLAN on XenServer and KVM, enable Open vSwitch (OVS). - - OVS on XenServer and KVM does not support PVLAN natively. Therefore, &PRODUCT; - managed to simulate PVLAN on OVS for XenServer and KVM by modifying the flow - table. - - - -
-
- Creating a PVLAN-Enabled Guest Network - - - Log in to the &PRODUCT; UI as administrator. - - - In the left navigation, choose Infrastructure. - - - On Zones, click View More. - - - Click the zone to which you want to add a guest network. - - - Click the Physical Network tab. - - - Click the physical network you want to work with. - - - On the Guest node of the diagram, click Configure. - - - Click the Network tab. - - - Click Add guest network. - The Add guest network window is displayed. - - - Specify the following: - - - Name: The name of the network. This will be - visible to the user. - - - Description: The short description of the network - that can be displayed to users. - - - VLAN ID: The unique ID of the VLAN. - - - Secondary Isolated VLAN ID: The unique ID of the - Secondary Isolated VLAN. - For the description on Secondary Isolated VLAN, see . - - - Scope: The available scopes are Domain, Account, - Project, and All. - - - Domain: Selecting Domain limits the scope of - this guest network to the domain you specify. The network will not be available - for other domains. If you select Subdomain Access, the guest network is available - to all the sub domains within the selected domain. - - - Account: The account for which the guest - network is being created for. You must specify the domain the account belongs - to. - - - Project: The project for which the guest - network is being created for. You must specify the domain the project belongs - to. - - - All: The guest network is available for all - the domains, account, projects within the selected zone. - - - - - Network Offering: If the administrator has - configured multiple network offerings, select the one you want to use for this - network. - - - Gateway: The gateway that the guests should - use. - - - Netmask: The netmask in use on the subnet the - guests will use. - - - IP Range: A range of IP addresses that are - accessible from the Internet and are assigned to the guest VMs. 
- - - - - Network Domain: A custom DNS suffix at the level - of a network. If you want to assign a special domain name to the guest VM network, - specify a DNS suffix. - - - - - Click OK to confirm. - - -
-
diff --git a/docs/en-US/re-install-hosts.xml b/docs/en-US/re-install-hosts.xml deleted file mode 100644 index b8092adb44a..00000000000 --- a/docs/en-US/re-install-hosts.xml +++ /dev/null @@ -1,26 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Re-Installing Hosts - You can re-install a host after placing it in maintenance mode and then removing it. If a - host is down and cannot be placed in maintenance mode, it should still be removed before the - re-install. -
diff --git a/docs/en-US/region-add.xml b/docs/en-US/region-add.xml deleted file mode 100644 index 212047ad89b..00000000000 --- a/docs/en-US/region-add.xml +++ /dev/null @@ -1,151 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding Regions (optional) - Grouping your cloud resources into geographic regions is an optional step when provisioning the cloud. - For an overview of regions, see . -
- The First Region: The Default Region - If you do not take action to define regions, then all the zones in your cloud will be - automatically grouped into a single default region. This region is assigned the region - ID of 1. You can change the name or URL of the default region by displaying the region in - the &PRODUCT; UI and clicking the Edit button. -
-
- Adding a Region - Use these steps to add a second region in addition to the default region. - - Each region has its own &PRODUCT; instance. Therefore, the first step of creating a new region - is to install the Management Server software, on one or more nodes, in the - geographic area where you want to set up the new region. Use the steps in the - Installation guide. When you come to the step where you set up the database, use - the additional command-line flag -r <region_id> to set a - region ID for the new region. The default region is automatically assigned a - region ID of 1, so your first additional region might be region 2. - cloudstack-setup-databases cloud:<dbpassword>@localhost --deploy-as=root:<password> -e <encryption_type> -m <management_server_key> -k <database_key> -r <region_id> - - By the end of the installation procedure, the Management Server should have been started. Be sure that the Management Server installation was successful and complete. - Now add the new region to region 1 in &PRODUCT;. - - Log in to &PRODUCT; in the first region as root administrator - (that is, log in to <region.1.IP.address>:8080/client). - In the left navigation bar, click Regions. - Click Add Region. In the dialog, fill in the following fields: - - ID. A unique identifying number. Use the same number - you set in the database during Management Server installation in the new region; - for example, 2. - Name. Give the new region a descriptive name. - Endpoint. The URL where you can log in to the Management Server in the new region. - This has the format <region.2.IP.address>:8080/client. - - - - - Now perform the same procedure in reverse. Log in to region 2, and add region 1. - Copy the account, user, and domain tables from the region 1 database to the region 2 database. - In the following commands, it is assumed that you have set the root password on the - database, which is a &PRODUCT; recommended best practice. Substitute your own MySQL - root password. 
- - First, run this command to copy the contents of the database: - # mysqldump -u root -p<mysql_password> -h <region1_db_host> cloud account user domain > region1.sql - - Then run this command to put the data onto the region 2 database: - # mysql -u root -p<mysql_password> -h <region2_db_host> cloud < region1.sql - - - - Remove project accounts. Run these commands on the region 2 database: - mysql> delete from account where type = 5; - - Set the default zone as null: - mysql> update account set default_zone_id = null; - - Restart the Management Servers in region 2. - -
-
- Adding Third and Subsequent Regions - To add the third region, and subsequent additional regions, the steps are similar to those for adding the second region. - However, you must repeat certain steps additional times for each additional region: - - Install &PRODUCT; in each additional region. Set the region ID for each region during the database setup step. - cloudstack-setup-databases cloud:<dbpassword>@localhost --deploy-as=root:<password> -e <encryption_type> -m <management_server_key> -k <database_key> -r <region_id> - Once the Management Server is running, add your new region to all existing regions by - repeatedly using the Add Region button in the UI. For example, if you were adding - region 3: - - Log in to &PRODUCT; in the first region as root administrator - (that is, log in to <region.1.IP.address>:8080/client), and add a region with ID 3, the name of region 3, and the endpoint <region.3.IP.address>:8080/client. - Log in to &PRODUCT; in the second region as root administrator (that is, log in to <region.2.IP.address>:8080/client), and add a region with ID 3, the name of region 3, and the endpoint <region.3.IP.address>:8080/client. - - - Repeat the procedure in reverse to add all existing regions to the new region. For example, - for the third region, add the other two existing regions: - - Log in to &PRODUCT; in the third region as root administrator - (that is, log in to <region.3.IP.address>:8080/client). - Add a region with ID 1, the name of region 1, and the endpoint <region.1.IP.address>:8080/client. - Add a region with ID 2, the name of region 2, and the endpoint <region.2.IP.address>:8080/client. - - - Copy the account, user, and domain tables from any existing region's database to the new - region's database. - In the following commands, it is assumed that you have set the root password on the - database, which is a &PRODUCT; recommended best practice. Substitute your own MySQL - root password. 
- - First, run this command to copy the contents of the database: - # mysqldump -u root -p<mysql_password> -h <region1_db_host> cloud account user domain > region1.sql - - Then run this command to put the data onto the new region's database. For example, for region - 3: - # mysql -u root -p<mysql_password> -h <region3_db_host> cloud < region1.sql - - - - Remove project accounts. Run these commands on the region 3 database: - mysql> delete from account where type = 5; - - Set the default zone as null: - mysql> update account set default_zone_id = null; - - Restart the Management Servers in the new region. - -
-
- Deleting a Region - Log in to each of the other regions, navigate to the one you want to delete, and click Remove Region. - For example, to remove the third region in a 3-region cloud: - - Log in to <region.1.IP.address>:8080/client. - In the left navigation bar, click Regions. - Click the name of the region you want to delete. - Click the Remove Region button. - Repeat these steps for <region.2.IP.address>:8080/client. - -
-
diff --git a/docs/en-US/release-ip-address.xml b/docs/en-US/release-ip-address.xml deleted file mode 100644 index 9fdccd740fc..00000000000 --- a/docs/en-US/release-ip-address.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Releasing an IP Address - When the last rule for an IP address is removed, you can release that IP address. The IP address still belongs to the VPC; however, it can be picked up for any guest network again. - - Log in to the &PRODUCT; UI as an administrator or end user. - In the left navigation, choose Network. - Click the name of the network where you want to work with. - Click View IP Addresses. - Click the IP address you want to release. - - Click the Release IP button. - - - - - ReleaseIPButton.png: button to release an IP - - - -
diff --git a/docs/en-US/release-ip-for-vpc.xml b/docs/en-US/release-ip-for-vpc.xml deleted file mode 100644 index f827b671c03..00000000000 --- a/docs/en-US/release-ip-for-vpc.xml +++ /dev/null @@ -1,95 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Releasing an IP Address Alloted to a VPC - The IP address is a limited resource. If you no longer need a particular IP, you can - disassociate it from its VPC and return it to the pool of available addresses. An IP address can - be released from its tier, only when all the networking ( port forwarding, load balancing, or - StaticNAT ) rules are removed for this IP address. The released IP address will still belongs to - the same VPC. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account is listed in the page. - - - Click the Configure button of the VPC whose IP you want to release. - The VPC page is displayed where all the tiers you created are listed in a - diagram. - The following options are displayed. - - - Internal LB - - - Public LB IP - - - Static NAT - - - Virtual Machines - - - CIDR - - - The following router information is displayed: - - - Private Gateways - - - Public IP Addresses - - - Site-to-Site VPNs - - - Network ACL Lists - - - - - Select Public IP Addresses. - The IP Addresses page is displayed. - - - Click the IP you want to release. - - - In the Details tab, click the Release IP button - - - - - release-ip-icon.png: button to release an IP. - - - - -
diff --git a/docs/en-US/remove-member-from-project.xml b/docs/en-US/remove-member-from-project.xml deleted file mode 100644 index dcd3746158c..00000000000 --- a/docs/en-US/remove-member-from-project.xml +++ /dev/null @@ -1,44 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Removing a Member From a Project - When a member is removed from a project, the member’s resources continue to be owned by the project. The former project member cannot create any new resources within the project or use any of the project’s existing resources. - A member of a project can be removed by the project administrator, the domain administrator of the domain the project belongs to or of its parent domain, or the &PRODUCT; root administrator. - - Log in to the &PRODUCT; UI. - In the left navigation, click Projects. - In Select View, choose Projects. - Click the name of the project. - Click the Accounts tab. - Click the name of the member. - Click the Delete button. - - - - deletebutton.png: Removes a member - - -
- diff --git a/docs/en-US/remove-tier.xml b/docs/en-US/remove-tier.xml deleted file mode 100644 index 701645cc4ed..00000000000 --- a/docs/en-US/remove-tier.xml +++ /dev/null @@ -1,57 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Removing Tiers - You can remove a tier from a VPC. A removed tier cannot be revoked. When a tier is removed, - only the resources of the tier are expunged. All the network rules (port forwarding, load - balancing and staticNAT) and the IP addresses associated with the tier are removed. The IP addresses - will still belong to the same VPC. - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account are listed on the page. - - - Click the Configure button of the VPC for which you want to set up tiers. - The Configure VPC page is displayed. Locate the tier you want to work with. - - - Select the tier you want to remove. - - - In the Network Details tab, click the Delete Network button. - - - - - del-tier.png: button to remove a tier - - - Click Yes to confirm. Wait for some time for the tier to be removed. - -
diff --git a/docs/en-US/remove-vpc.xml b/docs/en-US/remove-vpc.xml deleted file mode 100644 index b373f1a52c3..00000000000 --- a/docs/en-US/remove-vpc.xml +++ /dev/null @@ -1,69 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Editing, Restarting, and Removing a Virtual Private Cloud - - Ensure that all the tiers are removed before you remove a VPC. - - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - In the Select view, select VPC. - All the VPCs that you have created for the account are listed on the page. - - - Select the VPC you want to work with. - - - In the Details tab, click the Remove VPC button - - - - - remove-vpc.png: button to remove a VPC - - - You can also remove the VPC by using the remove button in the Quick View. - You can edit the name and description of a VPC. To do that, select the VPC, then click - the Edit button. - - - - - edit-icon.png: button to edit a VPC - - - To restart a VPC, select the VPC, then click the Restart button. - - - - - restart-vpc.png: button to restart a VPC - - - - -
diff --git a/docs/en-US/removed-API-commands.xml b/docs/en-US/removed-API-commands.xml deleted file mode 100644 index 51bb7cf4828..00000000000 --- a/docs/en-US/removed-API-commands.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Removed API commands - - createConfiguration (Adds configuration value) - configureSimulator (Configures simulator) - -
- diff --git a/docs/en-US/removed-api-4.2.xml b/docs/en-US/removed-api-4.2.xml deleted file mode 100644 index 596d3163fe0..00000000000 --- a/docs/en-US/removed-api-4.2.xml +++ /dev/null @@ -1,144 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Deprecated APIs - - - deleteCiscoNexusVSM (Deletes a Cisco Nexus VSM device) - - - enableCiscoNexusVSM (Enables a Cisco Nexus VSM device) - - - disableCiscoNexusVSM (Disables a Cisco Nexus VSM device) - - - listCiscoNexusVSMs (Retrieves a Cisco Nexus 1000v Virtual Switch Manager device - associated with a Cluster) - - - addBaremetalHost (Adds a new host.) - - - addExternalFirewall (Adds an external firewall appliance) - - - deleteExternalFirewall (Deletes an external firewall appliance.) - - - listExternalFirewalls (Lists external firewall appliances.) - - - addExternalLoadBalancer (Adds F5 external load balancer appliance.) - - - deleteExternalLoadBalancer (Deletes a F5 external load balancer appliance added in a - zone.) - - - listExternalLoadBalancers (Lists F5 external load balancer appliances added in a - zone.) - - - createVolumeOnFiler (Creates a volume.) - - - destroyVolumeOnFiler (Destroys a volume.) - - - listVolumesOnFiler (Lists volumes.) - - - createLunOnFiler (Creates a LUN from a pool.) - - - destroyLunOnFiler (Destroys a LUN.) - - - listLunsOnFiler (Lists LUN.) - - - associateLun (Associates a LUN with a guest IQN.) - - - dissociateLun (Dissociates a LUN.) - - - createPool (Creates a pool.) - - - deletePool (Deletes a pool.) - - - modifyPool (Modifies pool.) - - - listPools (Lists pool.) - - - addF5LoadBalancer (Adds a F5 BigIP load balancer device.) - - - configureF5LoadBalancer (Configures a F5 load balancer device.) - - - deleteF5LoadBalancer (Deletes a F5 load balancer device.) - - - listF5LoadBalancers (Lists F5 load balancer devices.) - - - listF5LoadBalancerNetworks (Lists network that are using a F5 load balancer device.) - - - - addSrxFirewall (Adds a SRX firewall device.) - - - deleteSrxFirewall (Deletes a SRX firewall device.) 
- - - configureSrxFirewall (Configures a SRX firewall device) - - - listSrxFirewalls (Lists SRX firewall devices in a physical network) - - - listSrxFirewallNetworks (Lists network that are using SRX firewall device) - - - addNetscalerLoadBalancer (Adds a netscaler load balancer device) - - - deleteNetscalerLoadBalancer (Deletes a netscaler load balancer device) - - - configureNetscalerLoadBalancer (Configures a netscaler load balancer device) - - - listNetscalerLoadBalancers (Lists netscaler load balancer devices) - - - listNetscalerLoadBalancerNetworks (Lists network that are using a netscaler load - balancer device) - - -
diff --git a/docs/en-US/removing-hosts.xml b/docs/en-US/removing-hosts.xml deleted file mode 100644 index 468f36ecd3f..00000000000 --- a/docs/en-US/removing-hosts.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Removing Hosts - Hosts can be removed from the cloud as needed. The procedure to remove a host depends on the hypervisor type. - - -
diff --git a/docs/en-US/removing-vsphere-hosts.xml b/docs/en-US/removing-vsphere-hosts.xml deleted file mode 100644 index 3f819f06641..00000000000 --- a/docs/en-US/removing-vsphere-hosts.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Removing vSphere Hosts - To remove this type of host, first place it in maintenance mode, as described in . Then use &PRODUCT; to remove the host. &PRODUCT; will not direct commands to a host that has been removed using &PRODUCT;. However, the host may still exist in the vCenter cluster. -
diff --git a/docs/en-US/removing-xenserver-kvm-hosts.xml b/docs/en-US/removing-xenserver-kvm-hosts.xml deleted file mode 100644 index c7043723ff6..00000000000 --- a/docs/en-US/removing-xenserver-kvm-hosts.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Removing XenServer and KVM Hosts - A node cannot be removed from a cluster until it has been placed in maintenance mode. This will ensure that all of the VMs on it have been migrated to other Hosts. To remove a Host from the cloud: - - Place the node in maintenance mode. See . - For KVM, stop the cloud-agent service. - Use the UI option to remove the node. Then you may power down the Host, re-use its IP address, re-install it, etc. - -
diff --git a/docs/en-US/requirements-templates.xml b/docs/en-US/requirements-templates.xml deleted file mode 100644 index f434dbe871c..00000000000 --- a/docs/en-US/requirements-templates.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Requirements for Templates - - For XenServer, install PV drivers / Xen tools on each template that you create. This will enable live migration and clean guest shutdown. - For vSphere, install VMware Tools on each template that you create. This will enable console view to work properly. - -
diff --git a/docs/en-US/reserved-ip-addresses-non-csvms.xml b/docs/en-US/reserved-ip-addresses-non-csvms.xml deleted file mode 100644 index 0f20b634f11..00000000000 --- a/docs/en-US/reserved-ip-addresses-non-csvms.xml +++ /dev/null @@ -1,166 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- IP Reservation in Isolated Guest Networks - In isolated guest networks, a part of the guest IP address space can be reserved for - non-&PRODUCT; VMs or physical servers. To do so, you configure a range of Reserved IP addresses - by specifying the CIDR when a guest network is in Implemented state. If your customers wish to - have non-&PRODUCT; controlled VMs or physical servers on the same network, they can share a part - of the IP address space that is primarily provided to the guest network. - In an Advanced zone, an IP address range or a CIDR is assigned to a network when the network - is defined. The &PRODUCT; virtual router acts as the DHCP server and uses CIDR for assigning IP - addresses to the guest VMs. If you decide to reserve CIDR for non-&PRODUCT; purposes, you can - specify a part of the IP address range or the CIDR that should only be allocated by the DHCP - service of the virtual router to the guest VMs created in &PRODUCT;. The remaining IPs in that - network are called Reserved IP Range. When IP reservation is configured, the administrator can - add additional VMs or physical servers that are not part of &PRODUCT; to the same network and - assign them the Reserved IP addresses. &PRODUCT; guest VMs cannot acquire IPs from the Reserved - IP Range. -
- IP Reservation Considerations - Consider the following before you reserve an IP range for non-&PRODUCT; machines: - - - IP Reservation is supported only in Isolated networks. - - - IP Reservation can be applied only when the network is in Implemented state. - - - No IP Reservation is done by default. - - - Guest VM CIDR you specify must be a subset of the network CIDR. - - - Specify a valid Guest VM CIDR. IP Reservation is applied only if no active IPs exist - outside the Guest VM CIDR. - You cannot apply IP Reservation if any VM is alloted with an IP address that is - outside the Guest VM CIDR. - - - To reset an existing IP Reservation, apply IP reservation by specifying the value of - network CIDR in the CIDR field. - For example, the following table describes three scenarios of guest network - creation: - - - - - - - - - - Case - CIDR - Network CIDR - Reserved IP Range for Non-&PRODUCT; VMs - Description - - - - - 1 - 10.1.1.0/24 - None - None - No IP Reservation. - - - 2 - 10.1.1.0/26 - 10.1.1.0/24 - 10.1.1.64 to 10.1.1.254 - IP Reservation configured by the UpdateNetwork API with - guestvmcidr=10.1.1.0/26 or enter 10.1.1.0/26 in the CIDR field in the - UI. - - - 3 - 10.1.1.0/24 - None - None - Removing IP Reservation by the UpdateNetwork API with - guestvmcidr=10.1.1.0/24 or enter 10.1.1.0/24 in the CIDR field in the UI. - - - - - - - -
-
- Limitations - - - The IP Reservation is not supported if active IPs are found outside the Guest VM - CIDR. - - - Upgrading a network offering which causes a change in CIDR (such as upgrading an - offering with no external devices to one with external devices) renders any existing IP Reservation - void. Reconfigure IP Reservation in the new re-implemented network. - -
-
- Best Practices - Apply IP Reservation to the guest network as soon as the network state changes to - Implemented. If you apply reservation soon after the first guest VM is deployed, fewer - conflicts occur while applying reservation. -
-
- Reserving an IP Range - - - Log in to the &PRODUCT; UI as an administrator or end user. - - - In the left navigation, choose Network. - - - Click the name of the network you want to modify. - - - In the Details tab, click Edit. - - - - - edit-icon.png: button to edit a network - - - The CIDR field changes to editable one. - - - In CIDR, specify the Guest VM CIDR. - - - Click Apply. - Wait for the update to complete. The Network CIDR and the Reserved IP Range are - displayed on the Details page. - - -
-
diff --git a/docs/en-US/reset-ssh-key-dev.xml b/docs/en-US/reset-ssh-key-dev.xml deleted file mode 100644 index 1a904e566ef..00000000000 --- a/docs/en-US/reset-ssh-key-dev.xml +++ /dev/null @@ -1,27 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Resetting SSH Keys to Access VMs - Use the resetSSHKeyForVirtualMachine API to set or reset the SSH keypair assigned to a - virtual machine. With the addition of this feature, a lost or compromised SSH keypair can be - changed, and the user can access the VM by using the new keypair. Just create or register a new - keypair, then call resetSSHKeyForVirtualMachine. -
diff --git a/docs/en-US/reset-volume-on-reboot.xml b/docs/en-US/reset-volume-on-reboot.xml deleted file mode 100644 index 6c21d1fdca5..00000000000 --- a/docs/en-US/reset-volume-on-reboot.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- - Reset VM to New Root Disk on Reboot - You can specify that you want to discard the root disk and create a new one whenever a given - VM is rebooted. This is useful for secure environments that need a fresh start on every boot and - for desktops that should not retain state. The IP address of the VM will not change due to this - operation. - To enable root disk reset on VM reboot: - When creating a new service offering, set the parameter isVolatile to True. VMs created from - this service offering will have their disks reset upon reboot. See . -
\ No newline at end of file diff --git a/docs/en-US/resizing-volumes.xml b/docs/en-US/resizing-volumes.xml deleted file mode 100644 index 42b584bf6c6..00000000000 --- a/docs/en-US/resizing-volumes.xml +++ /dev/null @@ -1,98 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Resizing Volumes - &PRODUCT; provides the ability to resize data disks; &PRODUCT; controls volume size by using - disk offerings. This provides &PRODUCT; administrators with the flexibility to choose how much - space they want to make available to the end users. Volumes within the disk offerings with the - same storage tag can be resized. For example, if you only want to offer 10, 50, and 100 GB - offerings, the allowed resize should stay within those limits. That implies if you define a 10 - GB, a 50 GB and a 100 GB disk offerings, a user can upgrade from 10 GB to 50 GB, or 50 GB to 100 - GB. If you create a custom-sized disk offering, then you have the option to resize the volume by - specifying a new, larger size. - Additionally, using the resizeVolume API, a data volume can be moved from a static disk - offering to a custom disk offering with the size specified. This functionality allows those who - might be billing by certain volume sizes or disk offerings to stick to that model, while - providing the flexibility to migrate to whatever custom size necessary. - This feature is supported on KVM, XenServer, and VMware hosts. However, shrinking volumes is - not supported on VMware hosts. - Before you try to resize a volume, consider the following: - - - The VMs associated with the volume are stopped. - - - The data disks associated with the volume are removed. - - - When a volume is shrunk, the disk associated with it is simply truncated, and doing so - would put its content at risk of data loss. Therefore, resize any partitions or file systems - before you shrink a data disk so that all the data is moved off from that disk. - - - To resize a volume: - - - Log in to the &PRODUCT; UI as a user or admin. - - - In the left navigation bar, click Storage. - - - In Select View, choose Volumes. - - - Select the volume name in the Volumes list, then click the Resize Volume button - - - - - resize-volume-icon.png: button to display the resize volume option. 
- - - - - In the Resize Volume pop-up, choose desired characteristics for the storage. - - - - - - resize-volume.png: option to resize a volume. - - - - - If you select Custom Disk, specify a custom size. - - - Click Shrink OK to confirm that you are reducing the size of a volume. - This parameter protects against inadvertent shrinking of a disk, which might lead to - the risk of data loss. You must sign off that you know what you are doing. - - - - - Click OK. - - -
diff --git a/docs/en-US/response-formats.xml b/docs/en-US/response-formats.xml deleted file mode 100644 index b21f4ab668b..00000000000 --- a/docs/en-US/response-formats.xml +++ /dev/null @@ -1,58 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Response Formats: XML and JSON - CloudStack supports two formats as the response to an API call. The default response is XML. If you would like the response to be in JSON, add &response=json to the Command String. - The two response formats differ in how they handle blank fields. In JSON, if there is no value for a response field, it will not appear in the response. If all the fields were empty, there might be no response at all. - In XML, even if there is no value to be returned, an empty field will be returned as a placeholder XML element. - Sample XML Response: - - <listipaddressesresponse> - <allocatedipaddress> - <ipaddress>192.168.10.141</ipaddress> - <allocated>2009-09-18T13:16:10-0700</allocated> - <zoneid>4</zoneid> - <zonename>WC</zonename> - <issourcenat>true</issourcenat> - </allocatedipaddress> - </listipaddressesresponse> - - Sample JSON Response: - - { "listipaddressesresponse" : - { "allocatedipaddress" : - [ - { - "ipaddress" : "192.168.10.141", - "allocated" : "2009-09-18T13:16:10-0700", - "zoneid" : "4", - "zonename" : "WC", - "issourcenat" : "true" - } - ] - } - } - -
diff --git a/docs/en-US/responses.xml b/docs/en-US/responses.xml deleted file mode 100644 index 9f70c871932..00000000000 --- a/docs/en-US/responses.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Responses - - - -
diff --git a/docs/en-US/roles.xml b/docs/en-US/roles.xml deleted file mode 100644 index 775e30b25f5..00000000000 --- a/docs/en-US/roles.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Roles - - The &PRODUCT; API supports three access roles: - - Root Admin. Access to all features of the cloud, including both virtual and physical resource management. - Domain Admin. Access to only the virtual resources of the clouds that belong to the administrator’s domain. - User. Access to only the features that allow management of the user’s virtual instances, storage, and network. - -
- diff --git a/docs/en-US/root-admin-ui-overview.xml b/docs/en-US/root-admin-ui-overview.xml deleted file mode 100644 index f59aaea55ab..00000000000 --- a/docs/en-US/root-admin-ui-overview.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Root Administrator's UI Overview - The &PRODUCT; UI helps the &PRODUCT; administrator provision, view, and manage the cloud infrastructure, domains, user accounts, projects, and configuration settings. The first time you start the UI after a fresh Management Server installation, you can choose to follow a guided tour to provision your cloud infrastructure. On subsequent logins, the dashboard of the logged-in user appears. The various links in this screen and the navigation bar on the left provide access to a variety of administrative functions. The root administrator can also use the UI to perform all the same tasks that are present in the end-user’s UI. -
diff --git a/docs/en-US/runtime-allocation-virtual-network-resources.xml b/docs/en-US/runtime-allocation-virtual-network-resources.xml deleted file mode 100644 index 479f069680f..00000000000 --- a/docs/en-US/runtime-allocation-virtual-network-resources.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Runtime Allocation of Virtual Network Resources - When you define a new virtual network, all your settings for that network are stored in - &PRODUCT;. The actual network resources are activated only when the first virtual - machine starts in the network. When all virtual machines have left the virtual network, the - network resources are garbage collected so they can be allocated again. This helps to - conserve network resources. -
diff --git a/docs/en-US/runtime-behavior-of-primary-storage.xml b/docs/en-US/runtime-behavior-of-primary-storage.xml deleted file mode 100644 index 5e17a4f77a4..00000000000 --- a/docs/en-US/runtime-behavior-of-primary-storage.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Runtime Behavior of Primary Storage - Root volumes are created automatically when a virtual machine is created. Root volumes are deleted when the VM is destroyed. Data volumes can be created and dynamically attached to VMs. Data volumes are not deleted when VMs are destroyed. - Administrators should monitor the capacity of primary storage devices and add additional primary storage as needed. See the Advanced Installation Guide. - Administrators add primary storage to the system by creating a &PRODUCT; storage pool. Each storage pool is associated with a cluster or a zone. - With regards to data disks, when a user executes a Disk Offering to create a data disk, the information is initially written to the CloudStack database only. Upon the first request that the data disk be attached to a VM, CloudStack determines what storage to place the volume on and space is taken from that storage (either from preallocated storage or from a storage system (ex. a SAN), depending on how the primary storage was added to CloudStack). -
diff --git a/docs/en-US/runtime-internal-comm-req.xml b/docs/en-US/runtime-internal-comm-req.xml deleted file mode 100644 index f4539dd0307..00000000000 --- a/docs/en-US/runtime-internal-comm-req.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Runtime Internal Communications Requirements - - - The Management Servers communicate with each other to coordinate tasks. This - communication uses TCP on ports 8250 and 9090. - - - The console proxy VMs connect to all hosts in the zone over the management traffic - network. Therefore the management traffic network of any given pod in the zone must have - connectivity to the management traffic network of all other pods in the zone. - - - The secondary storage VMs and console proxy VMs connect to the Management Server on - port 8250. If you are using multiple Management Servers, the load balanced IP address of the - Management Servers on port 8250 must be reachable. - - -
diff --git a/docs/en-US/scheduled-maintenance-maintenance-mode-hosts.xml b/docs/en-US/scheduled-maintenance-maintenance-mode-hosts.xml deleted file mode 100644 index 6b736e4eb11..00000000000 --- a/docs/en-US/scheduled-maintenance-maintenance-mode-hosts.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Scheduled Maintenance and Maintenance Mode for Hosts - You can place a host into maintenance mode. When maintenance mode is activated, the host becomes unavailable to receive new guest VMs, and the guest VMs already running on the host are seamlessly migrated to another host not in maintenance mode. This migration uses live migration technology and does not interrupt the execution of the guest. - - -
diff --git a/docs/en-US/search-base.xml b/docs/en-US/search-base.xml deleted file mode 100644 index b8fc0920158..00000000000 --- a/docs/en-US/search-base.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Search Base -An LDAP query is relative to a given node of the LDAP directory tree, called the search base. The search base is the distinguished name (DN) of a level of the directory tree below which all users can be found. The users can be in the immediate base directory or in some subdirectory. The search base may be equivalent to the organization, group, or domain name. The syntax for writing a DN varies depending on which LDAP server you are using. A full discussion of distinguished names is outside the scope of our documentation. The following table shows some examples of search bases to find users in the testing department. - - - - - - LDAP Server - Example Search Base DN - - - - - ApacheDS - ou=testing,o=project - - - Active Directory - OU=testing, DC=company - - - - - - -
diff --git a/docs/en-US/secondary-storage-add.xml b/docs/en-US/secondary-storage-add.xml deleted file mode 100644 index 9dd1e7d9319..00000000000 --- a/docs/en-US/secondary-storage-add.xml +++ /dev/null @@ -1,87 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Add Secondary Storage -
- System Requirements for Secondary Storage - - NFS storage appliance or Linux NFS server - (Optional) OpenStack Object Storage (Swift) (see http://swift.openstack.org) - 100GB minimum capacity - A secondary storage device must be located in the same zone as the guest VMs it serves. - Each Secondary Storage server must be available to all hosts in the zone. - -
-
- Adding Secondary Storage - When you create a new zone, the first secondary storage is added as part of that procedure. You can add secondary storage servers at any time to add more servers to an existing zone. - Be sure there is nothing stored on the server. Adding the server to &PRODUCT; will destroy any existing data. - - To prepare for the zone-based Secondary Staging Store, you should have created and mounted an NFS share during Management Server installation. See .See Preparing NFS Shares in the Installation Guide. - Make sure you prepared the system VM template during Management Server installation. See .See Prepare the System VM Template in the Installation Guide. - Log in to the &PRODUCT; UI as root administrator. - In the left navigation bar, click Infrastructure. - In Secondary Storage, click View All. - Click Add Secondary Storage. - Fill in the following fields: - - Name. Give the storage a descriptive name. - Provider. Choose S3, Swift, or NFS, then fill in the related fields which appear. - The fields will vary depending on the storage provider; for more information, consult the - provider's documentation (such as the S3 or Swift website). - NFS can be used for zone-based storage, and the others for region-wide storage. - You can use only a single S3 or Swift account per region. - Create NFS Secondary Staging Store. This box must always be checked. - Even if the UI allows you to uncheck this box, do not do so. - This checkbox and the three fields below it must be filled in. - Even when Swift or S3 is used as the secondary storage provider, an NFS - staging storage in each zone is still required. - Zone. The zone where the NFS Secondary Staging Store is to be located. - NFS server. The name of the zone's Secondary Staging Store. - Path. The path to the zone's Secondary Staging Store. - - - -
-
- Adding an NFS Secondary Staging Store for Each Zone - Every zone must have at least one NFS store provisioned; multiple NFS servers are - allowed per zone. To provision an NFS Staging Store for a zone: - - Log in to the &PRODUCT; UI as root administrator. - In the left navigation bar, click Infrastructure. - In Secondary Storage, click View All. - In Select View, choose Secondary Staging Store. - Click the Add NFS Secondary Staging Store button. - Fill out the dialog box fields, then click OK: - - Zone. The zone where the NFS Secondary Staging Store is to be located. - NFS server. The name of the zone's Secondary Staging Store. - Path. The path to the zone's Secondary Staging Store. - - - -
-
diff --git a/docs/en-US/secondary-storage-outage-and-data-loss.xml b/docs/en-US/secondary-storage-outage-and-data-loss.xml deleted file mode 100644 index 42ab7d47188..00000000000 --- a/docs/en-US/secondary-storage-outage-and-data-loss.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Secondary Storage Outage and Data Loss - For a Zone that has only one secondary storage server, a secondary storage outage will have feature level impact to the system but will not impact running guest VMs. It may become impossible to create a VM with the selected template for a user. A user may also not be able to save snapshots or examine/restore saved snapshots. These features will automatically be available when the secondary storage comes back online. - Secondary storage data loss will impact recently added user data including templates, snapshots, and ISO images. Secondary storage should be backed up periodically. Multiple secondary storage servers can be provisioned within each zone to increase the scalability of the system. -
diff --git a/docs/en-US/secondary-storage-vm.xml b/docs/en-US/secondary-storage-vm.xml deleted file mode 100644 index 34015c32a91..00000000000 --- a/docs/en-US/secondary-storage-vm.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Secondary Storage VM - In addition to the hosts, &PRODUCT;’s Secondary Storage VM mounts and writes to secondary storage. - Submissions to secondary storage go through the Secondary Storage VM. The Secondary Storage VM can retrieve templates and ISO images from URLs using a variety of protocols. - The secondary storage VM provides a background task that takes care of a variety of secondary storage activities: downloading a new template to a Zone, copying templates between Zones, and snapshot backups. - The administrator can log in to the secondary storage VM if needed. - -
- diff --git a/docs/en-US/secondary-storage.xml b/docs/en-US/secondary-storage.xml deleted file mode 100644 index 4a01c27f72d..00000000000 --- a/docs/en-US/secondary-storage.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Secondary Storage - This section gives concepts and technical details about &PRODUCT; secondary storage. For information about how to install and configure secondary storage through the &PRODUCT; UI, see the Advanced Installation Guide. - -
- diff --git a/docs/en-US/security-groups-advanced-zones.xml b/docs/en-US/security-groups-advanced-zones.xml deleted file mode 100644 index bfae1883cc9..00000000000 --- a/docs/en-US/security-groups-advanced-zones.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Security Groups in Advanced Zones (KVM Only) - &PRODUCT; provides the ability to use security groups to provide isolation between - guests on a single shared, zone-wide network in an advanced zone where KVM is the - hypervisor. Using security groups in advanced zones rather than multiple VLANs allows a greater range - of options for setting up guest isolation in a cloud. - - Limitations - The following are not supported for this feature: - - - - Two IP ranges with the same VLAN and different gateway or netmask in security - group-enabled shared network. - - - Two IP ranges with the same VLAN and different gateway or netmask in - account-specific shared networks. - - - Multiple VLAN ranges in security group-enabled shared network. - - - Multiple VLAN ranges in account-specific shared networks. - - - Security groups must be enabled in the zone in order for this feature to be used. -
diff --git a/docs/en-US/security-groups.xml b/docs/en-US/security-groups.xml deleted file mode 100644 index 6b36650005e..00000000000 --- a/docs/en-US/security-groups.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Security Groups - - - - - -
diff --git a/docs/en-US/security-req.xml b/docs/en-US/security-req.xml deleted file mode 100644 index a4869b43a70..00000000000 --- a/docs/en-US/security-req.xml +++ /dev/null @@ -1,24 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Security Requirements - The public Internet must not be able to access port 8096 or port 8250 on the Management Server. -
diff --git a/docs/en-US/send-projects-membership-invitation.xml b/docs/en-US/send-projects-membership-invitation.xml deleted file mode 100644 index 164235f2abe..00000000000 --- a/docs/en-US/send-projects-membership-invitation.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Sending Project Membership Invitations - Use these steps to add a new member to a project if the invitations feature is enabled in the cloud as described in . If the invitations feature is not turned on, use the procedure in Adding Project Members From the UI. - - Log in to the &PRODUCT; UI. - In the left navigation, click Projects. - In Select View, choose Projects. - Click the name of the project you want to work with. - Click the Invitations tab. - In Add by, select one of the following: - - Account – The invitation will appear in the user’s Invitations tab in the Project View. See Using the Project View. - Email – The invitation will be sent to the user’s email address. Each emailed invitation includes a unique code called a token which the recipient will provide back to &PRODUCT; when accepting the invitation. Email invitations will work only if the global parameters related to the SMTP server have been set. See . - - Type the user name or email address of the new member you want to add, and click Invite. Type the &PRODUCT; user name if you chose Account in the previous step. If you chose Email, type the email address. You can invite only people who have an account in this cloud within the same domain as the project. However, you can send the invitation to any email address. - To view and manage the invitations you have sent, return to this tab. When an invitation is accepted, the new member will appear in the project’s Accounts tab. - -
diff --git a/docs/en-US/separate_storage_network.xml b/docs/en-US/separate_storage_network.xml deleted file mode 100644 index c3f6330cb14..00000000000 --- a/docs/en-US/separate_storage_network.xml +++ /dev/null @@ -1,24 +0,0 @@ - -%BOOK_ENTITIES; -]> - - -
- Separate Storage Network - In the large-scale redundant setup described in the previous section, storage traffic can overload the management network. A separate storage network is optional for deployments. Storage protocols such as iSCSI are sensitive to network delays. A separate storage network ensures guest network traffic contention does not impact storage performance. -
\ No newline at end of file diff --git a/docs/en-US/service-offerings.xml b/docs/en-US/service-offerings.xml deleted file mode 100644 index 5283c05afa7..00000000000 --- a/docs/en-US/service-offerings.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Service Offerings - vCenter Maintenance Mode - XenServer and Maintenance Mode - vCenter Maintenance Mode - XenServer and Maintenance Mode -
diff --git a/docs/en-US/set-database-buffer-pool-size.xml b/docs/en-US/set-database-buffer-pool-size.xml deleted file mode 100644 index 8265ae544f2..00000000000 --- a/docs/en-US/set-database-buffer-pool-size.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Set Database Buffer Pool Size - It is important to provide enough memory space for the MySQL database to cache data and indexes: - - Edit the MySQL configuration file:/etc/my.cnf - Insert the following line in the [mysqld] section, below the datadir line. Use a value that is appropriate for your situation. We recommend setting the buffer pool at 40% of RAM if MySQL is on the same server as the management server or 70% of RAM if MySQL has a dedicated server. The following example assumes a dedicated server with 1024M of RAM. - innodb_buffer_pool_size=700M - Restart the MySQL service.# service mysqld restart - - For more information about the buffer pool, see "The InnoDB Buffer Pool" at MySQL Reference Manual. -
- diff --git a/docs/en-US/set-global-project-resource-limits.xml b/docs/en-US/set-global-project-resource-limits.xml deleted file mode 100644 index 8ec13259051..00000000000 --- a/docs/en-US/set-global-project-resource-limits.xml +++ /dev/null @@ -1,82 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Setting the Global Project Resource Limits - - - Log in as administrator to the &PRODUCT; UI. - - - In the left navigation, click Global Settings. - - - In the search box, type max.projects and click the search button. - - - In the search results, you will see the parameters you can use to set per-project - maximum resource amounts that apply to all projects in the cloud. No project can have more - resources, but an individual project can have lower limits. Click the edit button to set - each parameter. - - - - - editbutton.png: Edits parameters - - - - - - - max.project.public.ips - Maximum number of public IP addresses that can be owned by any project in - the cloud. See About Public IP Addresses. - - - max.project.snapshots - Maximum number of snapshots that can be owned by any project in the - cloud. See Working with Snapshots. - - - max.project.templates - Maximum number of templates that can be owned by any project in the - cloud. See Working with Templates. - - - max.project.uservms - Maximum number of guest virtual machines that can be owned by any project - in the cloud. See Working With Virtual Machines. - - - max.project.volumes - Maximum number of data volumes that can be owned by any project in the - cloud. See Working with Volumes. - - - - - - - Restart the Management Server. - # service cloudstack-management restart - - -
diff --git a/docs/en-US/set-monitor-total-vm-limits-per-host.xml b/docs/en-US/set-monitor-total-vm-limits-per-host.xml deleted file mode 100644 index 0cc247505af..00000000000 --- a/docs/en-US/set-monitor-total-vm-limits-per-host.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Set and Monitor Total VM Limits per Host - The &PRODUCT; administrator should monitor the total number of VM instances in each cluster, and disable allocation to the cluster if the total is approaching the maximum that the hypervisor can handle. Be sure to leave a safety margin to allow for the possibility of one or more hosts failing, which would increase the VM load on the other hosts as the VMs are automatically redeployed. Consult the documentation for your chosen hypervisor to find the maximum permitted number of VMs per host, then use &PRODUCT; global configuration settings to set this as the default limit. Monitor the VM activity in each cluster at all times. Keep the total number of VMs below a safe level that allows for the occasional host failure. For example, if there are N hosts in the cluster, and you want to allow for one host in the cluster to be down at any given time, the total number of VM instances you can permit in the cluster is at most (N-1) * (per-host-limit). Once a cluster reaches this number of VMs, use the &PRODUCT; UI to disable allocation of more VMs to the cluster. -
- diff --git a/docs/en-US/set-per-project-resource-limits.xml b/docs/en-US/set-per-project-resource-limits.xml deleted file mode 100644 index a0f64ea5a38..00000000000 --- a/docs/en-US/set-per-project-resource-limits.xml +++ /dev/null @@ -1,55 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Setting Per-Project Resource Limits - The &PRODUCT; root administrator or the domain administrator of the domain where the project - resides can set new resource limits for an individual project. The project owner can set - resource limits only if the owner is also a domain or root administrator. - The new limits must be below the global default limits set by the &PRODUCT; administrator - (as described in ). If the project already - owns more of a given type of resource than the new maximum, the resources are not affected; - however, the project can not add any new resources of that type until the total drops below the - new limit. - - - Log in as administrator to the &PRODUCT; UI. - - - In the left navigation, click Projects. - - - In Select View, choose Projects. - - - Click the name of the project you want to work with. - - - Click the Resources tab. This tab lists the current maximum amount that the project is - allowed to own for each type of resource. - - - Type new values for one or more resources. - - - Click Apply. - - -
diff --git a/docs/en-US/set-projects-creator-permissions.xml b/docs/en-US/set-projects-creator-permissions.xml deleted file mode 100644 index dd9cfe95d56..00000000000 --- a/docs/en-US/set-projects-creator-permissions.xml +++ /dev/null @@ -1,62 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Setting Project Creator Permissions - You can configure &PRODUCT; to allow any user to create a new project, or you can restrict - that ability to just &PRODUCT; administrators. - - - Log in as administrator to the &PRODUCT; UI. - - - In the left navigation, click Global Settings. - - - In the search box, type allow.user.create.projects. - - - Click the edit button to set the parameter. - - - - - editbutton.png: Edits parameters - - - - - - - allow.user.create.projects - Set to true to allow end users to create projects. Set to false if you - want only the &PRODUCT; root administrator and domain administrators to create - projects. - - - - - - - Restart the Management Server. - # service cloudstack-management restart - - -
diff --git a/docs/en-US/set-resource-limits-for-projects.xml b/docs/en-US/set-resource-limits-for-projects.xml deleted file mode 100644 index 669ca259372..00000000000 --- a/docs/en-US/set-resource-limits-for-projects.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Setting Resource Limits for Projects - The &PRODUCT; administrator can set global default limits to control the amount of resources - that can be owned by each project in the cloud. This serves to prevent uncontrolled usage of - resources such as snapshots, IP addresses, and virtual machine instances. Domain administrators - can override these resource limits for individual projects within their domains, as long as the - new limits are below the global defaults set by the &PRODUCT; root administrator. The root - administrator can also set lower resource limits for any project in the cloud. - - -
diff --git a/docs/en-US/set-up-invitations.xml b/docs/en-US/set-up-invitations.xml deleted file mode 100644 index 180c041e87e..00000000000 --- a/docs/en-US/set-up-invitations.xml +++ /dev/null @@ -1,96 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Setting Up Invitations - &PRODUCT; can be set up either so that project administrators can add people directly to a project, or so that it is necessary to send an invitation which the recipient must accept. The invitation can be sent by email or through the user’s &PRODUCT; account. If you want administrators to use invitations to add members to projects, turn on and set up the invitations feature in &PRODUCT;. - - Log in as administrator to the &PRODUCT; UI. - In the left navigation, click Global Settings. - In the search box, type project and click the search button. - - - - searchbutton.png: Searches projects - - In the search results, you can see a few other parameters you need to set to control how - invitations behave. The table below shows global configuration parameters related to - project invitations. Click the edit button to set each parameter. - - - - - Configuration Parameters - Description - - - - - project.invite.required - Set to true to turn on the invitations feature. - - - - - project.email.sender - The email address to show in the From field of invitation emails. - - - - project.invite.timeout - Amount of time to allow for a new member to respond to the invitation. - - - - project.smtp.host - Name of the host that acts as an email server to handle invitations. - - - - project.smtp.password - (Optional) Password required by the SMTP server. You must also set project.smtp.username and set project.smtp.useAuth to true. - - - - project.smtp.port - SMTP server’s listening port. - - - - project.smtp.useAuth - Set to true if the SMTP server requires a username and password. - - - project.smtp.username - (Optional) User name required by the SMTP server for authentication. You must also set project.smtp.password and set project.smtp.useAuth to true.. - - - - - - Restart the Management Server: - service cloudstack-management restart - - -
- diff --git a/docs/en-US/set-up-network-for-users.xml b/docs/en-US/set-up-network-for-users.xml deleted file mode 100644 index c22babc7232..00000000000 --- a/docs/en-US/set-up-network-for-users.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Setting Up Networking for Users - - - - - diff --git a/docs/en-US/set-usage-limit.xml b/docs/en-US/set-usage-limit.xml deleted file mode 100644 index 5e2d770c7e0..00000000000 --- a/docs/en-US/set-usage-limit.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Setting Usage Limits - &PRODUCT; provides several administrator control points for capping resource usage by users. Some of these limits are global configuration parameters. Others are applied at the ROOT domain and may be overridden on a per-account basis. - Aggregate limits may be set on a per-domain basis. For example, you may limit a domain and all subdomains to the creation of 100 VMs. - This section covers the following topics: -
diff --git a/docs/en-US/set-zone-vlan-run-vm-max.xml b/docs/en-US/set-zone-vlan-run-vm-max.xml deleted file mode 100644 index 2159b753ce7..00000000000 --- a/docs/en-US/set-zone-vlan-run-vm-max.xml +++ /dev/null @@ -1,65 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Setting Zone VLAN and Running VM Maximums - In the external networking case, every VM in a zone must have a unique guest IP address. - There are two variables that you need to consider in determining how to configure &PRODUCT; - to support this: how many Zone VLANs do you expect to have and how many VMs do you expect to - have running in the Zone at any one time. - Use the following table to determine how to configure &PRODUCT; for your - deployment. - - - - - guest.vlan.bits - Maximum Running VMs per Zone - Maximum Zone VLANs - - - - - 12 - 4096 - 4094 - - - 11 - 8192 - 2048 - - - 10 - 16384 - 1024 - - - 9 - 32768 - 512 - - - - - Based on your deployment's needs, choose the appropriate value of guest.vlan.bits. Set it as - described in Edit the Global Configuration Settings (Optional) section and restart the - Management Server.
diff --git a/docs/en-US/shared-networks.xml b/docs/en-US/shared-networks.xml deleted file mode 100644 index 83d0e4eea6f..00000000000 --- a/docs/en-US/shared-networks.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Shared Networks - A shared network can be accessed by virtual machines that belong to many different accounts. - Network Isolation on shared networks is accomplished by using techniques such as security - groups, which is supported only in Basic zones in &PRODUCT; 3.0.3 and later versions. - - - Shared Networks are created by the administrator - - - Shared Networks can be designated to a certain domain - - - Shared Network resources such as VLAN and physical network that it maps to are - designated by the administrator - - - Shared Networks can be isolated by security groups - - - Public Network is a shared network that is not shown to the end users - - - Source NAT per zone is not supported when the service provider is virtual router. - However, Source NAT per account is supported with virtual router in a Shared Network. - - - For information, see . -
diff --git a/docs/en-US/signing-api-calls-python.xml b/docs/en-US/signing-api-calls-python.xml deleted file mode 100644 index a2f897f6df1..00000000000 --- a/docs/en-US/signing-api-calls-python.xml +++ /dev/null @@ -1,101 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- How to sign an API call with Python - To illustrate the procedure used to sign API calls we present a step by step interactive session - using Python. - - First import the required modules: - - - >> import urllib2 ->>> import urllib ->>> import hashlib ->>> import hmac ->>> import base64 - ]]> - - - Define the endpoint of the Cloud, the command that you want to execute and the keys of the user. - - >> baseurl='http://localhost:8080/client/api?' ->>> request={} ->>> request['command']='listUsers' ->>> request['response']='json' ->>> request['apikey']='plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdM-kAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg' ->>> secretkey='VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ' - ]]> - - Build the request string: - - >> request_str='&'.join(['='.join([k,urllib.quote_plus(request[k])]) for k in request.keys()]) ->>> request_str -'apikey=plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdM-kAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg&command=listUsers&response=json' - ]]> - - - Compute the signature with hmac, do a 64 bit encoding and a url encoding: - - >> sig_str='&'.join(['='.join([k.lower(),urllib.quote_plus(request[k].lower().replace('+','%20'))])for k in sorted(request.iterkeys())]) ->>> sig_str -'apikey=plgwjfzk4gys3momtvmjuvg-x-jlwlnfauj9gabbbf9edm-kaymmailqzzq1elzlyq_u38zcm0bewzgudp66mg&command=listusers&response=json' ->>> sig=hmac.new(secretkey,sig_str,hashlib.sha1) ->>> sig - ->>> sig=hmac.new(secretkey,sig_str,hashlib.sha1).digest() ->>> sig -'M:]\x0e\xaf\xfb\x8f\xf2y\xf1p\x91\x1e\x89\x8a\xa1\x05\xc4A\xdb' ->>> sig=base64.encodestring(hmac.new(secretkey,sig_str,hashlib.sha1).digest()) ->>> sig -'TTpdDq/7j/J58XCRHomKoQXEQds=\n' ->>> sig=base64.encodestring(hmac.new(secretkey,sig_str,hashlib.sha1).digest()).strip() ->>> sig -'TTpdDq/7j/J58XCRHomKoQXEQds=' ->>> sig=urllib.quote_plus(base64.encodestring(hmac.new(secretkey,sig_str,hashlib.sha1).digest()).strip()) - ]]> - - - Finally, build the entire string 
and do an http GET: - - >> req=baseurl+request_str+'&signature='+sig ->>> req -'http://localhost:8080/client/api?apikey=plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdM-kAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg&command=listUsers&response=json&signature=TTpdDq%2F7j%2FJ58XCRHomKoQXEQds%3D' ->>> res=urllib2.urlopen(req) ->>> res.read() -'{ "listusersresponse" : { "count":3 ,"user" : [ {"id":"7ed6d5da-93b2-4545-a502-23d20b48ef2a","username":"admin","firstname":"admin","lastname":"cloud","created":"2012-07-05T12:18:27-0700","state":"enabled","account":"admin","accounttype":1,"domainid":"8a111e58-e155-4482-93ce-84efff3c7c77","domain":"ROOT","apikey":"plgWJfZK4gyS3mOMTVmjUVg-X-jlWlnfaUJ9GAbBbf9EdM-kAYMmAiLqzzq1ElZLYq_u38zCm0bewzGUdP66mg","secretkey":"VDaACYb0LV9eNjTetIOElcVQkvJck_J_QljX_FcHRj87ZKiy0z0ty0ZsYBkoXkY9b7eq1EhwJaw7FF3akA3KBQ","accountid":"7548ac03-af1d-4c1c-9064-2f3e2c0eda0d"}, {"id":"1fea6418-5576-4989-a21e-4790787bbee3","username":"runseb","firstname":"foobar","lastname":"goa","email":"joe@smith.com","created":"2013-04-10T16:52:06-0700","state":"enabled","account":"admin","accounttype":1,"domainid":"8a111e58-e155-4482-93ce-84efff3c7c77","domain":"ROOT","apikey":"Xhsb3MewjJQaXXMszRcLvQI9_NPy_UcbDj1QXikkVbDC9MDSPwWdtZ1bUY1H7JBEYTtDDLY3yuchCeW778GkBA","secretkey":"gIsgmi8C5YwxMHjX5o51pSe0kqs6JnKriw0jJBLceY5bgnfzKjL4aM6ctJX-i1ddQIHJLbLJDK9MRzsKk6xZ_w","accountid":"7548ac03-af1d-4c1c-9064-2f3e2c0eda0d"}, {"id":"52f65396-183c-4473-883f-a37e7bb93967","username":"toto","firstname":"john","lastname":"smith","email":"john@smith.com","created":"2013-04-23T04:27:22-0700","state":"enabled","account":"admin","accounttype":1,"domainid":"8a111e58-e155-4482-93ce-84efff3c7c77","domain":"ROOT","apikey":"THaA6fFWS_OmvU8od201omxFC8yKNL_Hc5ZCS77LFCJsRzSx48JyZucbUul6XYbEg-ZyXMl_wuEpECzK-wKnow","secretkey":"O5ywpqJorAsEBKR_5jEvrtGHfWL1Y_j1E4Z_iCr8OKCYcsPIOdVcfzjJQ8YqK0a5EzSpoRrjOFiLsG0hQrYnDA","accountid":"7548ac03-af1d-4c1c-9064-2f3e2c0eda0d"} ] } }' - ]]> - - -
diff --git a/docs/en-US/signing-api-requests.xml b/docs/en-US/signing-api-requests.xml deleted file mode 100644 index 92ff79e61f3..00000000000 --- a/docs/en-US/signing-api-requests.xml +++ /dev/null @@ -1,63 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Signing API Requests - Whether you access the CloudStack API with HTTP or HTTPS, it must still be signed so that CloudStack can verify the caller has been authenticated and authorized to execute the command. Make sure that you have both the API Key and Secret Key provided by the CloudStack administrator for your account before proceeding with the signing process. - To show how to sign a request, we will re-use the previous example. - http://http://localhost:8080/client/api?command=deployVirtualMachine&serviceOfferingId=1&diskOfferingId=1&templateId=2&zoneId=4&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D - Breaking this down, we have several distinct parts to this URL. - - Base URL: This is the base URL to the CloudStack Management Server. - http://localhost:8080 - - API Path: This is the path to the API Servlet that processes the incoming requests. - /client/api? - - Command String: This part of the query string comprises of the command, its parameters, and the API Key that identifies the account. - As with all query string parameters of field-value pairs, the "field" component is case insensitive while all "value" values are case sensitive. - command=deployVirtualMachine&serviceOfferingId=1&diskOfferingId=1&templateId=2&zoneId=4&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ - - Signature: This is the signature of the command string that is generated using a combination of the user’s Secret Key and the HMAC SHA-1 hashing algorithm. - &signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D - - - Every API request has the format Base URL+API Path+Command String+Signature. - To generate the signature. - - For each field-value pair (as separated by a '&') in the Command String, URL encode each value so that it can be safely sent via HTTP GET. - Make sure all spaces are encoded as "%20" rather than "+". 
- - Lower case the entire Command String and sort it alphabetically via the field for each field-value pair. The result of this step would look like the following. - apikey=mivr6x7u6bn_sdahobpjnejpgest35exq-jb8cg20yi3yaxxcgpyuairmfi_ejtvwz0nukkjbpmy3y2bcikwfq&command=deployvirtualmachine&diskofferingid=1&serviceofferingid=1&templateid=2&zoneid=4 - - Take the sorted Command String and run it through the HMAC SHA-1 hashing algorithm (most programming languages offer a utility method to do this) with the user’s Secret Key. Base64 encode the resulting byte array in UTF-8 so that it can be safely transmitted via HTTP. The final string produced after Base64 encoding should be "Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D". - By reconstructing the final URL in the format Base URL+API Path+Command String+Signature, the final URL should look like: - http://localhost:8080/client/api?command=deployVirtualMachine&serviceOfferingId=1&diskOfferingId=1&templateId=2&zoneId=4&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D - - - - - -
diff --git a/docs/en-US/site-to-site-vpn.xml b/docs/en-US/site-to-site-vpn.xml deleted file mode 100644 index 9a41a0adf82..00000000000 --- a/docs/en-US/site-to-site-vpn.xml +++ /dev/null @@ -1,70 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Setting Up a Site-to-Site VPN Connection - A Site-to-Site VPN connection helps you establish a secure connection from an enterprise - datacenter to the cloud infrastructure. This allows users to access the guest VMs by - establishing a VPN connection to the virtual router of the account from a device in the - datacenter of the enterprise. Having this facility eliminates the need to establish VPN - connections to individual VMs. - The difference from Remote VPN is that Site-to-site VPNs connect entire networks to each - other, for example, connecting a branch office network to a company headquarters network. In a - site-to-site VPN, hosts do not have VPN client software; they send and receive normal TCP/IP - traffic through a VPN gateway. - The supported endpoints on the remote datacenters are: - - - Cisco ISR with IOS 12.4 or later - - - Juniper J-Series routers with JunOS 9.5 or later - - - - In addition to the specific Cisco and Juniper devices listed above, the expectation is - that any Cisco or Juniper device running on the supported operating systems is able to - establish VPN connections. - - - To set up a Site-to-Site VPN connection, perform the following: - - - Create a Virtual Private Cloud (VPC). - See . - - - Create a VPN Customer Gateway. - - - Create a VPN gateway for the VPC that you created. - - - Create a VPN connection from the VPC VPN gateway to the customer VPN gateway. - - - - - - -
diff --git a/docs/en-US/small_scale_deployment.xml b/docs/en-US/small_scale_deployment.xml deleted file mode 100644 index bba2b9a7573..00000000000 --- a/docs/en-US/small_scale_deployment.xml +++ /dev/null @@ -1,37 +0,0 @@ - -%BOOK_ENTITIES; -]> - - - -
- Small-Scale Deployment - - - - - Small-Scale Deployment - - This diagram illustrates the network architecture of a small-scale &PRODUCT; deployment. - - A firewall provides a connection to the Internet. The firewall is configured in NAT mode. The firewall forwards HTTP requests and API calls from the Internet to the Management Server. The Management Server resides on the management network. - A layer-2 switch connects all physical servers and storage. - A single NFS server functions as both the primary and secondary storage. - The Management Server is connected to the management network. - -
diff --git a/docs/en-US/snapshot-performance-vmware.xml b/docs/en-US/snapshot-performance-vmware.xml deleted file mode 100644 index 1b9ee3de5ff..00000000000 --- a/docs/en-US/snapshot-performance-vmware.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- VMware Volume Snapshot Performance - When you take a snapshot of a data or root volume on VMware, &PRODUCT; uses an - efficient storage technique to improve performance. - A snapshot is not immediately exported from vCenter to a mounted NFS - share and packaged into an OVA file format. This operation would consume time and resources. - Instead, the original file formats (e.g., VMDK) provided by vCenter are - retained. An OVA file will only be created as needed, on demand. To generate the OVA, - &PRODUCT; uses information in a properties file (*.ova.meta) which it stored along with - the original snapshot data. - For upgrading customers: This process applies only to newly created snapshots after upgrade to &PRODUCT; - 4.2. Snapshots that have already been taken and stored in OVA format will continue to - exist in that format, and will continue to work as expected. - -
diff --git a/docs/en-US/snapshot-restore.xml b/docs/en-US/snapshot-restore.xml deleted file mode 100644 index b2f60f4b1e9..00000000000 --- a/docs/en-US/snapshot-restore.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Snapshot Restore - There are two paths to restoring snapshots. Users can create a volume from the snapshot. The volume can then be mounted to a VM and files recovered as needed. Alternatively, a template may be created from the snapshot of a root disk. The user can then boot a VM from this template to effect recovery of the root disk. -
diff --git a/docs/en-US/snapshot-throttling.xml b/docs/en-US/snapshot-throttling.xml deleted file mode 100644 index 6bda437e503..00000000000 --- a/docs/en-US/snapshot-throttling.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Snapshot Job Throttling - When a snapshot of a virtual machine is requested, the snapshot job runs on the same - host where the VM is running or, in the case of a stopped VM, the host where it ran last. If - many snapshots are requested for VMs on a single host, this can lead to problems with too - many snapshot jobs overwhelming the resources of the host. - To address this situation, the cloud's root administrator can throttle how many snapshot - jobs are executed simultaneously on the hosts in the cloud by using the global configuration - setting concurrent.snapshots.threshold.perhost. By using this setting, the administrator can - better ensure that snapshot jobs do not time out and hypervisor hosts do not experience - performance issues due to hosts being overloaded with too many snapshot requests. - Set concurrent.snapshots.threshold.perhost to a value that represents a best guess about - how many snapshot jobs the hypervisor hosts can execute at one time, given the current - resources of the hosts and the number of VMs running on the hosts. If a given host has more - snapshot requests, the additional requests are placed in a waiting queue. No new snapshot - jobs will start until the number of currently executing snapshot jobs falls below the - configured limit. - The admin can also set job.expire.minutes to place a maximum on how long a snapshot - request will wait in the queue. If this limit is reached, the snapshot request fails and - returns an error message. -
diff --git a/docs/en-US/source-build.xml b/docs/en-US/source-build.xml deleted file mode 100644 index a56d304245f..00000000000 --- a/docs/en-US/source-build.xml +++ /dev/null @@ -1,49 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Building &PRODUCT; from Source - - Prior to the 4.0.0 incubating release, Ant was used to build &PRODUCT;. A migration to Maven started in the 4.0.0 cycle, and has completed in 4.1.0. - The website and the wiki contain up to date information on the build procedure at: - - https://cwiki.apache.org/confluence/display/CLOUDSTACK/How+to+build+on+master+branch - https://cwiki.apache.org/confluence/display/CLOUDSTACK/Setting+up+CloudStack+Development+Environment - - - The overarching steps to build &PRODUCT; are: - - Install the prerequisites and setup your environment - Understand the various Maven profiles and build targets - Deploy and test your build - If needed, learn how to build binaries - - - - Learning Maven is outside the scope of this documentation. - Go to the Maven website at http://maven.apache.org/guides/getting-started/index.html - - -
- diff --git a/docs/en-US/source-prereqs.xml b/docs/en-US/source-prereqs.xml deleted file mode 100644 index 2e40a58c59a..00000000000 --- a/docs/en-US/source-prereqs.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Prerequisites for building Apache CloudStack - - There are a number of prerequisites needed to build &PRODUCT;. This - document assumes compilation on a Linux system that uses RPMs or DEBs - for package management. - - - You will need, at a minimum, the following to compile &PRODUCT;: - - Maven (version 3) - Java (OpenJDK 1.6 or Java 7/OpenJDK 1.7) - Apache Web Services Common Utilities (ws-commons-util) - MySQL - MySQLdb (provides Python database API) - Tomcat 6 (not 6.0.35) - genisoimage - rpmbuild or dpkg-dev - - -
diff --git a/docs/en-US/source.xml b/docs/en-US/source.xml deleted file mode 100644 index ea30000c6a9..00000000000 --- a/docs/en-US/source.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Building from Source - The official &PRODUCT; release is always in source code form. You will likely be able to find "convenience binaries," the source is the canonical release. In this section, we'll cover acquiring the source release and building that so that you can deploy it using Maven or create Debian packages or RPMs. - Note that building and deploying directly from source is typically not the most efficient way to deploy an IaaS. However, we will cover that method as well as building RPMs or Debian packages for deploying &PRODUCT;. - The instructions here are likely version-specific. That is, the method for building from source for the 4.0.x series is different from the 4.1.x series. - If you are working with a unreleased version of &PRODUCT;, see the INSTALL.md file in the top-level directory of the release. - - - - - - - - diff --git a/docs/en-US/ssl.xml b/docs/en-US/ssl.xml deleted file mode 100644 index 9aeb2f91aa8..00000000000 --- a/docs/en-US/ssl.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- SSL (Optional) - &PRODUCT; provides HTTP access in its default installation. There are a number of technologies and sites which choose to implement SSL. As a result, we have left &PRODUCT; to expose HTTP under the assumption that a site will implement its typical practice. - &PRODUCT; uses Tomcat as its servlet container. For sites that would like &PRODUCT; to terminate the SSL session, Tomcat’s SSL access may be enabled. Tomcat SSL configuration is described at http://tomcat.apache.org/tomcat-6.0-doc/ssl-howto.html. -
diff --git a/docs/en-US/standard-events.xml b/docs/en-US/standard-events.xml deleted file mode 100644 index 9c10f873044..00000000000 --- a/docs/en-US/standard-events.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Standard Events - The events log records three types of standard events. - - INFO. This event is generated when an operation has been successfully performed. - WARN. This event is generated in the following circumstances. - - When a network is disconnected while monitoring a template download. - When a template download is abandoned. - When an issue on the storage server causes the volumes to fail over to the mirror storage server. - - - ERROR. This event is generated when an operation has not been successfully performed - - -
- diff --git a/docs/en-US/static-nat.xml b/docs/en-US/static-nat.xml deleted file mode 100644 index 4225d6eecad..00000000000 --- a/docs/en-US/static-nat.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Static NAT - A static NAT rule maps a public IP address to the private IP address of a VM in order to allow Internet traffic into the VM. The public IP address always remains the same, which is why it is called “static” NAT. This section tells how to enable or disable static NAT for a particular IP address. - 
diff --git a/docs/en-US/sticky-session-policies-for-lb-rules.xml b/docs/en-US/sticky-session-policies-for-lb-rules.xml deleted file mode 100644 index 09780855f7e..00000000000 --- a/docs/en-US/sticky-session-policies-for-lb-rules.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Sticky Session Policies for Load Balancer Rules - Sticky sessions are used in Web-based applications to ensure continued availability of - information across the multiple requests in a user's session. For example, if a shopper is - filling a cart, you need to remember what has been purchased so far. The concept of "stickiness" - is also referred to as persistence or maintaining state. - Any load balancer rule defined in &PRODUCT; can have a stickiness policy. The policy - consists of a name, stickiness method, and parameters. The parameters are name-value pairs or - flags, which are defined by the load balancer vendor. The stickiness method could be load - balancer-generated cookie, application-generated cookie, or source-based. In the source-based - method, the source IP address is used to identify the user and locate the user’s stored data. In - the other methods, cookies are used. The cookie generated by the load balancer or application is - included in request and response URLs to create persistence. The cookie name can be specified by - the administrator or automatically generated. A variety of options are provided to control the - exact behavior of cookies, such as how they are generated and whether they are cached. - For the most up to date list of available stickiness methods, see the &PRODUCT; UI or call - listNetworks and check the SupportedStickinessMethods capability. -
diff --git a/docs/en-US/stop-restart-management-server.xml b/docs/en-US/stop-restart-management-server.xml deleted file mode 100644 index 74a687c23a1..00000000000 --- a/docs/en-US/stop-restart-management-server.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Stopping and Restarting the Management Server - The root administrator will need to stop and restart the Management Server from time to time. - For example, after changing a global configuration parameter, a restart is required. If you have multiple Management Server nodes, restart all of them to put the new parameter value into effect consistently throughout the cloud. - To stop the Management Server, issue the following command at the operating system prompt on the Management Server node: - # service cloudstack-management stop - To start the Management Server: - # service cloudstack-management start - To stop the Management Server: - # service cloudstack-management stop -
diff --git a/docs/en-US/stopped-vm.xml b/docs/en-US/stopped-vm.xml deleted file mode 100644 index 7024be7a0b2..00000000000 --- a/docs/en-US/stopped-vm.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Stopped VM - &PRODUCT; now supports creating a VM without starting it. You can determine whether the VM needs to be started as part of the VM deployment. A VM can now be deployed in two ways: create and start a VM (the default method); or create a VM and leave it in the stopped state. - A new request parameter, startVM, is introduced in the deployVm API to support the stopped VM feature. - The possible values are: - - true - The VM starts as a part of the VM deployment. - false - The VM is left in the stopped state at the end of the VM deployment. - - The default value is true. -
diff --git a/docs/en-US/stopping-and-starting-vms.xml b/docs/en-US/stopping-and-starting-vms.xml deleted file mode 100644 index 25c1f494b92..00000000000 --- a/docs/en-US/stopping-and-starting-vms.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Stopping and Starting VMs - Once a VM instance is created, you can stop, restart, or delete it as needed. In the &PRODUCT; UI, click Instances, select the VM, and use the Stop, Start, Reboot, and Destroy buttons. -
- diff --git a/docs/en-US/storage-nw-topology-req.xml b/docs/en-US/storage-nw-topology-req.xml deleted file mode 100644 index a594babea37..00000000000 --- a/docs/en-US/storage-nw-topology-req.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Storage Network Topology Requirements - The secondary storage NFS export is mounted by the secondary storage VM. Secondary storage - traffic goes over the management traffic network, even if there is a separate storage network. - Primary storage traffic goes over the storage network, if available. If you choose to place - secondary storage NFS servers on the storage network, you must make sure there is a route from - the management traffic network to the storage network. -
diff --git a/docs/en-US/storage-overview.xml b/docs/en-US/storage-overview.xml deleted file mode 100644 index bebf441ab60..00000000000 --- a/docs/en-US/storage-overview.xml +++ /dev/null @@ -1,27 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Storage Overview - &PRODUCT; defines two types of storage: primary and secondary. Primary storage can be - accessed by either iSCSI or NFS. Additionally, direct attached storage may be used for primary - storage. Secondary storage is always accessed using NFS. - There is no ephemeral storage in &PRODUCT;. All volumes on all nodes are persistent. -
diff --git a/docs/en-US/storage-plugins.xml b/docs/en-US/storage-plugins.xml deleted file mode 100644 index e6612c199d8..00000000000 --- a/docs/en-US/storage-plugins.xml +++ /dev/null @@ -1,144 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Writing a Storage Plugin - This section gives an outline of how to implement a plugin - to integrate a third-party storage provider. - For details and an example, you will need to read the code. - - Example code is available at: - plugins/storage/volume/sample - - - Third party storage providers can integrate with &PRODUCT; to provide - either primary storage or secondary storage. - For example, &PRODUCT; provides plugins for - Amazon Simple Storage Service (S3) or OpenStack - Object Storage (Swift). Additional third party object storages can be integrated with &PRODUCT; - by writing plugin software that uses the object storage plugin framework. - Several new interfaces are available so that - storage providers can develop vendor-specific plugins based on well-defined - contracts that can be seamlessly managed by &PRODUCT;. - Artifacts such as templates, ISOs and snapshots are kept in storage which &PRODUCT; - refers to as secondary storage. To improve scalability and performance, as when a number - of hosts access secondary storage concurrently, object storage can be used for secondary - storage. Object storage can also provide built-in high availability capability. When using - object storage, access to secondary storage data can be made available across multiple - zones in a region. This is a huge benefit, as it is no longer necessary to copy templates, - snapshots etc. across zones as would be needed in an environment - using only zone-based NFS storage. - The user enables a storage plugin through the UI. - A new dialog box choice is offered to select the storage - provider. 
Depending on the provider you select, additional input fields may appear so that - you can provide the additional details required by that provider, such as a user name and - password for a third-party storage account. - -
- Overview of How to Write a Storage Plugin - To add a third-party storage option to &PRODUCT;, implement the following interfaces in Java: - - DataStoreDriver - DataStoreLifecycle - DataStoreProvider - In addition to implementing the interfaces, you have to hardcode your plugin's required additional - input fields into the code for the Add Secondary Storage - or Add Primary Storage dialog box. - Place your .jar file in plugins/storage/volume/ or plugins/storage/image/. - Edit /client/tomcatconf/componentContext.xml.in. - Edit client/pom.xml. - -
-
- Implementing DataStoreDriver - DataStoreDriver contains the code that &PRODUCT; will use to provision the object store, when needed. - You must implement the following methods: - - getTO() - getStoreTO() - createAsync() - deleteAsync() - - The following methods are optional: - - resize() - canCopy() is optional. If you set it to true, then you must implement copyAsync(). - -
-
- Implementing DataStoreLifecycle - DataStoreLifecycle contains the code to manage the storage operations for ongoing use of the storage. - Several operations are needed, like create, maintenance mode, delete, etc. - You must implement the following methods: - - initialize() - maintain() - cancelMaintain() - deleteDataStore() - Implement one of the attach*() methods depending on what scope you want the storage to have: attachHost(), attachCluster(), or attachZone(). - -
-
- Implementing DataStoreProvider - DataStoreProvider contains the main code of the data store. - You must implement the following methods: - - getDatastoreLifeCycle() - getDataStoreDriver() - getTypes(). Returns one or more types of storage for which this data store provider can be used. - For secondary object storage, return IMAGE, and for a Secondary Staging Store, return ImageCache. - configure(). First initialize the lifecycle implementation and the driver implementation, - then call registerDriver() to register the new object store provider instance with &PRODUCT;. - getName(). Returns the unique name of your provider; for example, - this can be used to get the name to display in the UI. - - The following methods are optional: - - getHostListener() is optional; it's for monitoring the status of the host. - -
-
- Place the .jar File in the Right Directory - For a secondary storage plugin, place your .jar file here: - plugins/storage/image/ - For a primary storage plugin, place your .jar file here: - plugins/storage/volume/ -
-
- Edit Configuration Files - First, edit the following file to tell &PRODUCT; to include your .jar file. - Add a line to this file to tell the &PRODUCT; Management Server that it now has a dependency on your code: - client/pom.xml - Place some facts about your code in the following file so &PRODUCT; can run it: - /client/tomcatconf/componentContext.xml.in - In the section “Deployment configurations of various adapters,” add this: - <bean>id="some unique ID" class="package name of your implementation of DataStoreProvider"</bean> - In the section “Storage Providers,” add this: - <property name="providers"> - <ref local="same ID from the bean tag's id attribute"> -</property> - -
- -
diff --git a/docs/en-US/storage-setup.xml b/docs/en-US/storage-setup.xml deleted file mode 100644 index dee2f4ccbd7..00000000000 --- a/docs/en-US/storage-setup.xml +++ /dev/null @@ -1,192 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - Storage Setup - &PRODUCT; is designed to work with a wide variety of commodity and enterprise-grade storage. Local disk may be used as well, if supported by the selected hypervisor. Storage type support for guest virtual disks differs based on hypervisor selection. - - - - - - XenServer - vSphere - KVM - - - - - NFS - Supported - Supported - Supported - - - iSCSI - Supported - Supported via VMFS - Supported via Clustered Filesystems - - - Fiber Channel - Supported via Pre-existing SR - Supported - Supported via Clustered Filesystems - - - Local Disk - Supported - Supported - Supported - - - - - The use of the Cluster Logical Volume Manager (CLVM) for KVM is not officially supported with &PRODUCT;. -
- Small-Scale Setup - In a small-scale setup, a single NFS server can function as both primary and secondary storage. The NFS server just needs to export two separate shares, one for primary storage and the other for secondary storage. -
-
- Secondary Storage - &PRODUCT; is designed to work with any scalable secondary storage system. The only requirement is the secondary storage system supports the NFS protocol. - - The storage server should be a machine with a large number of disks. The disks should ideally be managed by a hardware RAID controller. Modern hardware RAID controllers support hot plug functionality independent of the operating system so you can replace faulty disks without impacting the running operating system. - -
-
- Example Configurations - In this section we go through a few examples of how to set up storage to work properly on a few types of NFS and iSCSI storage systems. -
- Linux NFS on Local Disks and DAS - This section describes how to configure an NFS export on a standard Linux installation. The exact commands might vary depending on the operating system version. - - Install the RHEL/CentOS distribution on the storage server. - If the root volume is more than 2 TB in size, create a smaller boot volume to install RHEL/CentOS. A root volume of 20 GB should be sufficient. - After the system is installed, create a directory called /export. This can each be a directory in the root partition itself or a mount point for a large disk volume. - If you have more than 16TB of storage on one host, create multiple EXT3 file systems and multiple NFS exports. Individual EXT3 file systems cannot exceed 16TB. - - After /export directory is created, run the following command to configure it as an NFS export. - # echo "/export <CIDR>(rw,async,no_root_squash)" >> /etc/exports - Adjust the above command to suit your deployment needs. - - - Limiting NFS export. It is highly recommended that you limit the NFS export to a particular subnet by specifying a subnet mask (e.g.,â€192.168.1.0/24â€). By allowing access from only within the expected cluster, you avoid having non-pool member mount the storage. The limit you place must include the management network(s) and the storage network(s). If the two are the same network then one CIDR is sufficient. If you have a separate storage network you must provide separate CIDR’s for both or one CIDR that is broad enough to span both. - The following is an example with separate CIDRs: - /export 192.168.1.0/24(rw,async,no_root_squash) 10.50.1.0/24(rw,async,no_root_squash) - - - Removing the async flag. The async flag improves performance by allowing the NFS server to respond before writes are committed to the disk. Remove the async flag in your mission critical production deployment. - - - - - Run the following command to enable NFS service. 
- # chkconfig nfs on - - - Edit the /etc/sysconfig/nfs file and uncomment the following lines. - LOCKD_TCPPORT=32803 -LOCKD_UDPPORT=32769 -MOUNTD_PORT=892 -RQUOTAD_PORT=875 -STATD_PORT=662 -STATD_OUTGOING_PORT=2020 - - - Edit the /etc/sysconfig/iptables file and add the following lines at the beginning of the INPUT chain. - --A INPUT -m state --state NEW -p udp --dport 111 -j ACCEPT --A INPUT -m state --state NEW -p tcp --dport 111 -j ACCEPT --A INPUT -m state --state NEW -p tcp --dport 2049 -j ACCEPT --A INPUT -m state --state NEW -p tcp --dport 32803 -j ACCEPT --A INPUT -m state --state NEW -p udp --dport 32769 -j ACCEPT --A INPUT -m state --state NEW -p tcp --dport 892 -j ACCEPT --A INPUT -m state --state NEW -p udp --dport 892 -j ACCEPT --A INPUT -m state --state NEW -p tcp --dport 875 -j ACCEPT --A INPUT -m state --state NEW -p udp --dport 875 -j ACCEPT --A INPUT -m state --state NEW -p tcp --dport 662 -j ACCEPT --A INPUT -m state --state NEW -p udp --dport 662 -j ACCEPT - - - - Reboot the server. - An NFS share called /export is now set up. - - - When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text. -
-
- Linux NFS on iSCSI - Use the following steps to set up a Linux NFS server export on an iSCSI volume. These steps apply to RHEL/CentOS 5 distributions. - - - Install iscsiadm. - -# yum install iscsi-initiator-utils -# service iscsi start -# chkconfig --add iscsi -# chkconfig iscsi on - - - - Discover the iSCSI target. - # iscsiadm -m discovery -t st -p <iSCSI Server IP address>:3260 - For example: - # iscsiadm -m discovery -t st -p 172.23.10.240:3260 - 172.23.10.240:3260,1 iqn.2001-05.com.equallogic:0-8a0906-83bcb3401-16e0002fd0a46f3d-rhel5-test - - - Log in. - # iscsiadm -m node -T <Complete Target Name> -l -p <Group IP>:3260 - For example: - # iscsiadm -m node -l -T iqn.2001-05.com.equallogic:83bcb3401-16e0002fd0a46f3d-rhel5-test -p 172.23.10.240:3260 - - - Discover the SCSI disk. For example: - -# iscsiadm -m session -P3 | grep Attached -Attached scsi disk sdb State: running - - - - Format the disk as ext3 and mount the volume. - # mkfs.ext3 /dev/sdb -# mkdir -p /export -# mount /dev/sdb /export - - - - Add the disk to /etc/fstab to make sure it gets mounted on boot. - /dev/sdb /export ext3 _netdev 0 0 - - - Now you can set up /export as an NFS share. - - - Limiting NFS export. In order to avoid data loss, it is highly recommended that you limit the NFS export to a particular subnet by specifying a subnet mask (e.g.,â€192.168.1.0/24â€). By allowing access from only within the expected cluster, you avoid having non-pool member mount the storage and inadvertently delete all its data. The limit you place must include the management network(s) and the storage network(s). If the two are the same network then one CIDR is sufficient. If you have a separate storage network you must provide separate CIDRs for both or one CIDR that is broad enough to span both. - The following is an example with separate CIDRs: - /export 192.168.1.0/24(rw,async,no_root_squash) 10.50.1.0/24(rw,async,no_root_squash) - - Removing the async flag. 
The async flag improves performance by allowing the NFS server to respond before writes are committed to the disk. Remove the async flag in your mission critical production deployment. - -
-
-
diff --git a/docs/en-US/storage-tags.xml b/docs/en-US/storage-tags.xml deleted file mode 100644 index 39b55a65fdd..00000000000 --- a/docs/en-US/storage-tags.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Storage Tags - Storage may be "tagged". A tag is a text string attribute associated with primary storage, a Disk Offering, or a Service Offering. Tags allow administrators to provide additional information about the storage. For example, that it is an "SSD" or that it is "slow". Tags are not interpreted by &PRODUCT;. They are matched against tags placed on service and disk offerings. &PRODUCT; requires all tags on service and disk offerings to exist on the primary storage before it allocates root or data disks on the primary storage. Service and disk offering tags are used to identify the requirements of the storage that those offerings have. For example, the high end service offering may require "fast" for its root disk volume. - The interaction between tags, allocation, and volume copying across clusters and pods can be complex. To simplify the situation, use the same set of tags on the primary storage for all clusters in a pod. Even if different devices are used to present those tags, the set of exposed tags can be the same. -
diff --git a/docs/en-US/storage.xml b/docs/en-US/storage.xml deleted file mode 100644 index 3ef73246d1d..00000000000 --- a/docs/en-US/storage.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Working With Storage - - - - - - diff --git a/docs/en-US/suspend-project.xml b/docs/en-US/suspend-project.xml deleted file mode 100644 index b4f821b2c26..00000000000 --- a/docs/en-US/suspend-project.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Suspending or Deleting a Project - When a project is suspended, it retains the resources it owns, but they can no longer be used. No new resources or members can be added to a suspended project. - When a project is deleted, its resources are destroyed, and member accounts are removed from the project. The project’s status is shown as Disabled pending final deletion. - A project can be suspended or deleted by the project administrator, the domain administrator of the domain the project belongs to or of its parent domain, or the &PRODUCT; root administrator. - - Log in to the &PRODUCT; UI. - In the left navigation, click Projects. - In Select View, choose Projects. - Click the name of the project. - Click one of the buttons:To delete, use - - - - - deletebutton.png: Removes a project - - - To suspend, use - - - - - deletebutton.png: suspends a project - - - -
diff --git a/docs/en-US/sys-offering-sysvm.xml b/docs/en-US/sys-offering-sysvm.xml deleted file mode 100644 index 563dd6f5ebf..00000000000 --- a/docs/en-US/sys-offering-sysvm.xml +++ /dev/null @@ -1,75 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Changing the Default System Offering for System VMs - You can manually change the system offering for a particular System VM. Additionally, as a - &PRODUCT; administrator, you can also change the default system offering used for System - VMs. - - - Create a new system offering. - For more information, see - Creating a New System Service Offering. - - - Back up the database: - mysqldump -u root -p cloud | bzip2 > cloud_backup.sql.bz2 - - - Open an MySQL prompt: - mysql -u cloud -p cloud - - - Run the following queries on the cloud database. - - - In the disk_offering table, identify the original default offering and the new - offering you want to use by default. - Take a note of the ID of the new offering. - select id,name,unique_name,type from disk_offering; - - - For the original default offering, set the value of unique_name to NULL. - # update disk_offering set unique_name = NULL where id = 10; - Ensure that you use the correct value for the ID. - - - For the new offering that you want to use by default, set the value of unique_name - as follows: - For the default Console Proxy VM (CPVM) offering,set unique_name to - 'Cloud.com-ConsoleProxy'. For the default Secondary Storage VM (SSVM) offering, set - unique_name to 'Cloud.com-SecondaryStorage'. For example: - update disk_offering set unique_name = 'Cloud.com-ConsoleProxy' where id = 16; - - - - - Restart &PRODUCT; Management Server. Restarting is required because the default - offerings are loaded into the memory at startup. - service cloudstack-management restart - - - Destroy the existing CPVM or SSVM offerings and wait for them to be recreated. The new - CPVM or SSVM are configured with the new offering. - - -
diff --git a/docs/en-US/sys-reliability-and-ha.xml b/docs/en-US/sys-reliability-and-ha.xml deleted file mode 100644 index e3c1cd9026f..00000000000 --- a/docs/en-US/sys-reliability-and-ha.xml +++ /dev/null @@ -1,34 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - System Reliability and High Availability - - - - - - - - diff --git a/docs/en-US/sysprep-for-windows-server-2003R2.xml b/docs/en-US/sysprep-for-windows-server-2003R2.xml deleted file mode 100644 index 5f8a3890705..00000000000 --- a/docs/en-US/sysprep-for-windows-server-2003R2.xml +++ /dev/null @@ -1,57 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- System Preparation for Windows Server 2003 R2 - Earlier versions of Windows have a different sysprep tool. Follow these steps for Windows Server 2003 R2. - - Extract the content of \support\tools\deploy.cab on the Windows installation CD into a directory called c:\sysprep on the Windows 2003 R2 VM. - Run c:\sysprep\setupmgr.exe to create the sysprep.inf file. - - Select Create New to create a new Answer File. - Enter “Sysprep setup” for the Type of Setup. - Select the appropriate OS version and edition. - On the License Agreement screen, select “Yes fully automate the installation”. - Provide your name and organization. - Leave display settings at default. - Set the appropriate time zone. - Provide your product key. - Select an appropriate license mode for your deployment - Select “Automatically generate computer name”. - Type a default administrator password. If you enable the password reset feature, the users will not actually use this password. This password will be reset by the instance manager after the guest boots up. - Leave Network Components at “Typical Settings”. - Select the “WORKGROUP” option. - Leave Telephony options at default. - Select appropriate Regional Settings. - Select appropriate language settings. - Do not install printers. - Do not specify “Run Once commands”. - You need not specify an identification string. - Save the Answer File as c:\sysprep\sysprep.inf. - - - - Run the following command to sysprep the image:c:\sysprep\sysprep.exe -reseal -mini -activated - After this step the machine will automatically shut down - -
diff --git a/docs/en-US/sysprep-windows-server-2008R2.xml b/docs/en-US/sysprep-windows-server-2008R2.xml deleted file mode 100644 index 49e7477c6b4..00000000000 --- a/docs/en-US/sysprep-windows-server-2008R2.xml +++ /dev/null @@ -1,71 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- System Preparation for Windows Server 2008 R2 - For Windows 2008 R2, you run Windows System Image Manager to create a custom sysprep response XML file. Windows System Image Manager is installed as part of the Windows Automated Installation Kit (AIK). Windows AIK can be downloaded from Microsoft Download Center. - Use the following steps to run sysprep for Windows 2008 R2:The steps outlined here are derived from the excellent guide by Charity Shelbourne, originally published at Windows Server 2008 Sysprep Mini-Setup. - - - - Download and install the Windows AIKWindows AIK should not be installed on the Windows 2008 R2 VM you just created. Windows AIK should not be part of the template you create. It is only used to create the sysprep answer file. - Copy the install.wim file in the \sources directory of the Windows 2008 R2 installation DVD to the hard disk. This is a very large file and may take a long time to copy. Windows AIK requires the WIM file to be writable. - Start the Windows System Image Manager, which is part of the Windows AIK. - In the Windows Image pane, right click the Select a Windows image or catalog file option to - load the install.wim file you just copied. - Select the Windows 2008 R2 Edition.You may be prompted with a warning that the catalog file cannot be opened. Click Yes to create a new catalog file. - In the Answer File pane, right click to create a new answer file. - Generate the answer file from the Windows System Image Manager using the following steps: - - The first page you need to automate is the Language and Country or Region Selection page. To automate this, expand Components in your Windows Image pane, right-click and add the Microsoft-Windows-International-Core setting to Pass 7 oobeSystem. In your Answer File pane, configure the InputLocale, SystemLocale, UILanguage, and UserLocale with the appropriate settings for your language and country or region. 
Should you have a question about any of these settings, you can right-click on the specific setting and select Help. This will open the appropriate CHM help file with more information, including examples on the setting you are attempting to configure. - - - - - sysmanager.png: System Image Manager - - You need to automate the Software License Terms Selection page, otherwise known as the End-User License Agreement (EULA). To do this, expand the Microsoft-Windows-Shell-Setup component. High-light the OOBE setting, and add the setting to the Pass 7 oobeSystem. In Settings, set HideEULAPage true. - - - - software-license.png: Depicts hiding the EULA page. - - Make sure the license key is properly set. If you use MAK key, you can just enter the MAK key on the Windows 2008 R2 VM. You need not input the MAK into the Windows System Image Manager. If you use KMS host for activation you need not enter the Product Key. Details of Windows Volume Activation can be found at - You need to automate is the Change Administrator Password page. Expand the Microsoft-Windows-Shell-Setup component (if it is not still expanded), expand UserAccounts, right-click on AdministratorPassword, and add the setting to the Pass 7 oobeSystem configuration pass of your answer file. Under Settings, specify a password next to Value. - - - - - change-admin-password.png: Depicts changing the administrator password - You may read the AIK documentation and set many more options that suit your deployment. The steps above are the minimum needed to make Windows unattended setup work. - Save the answer file as unattend.xml. You can ignore the warning messages that appear in the validation window. 
- Copy the unattend.xml file into the c:\windows\system32\sysprep directory of the Windows 2008 R2 Virtual Machine - Once you place the unattend.xml file in c:\windows\system32\sysprep directory, you run the sysprep tool as follows: - cd c:\Windows\System32\sysprep -sysprep.exe /oobe /generalize /shutdown -The Windows 2008 R2 VM will automatically shut down after sysprep is complete. - - - - -
diff --git a/docs/en-US/system-reserved-ip-addresses.xml b/docs/en-US/system-reserved-ip-addresses.xml deleted file mode 100644 index 7ae9fa8df9f..00000000000 --- a/docs/en-US/system-reserved-ip-addresses.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- System Reserved IP Addresses - In each zone, you need to configure a range of reserved IP addresses for the management network. This network carries communication between the &PRODUCT; Management Server and various system VMs, such as Secondary Storage VMs, Console Proxy VMs, and DHCP. - The reserved IP addresses must be unique across the cloud. You cannot, for example, have a host in one zone which has the same private IP address as a host in another zone. - The hosts in a pod are assigned private IP addresses. These are typically RFC1918 addresses. The Console Proxy and Secondary Storage system VMs are also allocated private IP addresses in the CIDR of the pod that they are created in. - Make sure computing servers and Management Servers use IP addresses outside of the System Reserved IP range. For example, suppose the System Reserved IP range starts at 192.168.154.2 and ends at 192.168.154.7. &PRODUCT; can use .2 to .7 for System VMs. This leaves the rest of the pod CIDR, from .8 to .254, for the Management Server and hypervisor hosts. - In all zones: - Provide private IPs for the system in each pod and provision them in &PRODUCT;. - For KVM and XenServer, the recommended number of private IPs per pod is one per host. If you expect a pod to grow, add enough private IPs now to accommodate the growth. - In a zone that uses advanced networking: - For zones with advanced networking, we recommend provisioning enough private IPs for your total number of customers, plus enough for the required &PRODUCT; System VMs. Typically, about 10 additional IPs are required for the System VMs. For more information about System VMs, see Working with System Virtual Machines in the Administrator's Guide. - When advanced networking is being used, the number of private IP addresses available in each pod varies depending on which hypervisor is running on the nodes in that pod. 
Citrix XenServer and KVM use link-local addresses, which in theory provide more than 65,000 private IP addresses within the address block. As the pod grows over time, this should be more than enough for any reasonable number of hosts as well as IP addresses for guest virtual routers. VMWare ESXi, by contrast uses any administrator-specified subnetting scheme, and the typical administrator provides only 255 IPs per pod. Since these are shared by physical machines, the guest virtual router, and other entities, it is possible to run out of private IPs when scaling up a pod whose nodes are running ESXi. - To ensure adequate headroom to scale private IP space in an ESXi pod that uses advanced networking, use one or both of the following techniques: - - Specify a larger CIDR block for the subnet. A subnet mask with a /20 suffix will provide more than 4,000 IP addresses. - Create multiple pods, each with its own subnet. For example, if you create 10 pods and each pod has 255 IPs, this will provide 2,550 IP addresses. - -
diff --git a/docs/en-US/system-service-offerings.xml b/docs/en-US/system-service-offerings.xml deleted file mode 100644 index 84d5f7ae7b5..00000000000 --- a/docs/en-US/system-service-offerings.xml +++ /dev/null @@ -1,30 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- System Service Offerings - System service offerings provide a choice of CPU speed, number of CPUs, tags, and RAM size, just as other service offerings do. But rather than being used for virtual machine instances and exposed to users, system service offerings are used to change the default properties of virtual routers, console proxies, and other system VMs. System service offerings are visible only to the &PRODUCT; root administrator. &PRODUCT; provides default system service offerings. The &PRODUCT; root administrator can create additional custom system service offerings. - When &PRODUCT; creates a virtual router for a guest network, it uses default settings which are defined in the system service offering associated with the network offering. You can upgrade the capabilities of the virtual router by applying a new network offering that contains a different system service offering. All virtual routers in that network will begin using the settings from the new service offering. - -
diff --git a/docs/en-US/system-vm-template.xml b/docs/en-US/system-vm-template.xml deleted file mode 100644 index a9477f3a61d..00000000000 --- a/docs/en-US/system-vm-template.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- The System VM Template - The System VMs come from a single template. The System VM has the following characteristics: - - Debian 6.0 ("Squeeze"), 2.6.32 kernel with the latest security patches from the Debian security APT repository - Has a minimal set of packages installed thereby reducing the attack surface - 32-bit for enhanced performance on Xen/VMWare - pvops kernel with Xen PV drivers, KVM virtio drivers, and VMware tools for optimum performance on all hypervisors - Xen tools inclusion allows performance monitoring - Latest versions of HAProxy, iptables, IPsec, and Apache from debian repository ensures improved security and speed - Latest version of JRE from Sun/Oracle ensures improved security and speed - -
diff --git a/docs/en-US/tagging-resources.xml b/docs/en-US/tagging-resources.xml deleted file mode 100644 index 31ee3825e4a..00000000000 --- a/docs/en-US/tagging-resources.xml +++ /dev/null @@ -1,69 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using Tags to Organize Resources in the Cloud - A tag is a key-value pair that stores metadata about a resource in the cloud. Tags are - useful for categorizing resources. For example, you can tag a user VM with a - value that indicates the user's city of residence. In this case, the key would - be "city" and the value might be "Toronto" or "Tokyo." You can then request - &PRODUCT; to find all resources that have a given tag; for example, VMs for - users in a given city. - You can tag a user virtual machine, volume, snapshot, guest network, template, - ISO, firewall rule, port forwarding rule, public IP address, security group, - load balancer rule, project, VPC, network ACL, or static route. You can not tag - a remote access VPN. - You can work with tags through the UI or through the API commands createTags, - deleteTags, and listTags. You can define multiple tags for each resource. There - is no limit on the number of tags you can define. Each tag can be up to 255 - characters long. Users can define tags on the resources they own, and - administrators can define tags on any resources in the cloud. - An optional input parameter, "tags," exists on many of the list* API commands. - The following example shows how to use this new parameter to find all the volumes - having tag region=canada OR tag city=Toronto: - command=listVolumes - &listAll=true - &tags[0].key=region - &tags[0].value=canada - &tags[1].key=city - &tags[1].value=Toronto - The following API commands have the "tags" input parameter: - - listVirtualMachines - listVolumes - listSnapshots - listNetworks - listTemplates - listIsos - listFirewallRules - listPortForwardingRules - listPublicIpAddresses - listSecurityGroups - listLoadBalancerRules - listProjects - listVPCs - listNetworkACLs - listStaticRoutes - -
diff --git a/docs/en-US/template-iso-snapshot-usage-record-format.xml b/docs/en-US/template-iso-snapshot-usage-record-format.xml deleted file mode 100644 index 3f34f362d70..00000000000 --- a/docs/en-US/template-iso-snapshot-usage-record-format.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Template, ISO, and Snapshot Usage Record Format - - account – name of the account - accountid – ID of the account - domainid – ID of the domain in which this account resides - zoneid – Zone where the usage occurred - description – A string describing what the usage record is tracking - usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours) - usagetype – A number representing the usage type (see Usage Types) - rawusage – A number representing the actual usage in hours - usageid – The ID of the the template, ISO, or snapshot - offeringid – The ID of the disk offering - templateid – – Included only for templates (usage type 7). Source template ID. - size – Size of the template, ISO, or snapshot - startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record - -
diff --git a/docs/en-US/templates.xml b/docs/en-US/templates.xml deleted file mode 100644 index faecf1b76dc..00000000000 --- a/docs/en-US/templates.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Templates - - -
diff --git a/docs/en-US/third-party-ui-plugin.xml b/docs/en-US/third-party-ui-plugin.xml deleted file mode 100644 index 297fdaa857f..00000000000 --- a/docs/en-US/third-party-ui-plugin.xml +++ /dev/null @@ -1,364 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Third-Party UI Plugin Framework - Using the new third-party plugin framework, you can write and install extensions to - &PRODUCT;. The installed and enabled plugins will appear in the UI alongside the - other features. - The code for the plugin is simply placed in a special directory - within &PRODUCT;’s installed code at any time after &PRODUCT; installation. The new plugin - appears only when it is enabled by the cloud administrator. - - - - - - plugin_intro.jpg: New plugin button in product navbar - - - The left navigation bar of the &PRODUCT; UI has a new Plugins button to help you work with UI plugins. -
- How to Write a Plugin: Overview - The basic procedure for writing a plugin is: - - - Write the code and create the other files needed. You will need the plugin code - itself (in Javascript), a thumbnail image, the plugin listing, and a CSS file. - - - - - - plugin1.jpg: Write the plugin code - - - All UI plugins have the following set of files: - +-- cloudstack/ - +-- ui/ - +-- plugins/ - +-- csMyFirstPlugin/ - +-- config.js --> Plugin metadata (title, author, vendor URL, etc.) - +-- icon.png --> Icon, shown on side nav bar and plugin listing - (should be square, and ~50x50px) - +-- csMyFirstPlugin.css --> CSS file, loaded automatically when plugin loads - +-- csMyFirstPlugin.js --> Main JS file, containing plugin code - - The same files must also be present at /tomcat/webapps/client/plugins. - - - The &PRODUCT; administrator adds the folder containing your plugin code under the - &PRODUCT; PLUGINS folder. - - - - - - plugin2.jpg: The plugin code is placed in the PLUGINS folder - - - - - The administrator also adds the name of your plugin to the plugin.js file in the - PLUGINS folder. - - - - - - plugin3.jpg: The plugin name is added to plugin.js in the PLUGINS - folder - - - - - The next time the user refreshes the UI in the browser, your plugin will appear in - the left navigation bar. - - - - - - plugin4.jpg: The plugin appears in the UI - - - - -
-
- How to Write a Plugin: Implementation Details - This section requires an understanding of JavaScript and the &PRODUCT; API. You don't - need knowledge of specific frameworks for this tutorial (jQuery, etc.), since the - &PRODUCT; UI handles the front-end rendering for you. - There is much more to the &PRODUCT; UI framework than can be described here. The UI is - very flexible to handle many use cases, so there are countless options and variations. The - best reference right now is to read the existing code for the main UI, which is in the /ui - folder. Plugins are written in a very similar way to the main UI. - - - Create the directory to hold your plugin. - All plugins are composed of set of required files in the directory - /ui/plugins/pluginID, where pluginID is a short name for your plugin. It's recommended - that you prefix your folder name (for example, bfMyPlugin) to avoid naming conflicts - with other people's plugins. - In this example, the plugin is named csMyFirstPlugin. - $ cd cloudstack/ui/plugins -$ mkdir csMyFirstPlugin -$ ls -l - -total 8 -drwxr-xr-x 2 bgregory staff 68 Feb 11 14:44 csMyFirstPlugin --rw-r--r-- 1 bgregory staff 101 Feb 11 14:26 plugins.js - - - - Change to your new plugin directory. - $ cd csMyFirstPlugin - - - - Set up the listing. - Add the file config.js, using your favorite editor. - $ vi config.js - Add the following content to config.js. This information will be displayed on the - plugin listing page in the UI: - (function (cloudStack) { - cloudStack.plugins.csMyFirstPlugin.config = { - title: 'My first plugin', - desc: 'Tutorial plugin', - externalLink: 'http://www.cloudstack.org/', - authorName: 'Test Plugin Developer', - authorEmail: 'plugin.developer@example.com' - }; -}(cloudStack)); - - - - Add a new main section. - Add the file csMyFirstPlugin.js, using your favorite editor. 
- $ vi csMyFirstPlugin.js - Add the following content to csMyFirstPlugin.js: - (function (cloudStack) { - cloudStack.plugins.csMyFirstPlugin = function(plugin) { - plugin.ui.addSection({ - id: 'csMyFirstPlugin', - title: 'My Plugin', - preFilter: function(args) { - return isAdmin(); - }, - show: function() { - return $('<div>').html('Content will go here'); - } - }); - }; -}(cloudStack)); - - - - Register the plugin. - You now have the minimal content needed to run the plugin, so you can activate the - plugin in the UI by adding it to plugins.js. First, edit the file: - $ cd cloudstack/ui/plugins -$ vi plugins.js - - Now add the following to plugins.js: - (function($, cloudStack) { - cloudStack.plugins = [ - 'csMyFirstPlugin' - ]; -}(jQuery, cloudStack)); - - - - Check the plugin in the UI. - First, copy all the plugin code that you have created so far to - /tomcat/webapps/client/plugins. Then refresh the browser and click Plugins in the side - navigation bar. You should see your new plugin. - - - Make the plugin do something. - Right now, you just have placeholder content in the new plugin. It's time to add - real code. In this example, you will write a basic list view, which renders data from - an API call. You will list all virtual machines owned by the logged-in user. To do - this, replace the 'show' function in the plugin code with a 'listView' block, - containing the required syntax for a list view. To get the data, use the - listVirtualMachines API call. Without any parameters, it will return VMs only for your - active user. Use the provided 'apiCall' helper method to handle the server call. Of - course, you are free to use any other method for making the AJAX call (for example, - jQuery's $.ajax method). 
- First, open your plugin's JavaScript source file in your favorite editor: - $ cd csMyFirstPlugin -$ vi csMyFirstPlugin.js - - Add the following code in csMyFirstPlugin.js: - (function (cloudStack) { - cloudStack.plugins.csMyFirstPlugin = function(plugin) { - plugin.ui.addSection({ - id: 'csMyFirstPlugin', - title: 'My Plugin', - preFilter: function(args) { - return isAdmin(); - }, - - // Render page as a list view - listView: { - id: 'testPluginInstances', - fields: { - name: { label: 'label.name' }, - instancename: { label: 'label.internal.name' }, - displayname: { label: 'label.display.name' }, - zonename: { label: 'label.zone.name' } - }, - dataProvider: function(args) { - // API calls go here, to retrive the data asynchronously - // - // On successful retrieval, call - // args.response.success({ data: [data array] }); - plugin.ui.apiCall('listVirtualMachines', { - success: function(json) { - var vms = json.listvirtualmachinesresponse.virtualmachine; - - args.response.success({ data: vms }); - }, - error: function(errorMessage) { - args.response.error(errorMessage) - } - }); - } - } - }); - }; -}(cloudStack)); - - - - Test the plugin. - First, copy all the plugin code that you have created so far to - /tomcat/webapps/client/plugins. Then refresh the browser. You can see that your - placeholder content was replaced with a list table, containing 4 columns of virtual - machine data. - - - Add an action button. - Let's add an action button to the list view, which will reboot the VM. To do this, - add an actions block under listView. After specifying the correct format, the actions - will appear automatically to the right of each row of data. - $ vi csMyFirstPlugin.js - - Now add the following new code in csMyFirstPlugin.js. (The dots ... show where we - have omitted some existing code for the sake of space. Don't actually cut and paste - that part): - ... - listView: { - id: 'testPluginInstances', - ... 
- - actions: { - // The key/ID you specify here will determine what icon is - // shown in the UI for this action, - // and will be added as a CSS class to the action's element - // (i.e., '.action.restart') - // - // -- here, 'restart' is a predefined name in &PRODUCT; that will - // automatically show a 'reboot' arrow as an icon; - // this can be changed in csMyFirstPlugin.css - restart: { - label: 'Restart VM', - messages: { - confirm: function() { return 'Are you sure you want to restart this VM?' }, - notification: function() { return 'Rebooted VM' } - }, - action: function(args) { - // Get the instance object of the selected row from context - // - // -- all currently loaded state is stored in 'context' as objects, - // such as the selected list view row, - // the selected section, and active user - // - // -- for list view actions, the object's key will be the same as - // listView.id, specified above; - // always make sure you specify an 'id' for the listView, - // or else it will be 'undefined!' - var instance = args.context.testPluginInstances[0]; - - plugin.ui.apiCall('rebootVirtualMachine', { - // These will be appended to the API request - // - // i.e., rebootVirtualMachine&id=... - data: { - id: instance.id - }, - success: function(json) { - args.response.success({ - // This is an async job, so success here only indicates - // that the job was initiated. - // - // To pass the job ID to the notification UI - // (for checking to see when action is completed), - // '_custom: { jobID: ... 
}' needs to always be passed on success, - // in the same format as below - _custom: { jobId: json.rebootvirtualmachineresponse.jobid } - }); - }, - - - error: function(errorMessage) { - args.response.error(errorMessage); // Cancel action, show error message returned - } - }); - }, - - // Because rebootVirtualMachine is an async job, we need to add - // a poll function, which will perodically check - // the management server to see if the job is ready - // (via pollAsyncJobResult API call) - // - // The plugin API provides a helper function, 'plugin.ui.pollAsyncJob', - / which will work for most jobs - // in &PRODUCT; - notification: { - poll: plugin.ui.pollAsyncJob - } - } - }, - - dataProvider: function(args) { - ... -... - - - - Add the thumbnail icon. - Create an icon file; it should be square, about 50x50 pixels, and named icon.png. - Copy it into the same directory with your plugin code: - cloudstack/ui/plugins/csMyFirstPlugin/icon.png. - - - Add the stylesheet. - Create a CSS file, with the same name as your .js file. Copy it into the same - directory with your plugin code: - cloudstack/ui/plugins/csMyFirstPlugin/csMyFirstPlugin.css. - - -
-
diff --git a/docs/en-US/time-zones.xml b/docs/en-US/time-zones.xml deleted file mode 100644 index 6b3b64ed85c..00000000000 --- a/docs/en-US/time-zones.xml +++ /dev/null @@ -1,137 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Time Zones - The following time zone identifiers are accepted by &PRODUCT;. There are several places that have a time zone as a required or optional parameter. These include scheduling recurring snapshots, creating a user, and specifying the usage time zone in the Configuration table. - - - - - - - - Etc/GMT+12 - Etc/GMT+11 - Pacific/Samoa - - - Pacific/Honolulu - US/Alaska - America/Los_Angeles - - - Mexico/BajaNorte - US/Arizona - US/Mountain - - - America/Chihuahua - America/Chicago - America/Costa_Rica - - - America/Mexico_City - Canada/Saskatchewan - America/Bogota - - - America/New_York - America/Caracas - America/Asuncion - - - America/Cuiaba - America/Halifax - America/La_Paz - - - America/Santiago - America/St_Johns - America/Araguaina - - - America/Argentina/Buenos_Aires - America/Cayenne - America/Godthab - - - America/Montevideo - Etc/GMT+2 - Atlantic/Azores - - - Atlantic/Cape_Verde - Africa/Casablanca - Etc/UTC - - - Atlantic/Reykjavik - Europe/London - CET - - - Europe/Bucharest - Africa/Johannesburg - Asia/Beirut - - - Africa/Cairo - Asia/Jerusalem - Europe/Minsk - - - Europe/Moscow - Africa/Nairobi - Asia/Karachi - - - Asia/Kolkata - Asia/Bangkok - Asia/Shanghai - - - Asia/Kuala_Lumpur - Australia/Perth - Asia/Taipei - - - Asia/Tokyo - Asia/Seoul - Australia/Adelaide - - - Australia/Darwin - Australia/Brisbane - Australia/Canberra - - - Pacific/Guam - Pacific/Auckland - - - - - - diff --git a/docs/en-US/tools.xml b/docs/en-US/tools.xml deleted file mode 100644 index 8cddf28014f..00000000000 --- a/docs/en-US/tools.xml +++ /dev/null @@ -1,31 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Tools - - - - - diff --git a/docs/en-US/topology-req.xml b/docs/en-US/topology-req.xml deleted file mode 100644 index 75fe69b41a4..00000000000 --- 
a/docs/en-US/topology-req.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Topology Requirements - - - - - - - - - -
diff --git a/docs/en-US/translating-documentation.xml b/docs/en-US/translating-documentation.xml deleted file mode 100644 index 4d5e3d21b43..00000000000 --- a/docs/en-US/translating-documentation.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Translating &PRODUCT; Documentation - - There are two ways to translate the documentation: - - - Directly using the Transifex website and using their user interface. - Using the Transifex client and pushing your translated strings to the website. - - - Once a translation is complete, a site admin will pull the translated strings within the &PRODUCT; repository, build the documentation and publish it. - For instructions on how to use the Transifex website see http://sebgoa.blogspot.ch/2012/11/translating-apache-cloudstack-docs-with.html - For instructions on how to use the Transifex client to translate from the command line see http://sebgoa.blogspot.ch/2012/12/using-transifex-client-to-translate.html -
diff --git a/docs/en-US/troubleshooting-alerts.xml b/docs/en-US/troubleshooting-alerts.xml deleted file mode 100644 index 0efeb81dd1e..00000000000 --- a/docs/en-US/troubleshooting-alerts.xml +++ /dev/null @@ -1,57 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Alerts - The following is the list of alert type numbers. - -MEMORY = 0 -CPU = 1 -STORAGE =2 -STORAGE_ALLOCATED = 3 -PUBLIC_IP = 4 -PRIVATE_IP = 5 -HOST = 6 -USERVM = 7 -DOMAIN_ROUTER = 8 -CONSOLE_PROXY = 9 -ROUTING = 10// lost connection to default route (to the gateway) -STORAGE_MISC = 11 // lost connection to default route (to the gateway) -USAGE_SERVER = 12 // lost connection to default route (to the gateway) -MANAGMENT_NODE = 13 // lost connection to default route (to the gateway) -DOMAIN_ROUTER_MIGRATE = 14 -CONSOLE_PROXY_MIGRATE = 15 -USERVM_MIGRATE = 16 -VLAN = 17 -SSVM = 18 -USAGE_SERVER_RESULT = 19 -STORAGE_DELETE = 20; -UPDATE_RESOURCE_COUNT = 21; //Generated when we fail to update the resource count -USAGE_SANITY_RESULT = 22; -DIRECT_ATTACHED_PUBLIC_IP = 23; -LOCAL_STORAGE = 24; -RESOURCE_LIMIT_EXCEEDED = 25; //Generated when the resource limit exceeds the limit. Currently used for recurring snapshots only - - -
diff --git a/docs/en-US/troubleshooting-lb-rules-fails.xml b/docs/en-US/troubleshooting-lb-rules-fails.xml deleted file mode 100644 index 62955341e23..00000000000 --- a/docs/en-US/troubleshooting-lb-rules-fails.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Load balancer rules fail after changing network offering - - Symptom - After changing the network offering on a network, load balancer rules stop working. - - - Cause - Load balancing rules were created while using a network service offering that includes an external load balancer device such as NetScaler, and later the network service offering changed to one that uses the &PRODUCT; virtual router. - - - Solution - Create a firewall rule on the virtual router for each of your existing load balancing rules so that they continue to function. - -
diff --git a/docs/en-US/troubleshooting-recover-lost-virtual-router.xml b/docs/en-US/troubleshooting-recover-lost-virtual-router.xml deleted file mode 100644 index 12a51501379..00000000000 --- a/docs/en-US/troubleshooting-recover-lost-virtual-router.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Recovering a Lost Virtual Router - - Symptom - A virtual router is running, but the host is disconnected. A virtual router no longer functions as expected. - - - Cause - The Virtual router is lost or down. - - - Solution - If you are sure that a virtual router is down forever, or no longer functions as expected, destroy it. You must create one afresh while keeping the backup router up and running (it is assumed this is in a redundant router setup): - - - Force stop the router. Use the stopRouter API with forced=true parameter to do so. - Before you continue with destroying this router, ensure that the backup router is running. Otherwise the network connection will be lost. - Destroy the router by using the destroyRouter API. - - Recreate the missing router by using the restartNetwork API with cleanup=false parameter. For more information about redundant router setup, see Creating a New Network Offering. - For more information about the API syntax, see the API Reference at API Reference. -
diff --git a/docs/en-US/troubleshooting-unable-to-deploy-vms.xml b/docs/en-US/troubleshooting-unable-to-deploy-vms.xml deleted file mode 100644 index 412b9bfc0d8..00000000000 --- a/docs/en-US/troubleshooting-unable-to-deploy-vms.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Unable to deploy VMs from uploaded vSphere template - - Symptom - When attempting to create a VM, the VM will not deploy. - - - Cause - If the template was created by uploading an OVA file that was created using vSphere Client, it is possible the OVA contained an ISO image. If it does, the deployment of VMs from the template will fail. - - - Solution - Remove the ISO and re-upload the template. - -
diff --git a/docs/en-US/troubleshooting-unable-to-power-on-vm.xml b/docs/en-US/troubleshooting-unable-to-power-on-vm.xml deleted file mode 100644 index e037eda08f1..00000000000 --- a/docs/en-US/troubleshooting-unable-to-power-on-vm.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Unable to power on virtual machine on VMware - - Symptom - Virtual machine does not power on. You might see errors like: - - - Unable to open Swap File - Unable to access a file since it is locked - Unable to access Virtual machine configuration - - - Cause - A known issue on VMware machines. ESX hosts lock certain critical virtual machine files and file systems to prevent concurrent changes. Sometimes the files are not unlocked when the virtual machine is powered off. When a virtual machine attempts to power on, it can not access these critical files, and the virtual machine is unable to power on. - - - Solution - See the following: - - VMware Knowledge Base Article -
diff --git a/docs/en-US/troubleshooting-working-with-server-logs.xml b/docs/en-US/troubleshooting-working-with-server-logs.xml deleted file mode 100644 index fa0f78cae3d..00000000000 --- a/docs/en-US/troubleshooting-working-with-server-logs.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Working with Server Logs - The &PRODUCT; Management Server logs all web site, middle tier, and database activities for diagnostics purposes in /var/log/cloudstack/management/. The &PRODUCT; logs a variety of error messages. We recommend this command to find the problematic output in the Management Server log:. - When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text. - - grep -i -E 'exception|unable|fail|invalid|leak|warn|error' /var/log/cloudstack/management/management-server.log - - The &PRODUCT; processes requests with a Job ID. If you find an error in the logs and you are interested in debugging the issue you can grep for this job ID in the management server log. For example, suppose that you find the following ERROR message: - - 2010-10-04 13:49:32,595 ERROR [cloud.vm.UserVmManagerImpl] (Job-Executor-11:job-1076) Unable to find any host for [User|i-8-42-VM-untagged] - - Note that the job ID is 1076. You can track back the events relating to job 1076 with the following grep: - - grep "job-1076)" management-server.log - - The &PRODUCT; Agent Server logs its activities in /var/log/cloudstack/agent/. -
diff --git a/docs/en-US/troubleshooting.xml b/docs/en-US/troubleshooting.xml deleted file mode 100644 index 570d02e4315..00000000000 --- a/docs/en-US/troubleshooting.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Troubleshooting - - - - - - - - - diff --git a/docs/en-US/troublesht-dataloss-on-exp-primary-storage.xml b/docs/en-US/troublesht-dataloss-on-exp-primary-storage.xml deleted file mode 100644 index 4a94f60fbd3..00000000000 --- a/docs/en-US/troublesht-dataloss-on-exp-primary-storage.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Data Loss on Exported Primary Storage - - Symptom - Loss of existing data on primary storage which has been exposed as a Linux NFS server export on an iSCSI volume. - - - Cause - It is possible that a client from outside the intended pool has mounted the storage. When this occurs, the LVM is wiped and all data in the volume is lost - - - Solution - When setting up LUN exports, restrict the range of IP addresses that are allowed access by specifying a subnet mask. For example: - - echo “/export 192.168.1.0/24(rw,async,no_root_squash)†> /etc/exports - Adjust the above command to suit your deployment needs. - - More Information - See the export procedure in the "Secondary Storage" section of the &PRODUCT; Installation Guide - -
diff --git a/docs/en-US/troublesht-mtn-mode-not-working-on-vCenter.xml b/docs/en-US/troublesht-mtn-mode-not-working-on-vCenter.xml deleted file mode 100644 index 764d18825b8..00000000000 --- a/docs/en-US/troublesht-mtn-mode-not-working-on-vCenter.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Maintenance mode not working on vCenter - - Symptom - Host was placed in maintenance mode, but still appears live in vCenter. - - - Cause - The &PRODUCT; administrator UI was used to place the host in scheduled maintenance mode. This mode is separate from vCenter's maintenance mode. - - - Solution - Use vCenter to place the host in maintenance mode. - - - More Information - See - -
diff --git a/docs/en-US/tuning.xml b/docs/en-US/tuning.xml deleted file mode 100644 index 4f9dd01549f..00000000000 --- a/docs/en-US/tuning.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Tuning - This section provides tips on how to improve the performance of your cloud. - - - - - - diff --git a/docs/en-US/ui.xml b/docs/en-US/ui.xml deleted file mode 100644 index 94ea4eaf95c..00000000000 --- a/docs/en-US/ui.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - User Interface - - - diff --git a/docs/en-US/update-iso-vm.xml b/docs/en-US/update-iso-vm.xml deleted file mode 100644 index 98105f51198..00000000000 --- a/docs/en-US/update-iso-vm.xml +++ /dev/null @@ -1,47 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- - Changing a VM's Base Image - Every VM is created from a base image, which is a template or ISO which has been created and - stored in &PRODUCT;. Both cloud administrators and end users can create and modify templates, - ISOs, and VMs. - In &PRODUCT;, you can change an existing VM's base image from one template to another, - or from one ISO to another. (You can not change from an ISO to a template, or from a - template to an ISO). - For example, suppose there is a - template based on a particular operating system, and the OS vendor releases a software patch. - The administrator or user naturally wants to apply the patch and then make sure existing VMs - start using it. Whether a software update is involved or not, it's also possible to simply - switch a VM from its current template to any other desired template. - To change a VM's base image, call the restoreVirtualMachine API command and pass in the - virtual machine ID and a new template ID. The template ID parameter may refer to either a - template or an ISO, depending on which type of base image the VM was already using (it must - match the previous type of image). When this call occurs, the VM's root disk is first destroyed, - then a new root disk is created from the source designated in the template ID parameter. The new - root disk is attached to the VM, and now the VM is based on the new template. - You can also omit the template ID parameter from the restoreVirtualMachine call. In this - case, the VM's root disk is destroyed and recreated, but from the same template or ISO that was - already in use by the VM. -
\ No newline at end of file diff --git a/docs/en-US/upgrade-virtual-router-with-service-offering.xml b/docs/en-US/upgrade-virtual-router-with-service-offering.xml deleted file mode 100644 index dad1d906c5f..00000000000 --- a/docs/en-US/upgrade-virtual-router-with-service-offering.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Upgrading a Virtual Router with System Service Offerings - When &PRODUCT; creates a virtual router, it uses default settings which are defined in a default system service offering. See . All the virtual routers in a single guest network use the same system service offering. You can upgrade the capabilities of the virtual router by creating and applying a custom system service offering. - - Define your custom system service offering. See . In System VM Type, choose Domain Router. - Associate the system service offering with a network offering. - See "Creating Network Offerings" in the Administrator's Guide. - See . - - Apply the network offering to the network where you want the virtual routers to use the new system service offering. If this is a new network, follow the steps in Adding an Additional Guest Network on page 66. To change the service offering for existing virtual routers, follow the steps in . - -
diff --git a/docs/en-US/upload-existing-volume-to-vm.xml b/docs/en-US/upload-existing-volume-to-vm.xml deleted file mode 100644 index 46813747273..00000000000 --- a/docs/en-US/upload-existing-volume-to-vm.xml +++ /dev/null @@ -1,111 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Uploading an Existing Volume to a Virtual Machine - Existing data can be made accessible to a virtual machine. This is called uploading a volume - to the VM. For example, this is useful to upload data from a local file system and attach it to - a VM. Root administrators, domain administrators, and end users can all upload existing volumes - to VMs. - The upload is performed using HTTP. The uploaded volume is placed in the zone's secondary - storage - You cannot upload a volume if the preconfigured volume limit has already been reached. The - default limit for the cloud is set in the global configuration parameter max.account.volumes, - but administrators can also set per-domain limits that are different from the global default. - See Setting Usage Limits - To upload a volume: - - - (Optional) Create an MD5 hash (checksum) of the disk image file that you are going to - upload. After uploading the data disk, &PRODUCT; will use this value to verify that no data - corruption has occurred. - - - Log in to the &PRODUCT; UI as an administrator or user - - - In the left navigation bar, click Storage. - - - Click Upload Volume. - - - Provide the following: - - - Name and Description. Any desired name and a brief description that can be shown in - the UI. - - - Availability Zone. Choose the zone where you want to store the volume. VMs running - on hosts in this zone can attach the volume. - - - Format. Choose one of the following to indicate the disk image format of the - volume. - - - - - Hypervisor - Disk Image Format - - - - - XenServer - VHD - - - VMware - OVA - - - KVM - QCOW2 - - - - - - - - URL. The secure HTTP or HTTPS URL that &PRODUCT; can use to access your disk. The - type of file at the URL must match the value chosen in Format. For example, if Format is - VHD, the URL might look like the following: - http://yourFileServerIP/userdata/myDataDisk.vhd - - - MD5 checksum. (Optional) Use the hash that you created in step 1. 
- - - - - Wait until the status of the volume shows that the upload is complete. Click Instances - - Volumes, find the name you specified in step 5, and make sure the status is Uploaded. - - -
diff --git a/docs/en-US/upload-template.xml b/docs/en-US/upload-template.xml deleted file mode 100644 index f270c899a53..00000000000 --- a/docs/en-US/upload-template.xml +++ /dev/null @@ -1,67 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Uploading Templates - vSphere Templates and ISOsIf you are uploading a template that was created using vSphere Client, be sure the OVA file does not contain an ISO. If it does, the deployment of VMs from the template will fail. - Templates are uploaded based on a URL. HTTP is the supported access protocol. Templates are frequently large files. You can optionally gzip them to decrease upload times. - To upload a template: - - In the left navigation bar, click Templates. - Click Register Template. - Provide the following: - - Name and Description. These will be shown in the UI, so - choose something descriptive. - URL. The Management Server will download the file from the - specified URL, such as http://my.web.server/filename.vhd.gz. - Zone. Choose the zone where you want the template to be - available, or All Zones to make it available throughout - &PRODUCT;. - OS Type: This helps &PRODUCT; and the hypervisor perform - certain operations and make assumptions that improve the performance of the - guest. Select one of the following: - - If the operating system of the stopped VM is listed, choose it. - If the OS type of the stopped VM is not listed, choose Other. - You should not choose an older version of the OS than the version in the image. For example, choosing CentOS 5.4 to support a CentOS 6.2 image will in general not work. In those cases you should choose Other. - - - Hypervisor: The supported hypervisors are listed. Select the desired one. - Format. The format of the template upload file, such as VHD - or OVA. - Password Enabled. Choose Yes if your template has the - &PRODUCT; password change script installed. See Adding Password - Management to Your Templates - Extractable. Choose Yes if the template is available for extraction. If this option is selected, end users can - download a full image of a template. - Public. Choose Yes to make this template accessible to all - users of this &PRODUCT; installation. 
The template will appear in the - Community Templates list. See . - Featured. Choose Yes if you would like this template to be - more prominent for users to select. The template will appear in the Featured - Templates list. Only an administrator can make a template Featured. - - - -
diff --git a/docs/en-US/usage-record-format.xml b/docs/en-US/usage-record-format.xml deleted file mode 100644 index 7f7db06df26..00000000000 --- a/docs/en-US/usage-record-format.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Usage Record Format - - - - - - - - -
diff --git a/docs/en-US/usage-types.xml b/docs/en-US/usage-types.xml deleted file mode 100644 index 5a189028ad2..00000000000 --- a/docs/en-US/usage-types.xml +++ /dev/null @@ -1,109 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Usage Types - The following table shows all usage types. - - - - - - - - Type ID - Type Name - Description - - - - - 1 - RUNNING_VM - Tracks the total running time of a VM per usage record period. If the VM is upgraded during the usage period, you will get a separate Usage Record for the new upgraded VM. - - - 2 - ALLOCATED_VM - Tracks the total time the VM has been created to the time when it has been destroyed. This usage type is also useful in determining usage for specific templates such as Windows-based templates. - - - 3 - IP_ADDRESS - Tracks the public IP address owned by the account. - - - 4 - NETWORK_BYTES_SENT - Tracks the total number of bytes sent by all the VMs for an account. Cloud.com does not currently track network traffic per VM. - - - 5 - NETWORK_BYTES_RECEIVED - Tracks the total number of bytes received by all the VMs for an account. Cloud.com does not currently track network traffic per VM. - - - 6 - VOLUME - Tracks the total time a disk volume has been created to the time when it has been destroyed. - - - 7 - TEMPLATE - Tracks the total time a template (either created from a snapshot or uploaded to the cloud) has been created to the time it has been destroyed. The size of the template is also returned. - - - 8 - ISO - Tracks the total time an ISO has been uploaded to the time it has been removed from the cloud. The size of the ISO is also returned. - - - 9 - SNAPSHOT - Tracks the total time from when a snapshot has been created to the time it have been destroyed. - - - 11 - LOAD_BALANCER_POLICY - Tracks the total time a load balancer policy has been created to the time it has been removed. Cloud.com does not track whether a VM has been assigned to a policy. - - - 12 - PORT_FORWARDING_RULE - Tracks the time from when a port forwarding rule was created until the time it was removed. - - - 13 - NETWORK_OFFERING - The time from when a network offering was assigned to a VM until it is removed. 
- - - 14 - VPN_USERS - The time from when a VPN user is created until it is removed. - - - - -
diff --git a/docs/en-US/use-project-view.xml b/docs/en-US/use-project-view.xml deleted file mode 100644 index df874d82409..00000000000 --- a/docs/en-US/use-project-view.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using the Project View - If you are a member of a project, you can use &PRODUCT;’s project view to see project members, resources consumed, and more. The project view shows only information related to one project. It is a useful way to filter out other information so you can concentrate on a project status and resources. - - Log in to the &PRODUCT; UI. - Click Project View. - The project dashboard appears, showing the project’s VMs, volumes, users, events, network settings, and more. From the dashboard, you can: - - Click the Accounts tab to view and manage project members. If you are the project administrator, you can add new members, remove members, or change the role of a member from user to admin. Only one member at a time can have the admin role, so if you set another user’s role to admin, your role will change to regular user. - (If invitations are enabled) Click the Invitations tab to view and manage invitations that have been sent to new project members but not yet accepted. Pending invitations will remain in this list until the new member accepts, the invitation timeout is reached, or you cancel the invitation. - - - -
diff --git a/docs/en-US/user-data-and-meta-data.xml b/docs/en-US/user-data-and-meta-data.xml deleted file mode 100644 index 34007011de1..00000000000 --- a/docs/en-US/user-data-and-meta-data.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- User Data and Meta Data - &PRODUCT; provides API access to attach up to 32KB of user data to a deployed VM. Deployed VMs also have access to instance metadata via the virtual router. - User data can be accessed once the IP address of the virtual router is known. Once the IP address is known, use the following steps to access the user data: - - Run the following command to find the virtual router. - # cat /var/lib/dhclient/dhclient-eth0.leases | grep dhcp-server-identifier | tail -1 - - Access user data by running the following command using the result of the above command# curl http://10.1.1.1/latest/user-data - - Meta Data can be accessed similarly, using a URL of the form http://10.1.1.1/latest/meta-data/{metadata type}. (For backwards compatibility, the previous URL http://10.1.1.1/latest/{metadata type} is also supported.) For metadata type, use one of the following: - - service-offering. A description of the VMs service offering - availability-zone. The Zone name - local-ipv4. The guest IP of the VM - local-hostname. The hostname of the VM - public-ipv4. The first public IP for the router. (E.g. the first IP of eth2) - public-hostname. This is the same as public-ipv4 - instance-id. The instance name of the VM - -
diff --git a/docs/en-US/user-services-overview.xml b/docs/en-US/user-services-overview.xml deleted file mode 100644 index ad27375dd1d..00000000000 --- a/docs/en-US/user-services-overview.xml +++ /dev/null @@ -1,72 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - User Services Overview - In addition to the physical and logical infrastructure of your cloud - and the &PRODUCT; software and servers, you also need a layer of user - services so that people can actually make use of the cloud. This means - not just a user UI, but a set of options and resources that users can - choose from, such as templates for creating virtual machines, disk - storage, and more. If you are running a commercial service, you will be - keeping track of what services and resources users are consuming and - charging them for that usage. Even if you do not charge anything for - people to use your cloud – say, if the users are strictly internal to your - organization, or just friends who are sharing your cloud – you can still - keep track of what services they use and how much of them. - -
- Service Offerings, Disk Offerings, Network Offerings, and Templates - A user creating a new instance can make a variety of choices about - its characteristics and capabilities. &PRODUCT; provides several ways to - present users with choices when creating a new instance: - - - Service Offerings, defined by the &PRODUCT; administrator, - provide a choice of CPU speed, number of CPUs, RAM size, tags on the - root disk, and other choices. See Creating a New Compute Offering. - - Disk Offerings, defined by the &PRODUCT; administrator, - provide a choice of disk size and IOPS (Quality of Service) for primary - data storage. See Creating a New Disk Offering. - - Network Offerings, defined by the &PRODUCT; administrator, - describe the feature set that is available to end users from the virtual - router or external networking devices on a given guest network. See - Network Offerings. - - Templates, defined by the &PRODUCT; administrator or by - any &PRODUCT; user, are the base OS images that the user can choose - from when creating a new instance. For example, &PRODUCT; includes - CentOS as a template. See Working with Templates. - - - In addition to these choices that are provided for users, there is - another type of service offering which is available only to the &PRODUCT; - root administrator, and is used for configuring virtual infrastructure - resources. For more information, see Upgrading a Virtual Router with - System Service Offerings. - -
-
diff --git a/docs/en-US/using-multiple-guest-networks.xml b/docs/en-US/using-multiple-guest-networks.xml deleted file mode 100644 index cb7e9c73be3..00000000000 --- a/docs/en-US/using-multiple-guest-networks.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Using Multiple Guest Networks - In zones that use advanced networking, additional networks for guest traffic may be added at any time after the initial installation. You can also customize the domain name associated with the network by specifying a DNS suffix for each network. - A VM's networks are defined at VM creation time. A VM cannot add or remove networks after it has been created, although the user can go into the guest and remove the IP address from the NIC on a particular network. - Each VM has just one default network. The virtual router's DHCP reply will set the guest's default gateway as that for the default network. Multiple non-default networks may be added to a guest in addition to the single, required default network. The administrator can control which networks are available as the default network. - Additional networks can either be available to all accounts or be assigned to a specific account. Networks that are available to all accounts are zone-wide. Any user with access to the zone can create a VM with access to that network. These zone-wide networks provide little or no isolation between guests. Networks that are assigned to a specific account provide strong isolation. - - - 
diff --git a/docs/en-US/using-netscaler-load-balancers.xml b/docs/en-US/using-netscaler-load-balancers.xml deleted file mode 100644 index 7d18331f106..00000000000 --- a/docs/en-US/using-netscaler-load-balancers.xml +++ /dev/null @@ -1,90 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- About Using a NetScaler Load Balancer - Citrix NetScaler is supported as an external network element for load balancing in zones - that use isolated networking in advanced zones. Set up an external load balancer when you want - to provide load balancing through means other than &PRODUCT;’s provided virtual router. - - In a Basic zone, load balancing service is supported only if Elastic IP or Elastic LB - services are enabled. - - When NetScaler load balancer is used to provide EIP or ELB services in a Basic zone, ensure - that all guest VM traffic must enter and exit through the NetScaler device. When inbound traffic - goes through the NetScaler device, traffic is routed by using the NAT protocol depending on the - EIP/ELB configured on the public IP to the private IP. The traffic that is originated from the - guest VMs usually goes through the layer 3 router. To ensure that outbound traffic goes through - NetScaler device providing EIP/ELB, layer 3 router must have a policy-based routing. A - policy-based route must be set up so that all traffic originated from the guest VM's are - directed to NetScaler device. This is required to ensure that the outbound traffic from the - guest VM's is routed to a public IP by using NAT.For more information on Elastic IP, see . - The NetScaler can be set up in direct (outside the firewall) mode. It must be added before - any load balancing rules are deployed on guest VMs in the zone. - The functional behavior of the NetScaler with &PRODUCT; is the same as described in the - &PRODUCT; documentation for using an F5 external load balancer. The only exception is that the - F5 supports routing domains, and NetScaler does not. NetScaler can not yet be used as a - firewall. - To install and enable an external load balancer for &PRODUCT; management, see . - External Guest Load Balancer Integration in the Installation - Guide. - - The Citrix NetScaler comes in three varieties. 
The following table summarizes how these - variants are treated in &PRODUCT;. - - - - - NetScaler ADC Type - Description of Capabilities - &PRODUCT; Supported Features - - - - - MPX - Physical appliance. Capable of deep packet inspection. Can act as application - firewall and load balancer - In advanced zones, load balancer functionality fully supported without - limitation. In basic zones, static NAT, elastic IP (EIP), and elastic load balancing - (ELB) are also provided. - - - VPX - Virtual appliance. Can run as VM on XenServer, ESXi, and Hyper-V hypervisors. - Same functionality as MPX - Supported on ESXi and XenServer. Same functional support as for MPX. - &PRODUCT; will treat VPX and MPX as the same device type. - - - SDX - Physical appliance. Can create multiple fully isolated VPX instances on a - single appliance to support multi-tenant usage - &PRODUCT; will dynamically provision, configure, and manage the life cycle of - VPX instances on the SDX. Provisioned instances are added into &PRODUCT; automatically - – no manual configuration by the administrator is required. Once a VPX instance is - added into &PRODUCT;, it is treated the same as a VPX on an ESXi host. - - - - -
diff --git a/docs/en-US/using-sshkeys.xml b/docs/en-US/using-sshkeys.xml deleted file mode 100644 index f34dfa0c15b..00000000000 --- a/docs/en-US/using-sshkeys.xml +++ /dev/null @@ -1,112 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using SSH Keys for Authentication - In addition to the username and password authentication, &PRODUCT; supports using SSH keys to log in to the cloud infrastructure for additional security. You can use the createSSHKeyPair API to generate the SSH keys. - Because each cloud user has their own SSH key, one cloud user cannot log in to another cloud user's instances unless they share their SSH key files. Using a single SSH key pair, you can manage multiple instances. -
- Creating an Instance Template that Supports SSH Keys - Create an instance template that supports SSH Keys. - - Create a new instance by using the template provided by cloudstack. - For more information on creating a new instance, see - - Download the cloudstack script from The SSH Key Gen Script to the instance you have created. - wget http://downloads.sourceforge.net/project/cloudstack/SSH%20Key%20Gen%20Script/cloud-set-guest-sshkey.in?r=http%3A%2F%2Fsourceforge.net%2Fprojects%2Fcloudstack%2Ffiles%2FSSH%2520Key%2520Gen%2520Script%2F&ts=1331225219&use_mirror=iweb - - Copy the file to /etc/init.d. - cp cloud-set-guest-sshkey.in /etc/init.d/ - - Give the necessary permissions on the script: - chmod +x /etc/init.d/cloud-set-guest-sshkey.in - - Run the script while starting up the operating system: - chkconfig --add cloud-set-guest-sshkey.in - - Stop the instance. - - -
-
- Creating the SSH Keypair - You must make a call to the createSSHKeyPair api method. You can either use the &PRODUCT; Python API library or the curl commands to make the call to the cloudstack api. - For example, make a call from the cloudstack server to create a SSH keypair called "keypair-doc" for the admin account in the root domain: - Ensure that you adjust these values to meet your needs. If you are making the API call from a different server, your URL/PORT will be different, and you will need to use the API keys. - - Run the following curl command: - curl --globoff "http://localhost:8096/?command=createSSHKeyPair&name=keypair-doc&account=admin&domainid=5163440e-c44b-42b5-9109-ad75cae8e8a2" - The output is something similar to what is given below: - <?xml version="1.0" encoding="ISO-8859-1"?><createsshkeypairresponse cloud-stack-version="3.0.0.20120228045507"><keypair><name>keypair-doc</name><fingerprint>f6:77:39:d5:5e:77:02:22:6a:d8:7f:ce:ab:cd:b3:56</fingerprint><privatekey>-----BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQCSydmnQ67jP6lNoXdX3noZjQdrMAWNQZ7y5SrEu4wDxplvhYci -dXYBeZVwakDVsU2MLGl/K+wefwefwefwefwefJyKJaogMKn7BperPD6n1wIDAQAB -AoGAdXaJ7uyZKeRDoy6wA0UmF0kSPbMZCR+UTIHNkS/E0/4U+6lhMokmFSHtu -mfDZ1kGGDYhMsdytjDBztljawfawfeawefawfawfawQQDCjEsoRdgkduTy -QpbSGDIa11Jsc+XNDx2fgRinDsxXI/zJYXTKRhSl/LIPHBw/brW8vzxhOlSOrwm7 -VvemkkgpAkEAwSeEw394LYZiEVv395ar9MLRVTVLwpo54jC4tsOxQCBlloocK -lYaocpk0yBqqOUSBawfIiDCuLXSdvBo1Xz5ICTM19vgvEp/+kMuECQBzm -nVo8b2Gvyagqt/KEQo8wzH2THghZ1qQ1QRhIeJG2aissEacF6bGB2oZ7Igim5L14 -4KR7OeEToyCLC2k+02UCQQCrniSnWKtDVoVqeK/zbB32JhW3Wullv5p5zUEcd -KfEEuzcCUIxtJYTahJ1pvlFkQ8anpuxjSEDp8x/18bq3 ------END RSA PRIVATE KEY----- -</privatekey></keypair></createsshkeypairresponse> - Copy the key data into a file. 
The file looks like this: - -----BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQCSydmnQ67jP6lNoXdX3noZjQdrMAWNQZ7y5SrEu4wDxplvhYci -dXYBeZVwakDVsU2MLGl/K+wefwefwefwefwefJyKJaogMKn7BperPD6n1wIDAQAB -AoGAdXaJ7uyZKeRDoy6wA0UmF0kSPbMZCR+UTIHNkS/E0/4U+6lhMokmFSHtu -mfDZ1kGGDYhMsdytjDBztljawfawfeawefawfawfawQQDCjEsoRdgkduTy -QpbSGDIa11Jsc+XNDx2fgRinDsxXI/zJYXTKRhSl/LIPHBw/brW8vzxhOlSOrwm7 -VvemkkgpAkEAwSeEw394LYZiEVv395ar9MLRVTVLwpo54jC4tsOxQCBlloocK -lYaocpk0yBqqOUSBawfIiDCuLXSdvBo1Xz5ICTM19vgvEp/+kMuECQBzm -nVo8b2Gvyagqt/KEQo8wzH2THghZ1qQ1QRhIeJG2aissEacF6bGB2oZ7Igim5L14 -4KR7OeEToyCLC2k+02UCQQCrniSnWKtDVoVqeK/zbB32JhW3Wullv5p5zUEcd -KfEEuzcCUIxtJYTahJ1pvlFkQ8anpuxjSEDp8x/18bq3 ------END RSA PRIVATE KEY----- - Save the file. - -
-
- Creating an Instance - After you save the SSH keypair file, you must create an instance by using the template that you created at . Ensure that you use the same SSH key name that you created at . - You cannot create the instance by using the GUI at this time and associate the instance with the newly created SSH keypair. - A sample curl command to create a new instance is: - curl --globoff http://localhost:<port number>/?command=deployVirtualMachine\&zoneId=1\&serviceOfferingId=18727021-7556-4110-9322-d625b52e0813\&templateId=e899c18a-ce13-4bbf-98a9-625c5026e0b5\&securitygroupids=ff03f02f-9e3b-48f8-834d-91b822da40c5\&account=admin\&domainid=1\&keypair=keypair-doc - Substitute the template, service offering and security group IDs (if you are using the security group feature) that are in your cloud environment. -
-
- Logging In Using the SSH Keypair - To test your SSH key generation is successful, check whether you can log in to the cloud setup. - For example, from a Linux OS, run: - ssh -i ~/.ssh/keypair-doc <ip address> - The -i parameter tells the ssh client to use an SSH key found at ~/.ssh/keypair-doc. -
-
- Resetting SSH Keys - With the API command resetSSHKeyForVirtualMachine, a user can set or reset the SSH keypair - assigned to a virtual machine. A lost or compromised SSH keypair - can be changed, and the user can access the VM by using the new keypair. Just create or register a - new keypair, then call resetSSHKeyForVirtualMachine. -
-
diff --git a/docs/en-US/using-swift-for-secondary-storage.xml b/docs/en-US/using-swift-for-secondary-storage.xml deleted file mode 100644 index 329cf81de08..00000000000 --- a/docs/en-US/using-swift-for-secondary-storage.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using Swift for Secondary Storage - &PRODUCT; supports OpenStack Object Storage (Swift) for secondary storage. When using Swift, you configure Swift storage for the entire &PRODUCT;, then set up NFS secondary storage for each zone as usual. The NFS storage in each zone acts as a staging area through which all templates and other secondary storage data pass before being forwarded to Swift. The Swift storage acts as a cloud-wide resource, making templates and other data available to any zone in the cloud. There is no hierarchy in the Swift storage, just one Swift container per storage object. Any secondary storage in the whole cloud can pull a container from Swift at need. It is not necessary to copy templates and snapshots from one zone to another, as would be required when using zone NFS alone. Everything is available everywhere. - Swift storage must be set up before you add NFS secondary storage to zones. This is accomplished through some additional configuration steps on a fresh Management Server installation, before you add the first zone. The procedure is described in Adding a Zone in the Advanced Installation Guide.
diff --git a/docs/en-US/using-vpn-with-mac.xml b/docs/en-US/using-vpn-with-mac.xml deleted file mode 100644 index 769682445e4..00000000000 --- a/docs/en-US/using-vpn-with-mac.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using Remote Access VPN with Mac OS X - First, be sure you've configured the VPN settings in your &PRODUCT; install. This section is only concerned with connecting via Mac OS X to your VPN. - Note, these instructions were written on Mac OS X 10.7.5. They may differ slightly in older or newer releases of Mac OS X. - - On your Mac, open System Preferences and click Network. - Make sure Send all traffic over VPN connection is not checked. - If your preferences are locked, you'll need to click the lock in the bottom left-hand corner to make any changes and provide your administrator credentials. - You will need to create a new network entry. Click the plus icon on the bottom left-hand side and you'll see a dialog that says "Select the interface and enter a name for the new service." Select VPN from the Interface drop-down menu, and "L2TP over IPSec" for the VPN Type. Enter whatever you like within the "Service Name" field. - You'll now have a new network interface with the name of whatever you put in the "Service Name" field. For the purposes of this example, we'll assume you've named it "CloudStack." Click on that interface and provide the IP address of the interface for your VPN under the Server Address field, and the user name for your VPN under Account Name. - Click Authentication Settings, and add the user's password under User Authentication and enter the pre-shared IPSec key in the Shared Secret field under Machine Authentication. Click OK. - You may also want to click the "Show VPN status in menu bar" but that's entirely optional. - Now click "Connect" and you will be connected to the CloudStack VPN. - - - -
diff --git a/docs/en-US/using-vpn-with-windows.xml b/docs/en-US/using-vpn-with-windows.xml deleted file mode 100644 index 82e556c58a4..00000000000 --- a/docs/en-US/using-vpn-with-windows.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Using Remote Access VPN with Windows - The procedure to use VPN varies by Windows version. Generally, the user must edit the VPN properties and make sure that the default route is not the VPN. The following steps are for Windows L2TP clients on Windows Vista. The commands should be similar for other Windows versions. - - Log in to the &PRODUCT; UI and click on the source NAT IP for the account. The VPN tab should display the IPsec preshared key. Make a note of this and the source NAT IP. The UI also lists one or more users and their passwords. Choose one of these users, or, if none exists, add a user and password. - On the Windows box, go to Control Panel, then select Network and Sharing center. Click Setup a connection or network. - In the next dialog, select No, create a new connection. - In the next dialog, select Use my Internet Connection (VPN). - In the next dialog, enter the source NAT IP from step and give the connection a name. Check Don't connect now. - In the next dialog, enter the user name and password selected in step . - Click Create. - Go back to the Control Panel and click Network Connections to see the new connection. The connection is not active yet. - Right-click the new connection and select Properties. In the Properties dialog, select the Networking tab. - In Type of VPN, choose L2TP IPsec VPN, then click IPsec settings. Select Use preshared key. Enter the preshared key from step . - The connection is ready for activation. Go back to Control Panel -> Network Connections and double-click the created connection. - Enter the user name and password from step . - -
diff --git a/docs/en-US/vcenter-maintenance-mode.xml b/docs/en-US/vcenter-maintenance-mode.xml deleted file mode 100644 index d36dd7cdb44..00000000000 --- a/docs/en-US/vcenter-maintenance-mode.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- vCenter and Maintenance Mode - To enter maintenance mode on a vCenter host, both vCenter and &PRODUCT; must be used in concert. &PRODUCT; and vCenter have separate maintenance modes that work closely together. - - Place the host into &PRODUCT;'s "scheduled maintenance" mode. This does not invoke the vCenter maintenance mode, but only causes VMs to be migrated off the host - When the &PRODUCT; maintenance mode is requested, the host first moves into the Prepare for Maintenance state. In this state it cannot be the target of new guest VM starts. Then all VMs will be migrated off the server. Live migration will be used to move VMs off the host. This allows the guests to be migrated to other hosts with no disruption to the guests. After this migration is completed, the host will enter the Ready for Maintenance mode. - Wait for the "Ready for Maintenance" indicator to appear in the UI. - Now use vCenter to perform whatever actions are necessary to maintain the host. During this time, the host cannot be the target of new VM allocations. - When the maintenance tasks are complete, take the host out of maintenance mode as follows: - - First use vCenter to exit the vCenter maintenance mode. - This makes the host ready for &PRODUCT; to reactivate it. - Then use &PRODUCT;'s administrator UI to cancel the &PRODUCT; maintenance mode - When the host comes back online, the VMs that were migrated off of it may be migrated back to it manually and new VMs can be added. - - - -
diff --git a/docs/en-US/verifying-source.xml b/docs/en-US/verifying-source.xml deleted file mode 100644 index 668ea84f266..00000000000 --- a/docs/en-US/verifying-source.xml +++ /dev/null @@ -1,82 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Verifying the downloaded release - - There are a number of mechanisms to check the authenticity and validity of a - downloaded release. - -
- Getting the KEYS - - To enable you to verify the GPG signature, you will need to download the - KEYS - file. - - - You next need to import those keys, which you can do by running: - # gpg --import KEYS - -
-
- GPG - - The &PRODUCT; project provides a detached GPG signature of the release. - To check the signature, run the following command: - $ gpg --verify apache-cloudstack-4.0.0-incubating-src.tar.bz2.asc - - - If the signature is valid you will see a line of output that contains 'Good signature'. - -
-
- MD5 - - In addition to the cryptographic signature, &PRODUCT; has an MD5 checksum - that you can use to verify the download matches the release. - You can verify this hash by executing the following command: - $ gpg --print-md MD5 apache-cloudstack-4.0.0-incubating-src.tar.bz2 | diff - apache-cloudstack-4.0.0-incubating-src.tar.bz2.md5 - - - If this successfully completes you should see no output. If there is any output from them, - then there is a difference between the hash you generated locally and the hash that has been - pulled from the server. - -
-
- SHA512 - - In addition to the MD5 hash, the &PRODUCT; project provides a SHA512 - cryptographic hash to aid in assurance of the validity of the downloaded - release. You can verify this hash by executing the following command: - $ gpg --print-md SHA512 apache-cloudstack-4.0.0-incubating-src.tar.bz2 | diff - apache-cloudstack-4.0.0-incubating-src.tar.bz2.sha - - - If this command successfully completes you should see no output. If there is any output from them, - then there is a difference between the hash you generated locally and the hash that has been - pulled from the server. - -
-
diff --git a/docs/en-US/virtual-machine-usage-record-format.xml b/docs/en-US/virtual-machine-usage-record-format.xml deleted file mode 100644 index ac8fec9a9d2..00000000000 --- a/docs/en-US/virtual-machine-usage-record-format.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Virtual Machine Usage Record Format - For running and allocated virtual machine usage, the following fields exist in a usage record: - - account – name of the account - accountid – ID of the account - domainid – ID of the domain in which this account resides - zoneid – Zone where the usage occurred - description – A string describing what the usage record is tracking - usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for VM running time) - usagetype – A number representing the usage type (see Usage Types) - rawusage – A number representing the actual usage in hours - virtualMachineId – The ID of the virtual machine - name – The name of the virtual machine - offeringid – The ID of the service offering - templateid – The ID of the template or the ID of the parent template. The parent template value is present when the current template was created from a volume. - usageid – Virtual machine - type – Hypervisor - startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record - -
diff --git a/docs/en-US/virtual-machines.xml b/docs/en-US/virtual-machines.xml deleted file mode 100644 index 8d8847853db..00000000000 --- a/docs/en-US/virtual-machines.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - Working With Virtual Machines - - - - - - - - - - - -
- Resetting the Virtual Machine Root Volume on Reboot - For secure environments, and to ensure that VM state is not persisted across reboots, - you can reset the root disk. For more information, see . -
- - - -
diff --git a/docs/en-US/virtual-router.xml b/docs/en-US/virtual-router.xml deleted file mode 100644 index c9b403b1e3e..00000000000 --- a/docs/en-US/virtual-router.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Virtual Router - The virtual router is a type of System Virtual Machine. The virtual router is one of the most frequently used service providers in &PRODUCT;. The end user has no direct access to the virtual router. Users can ping the virtual router and take actions that affect it (such as setting up port forwarding), but users do not have SSH access into the virtual router. - There is no mechanism for the administrator to log in to the virtual router. Virtual routers can be restarted by administrators, but this will interrupt public network access and other services for end users. A basic test in debugging networking issues is to attempt to ping the virtual router from a guest VM. Some of the characteristics of the virtual router are determined by its associated system service offering. - - - -
diff --git a/docs/en-US/vlan-allocation-eg.xml b/docs/en-US/vlan-allocation-eg.xml deleted file mode 100644 index 3ffd1666730..00000000000 --- a/docs/en-US/vlan-allocation-eg.xml +++ /dev/null @@ -1,71 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- VLAN Allocation Example - VLANs are required for public and guest traffic. The following is an example of a VLAN - allocation scheme: - - - - - VLAN IDs - Traffic type - Scope - - - - - less than 500 - Management traffic. Reserved for administrative purposes. - &PRODUCT; software can access this, hypervisors, system VMs. - - - 500-599 - VLAN carrying public traffic. - &PRODUCT; accounts. - - - 600-799 - VLANs carrying guest traffic. - &PRODUCT; accounts. Account-specific VLAN is chosen from this - pool. - - - 800-899 - VLANs carrying guest traffic. - &PRODUCT; accounts. Account-specific VLAN chosen by &PRODUCT; admin to assign - to that account. - - - 900-999 - VLAN carrying guest traffic - &PRODUCT; accounts. Can be scoped by project, domain, or all - accounts. - - - greater than 1000 - Reserved for future use - - - - - -
diff --git a/docs/en-US/vlan-assign-isolated-nw.xml b/docs/en-US/vlan-assign-isolated-nw.xml deleted file mode 100644 index 424ecd2ac4a..00000000000 --- a/docs/en-US/vlan-assign-isolated-nw.xml +++ /dev/null @@ -1,66 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Assigning VLANs to Isolated Networks - &PRODUCT; provides you the ability to control VLAN assignment to Isolated networks. As a - Root admin, you can assign a VLAN ID when a network is created, just the way it's done for - Shared networks. - The former behaviour also is supported — VLAN is randomly allocated to a network from - the VNET range of the physical network when the network turns to Implemented state. The VLAN is - released back to the VNET pool when the network shuts down as a part of the Network Garbage - Collection. The VLAN can be re-used either by the same network when it is implemented again, or - by any other network. On each subsequent implementation of a network, a new VLAN can be - assigned. - Only the Root admin can assign VLANs because the regular users or domain admin are not aware - of the physical network topology. They cannot even view what VLAN is assigned to a - network. - To enable you to assign VLANs to Isolated networks, - - - Create a network offering by specifying the following: - - - Guest Type: Select Isolated. - - - Specify VLAN: Select the option. - - - For more information, see . - For more information, see the &PRODUCT; Installation Guide. - - - Using this network offering, create a network. - You can create a VPC tier or an Isolated network. - - - Specify the VLAN when you create the network. - When VLAN is specified, a CIDR and gateway are assigned to this network and the state is - changed to Setup. In this state, the network will not be garbage collected. - - - - You cannot change a VLAN once it's assigned to the network. The VLAN remains with the - network for its entire life cycle. - -
diff --git a/docs/en-US/vlan-provisioning.xml b/docs/en-US/vlan-provisioning.xml deleted file mode 100644 index 2d1a49e04c5..00000000000 --- a/docs/en-US/vlan-provisioning.xml +++ /dev/null @@ -1,46 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- VLAN Provisioning - &PRODUCT; automatically creates and destroys interfaces bridged to VLANs on the hosts. In - general the administrator does not need to manage this process. - &PRODUCT; manages VLANs differently based on hypervisor type. For XenServer or KVM, the - VLANs are created on only the hosts where they will be used and then they are destroyed when all - guests that require them have been terminated or moved to another host. - For vSphere the VLANs are provisioned on all hosts in the cluster even if there is no guest - running on a particular Host that requires the VLAN. This allows the administrator to perform - live migration and other functions in vCenter without having to create the VLAN on the - destination Host. Additionally, the VLANs are not removed from the Hosts when they are no longer - needed. - You can use the same VLANs on different physical networks provided that each physical - network has its own underlying layer-2 infrastructure, such as switches. For example, you can - specify VLAN range 500 to 1000 while deploying physical networks A and B in an Advanced zone - setup. This capability allows you to set up an additional layer-2 physical infrastructure on a - different physical NIC and use the same set of VLANs if you run out of VLANs. Another advantage - is that you can use the same set of IPs for different customers, each one with their own routers - and the guest networks on different physical NICs. - - - -
diff --git a/docs/en-US/vm-lifecycle.xml b/docs/en-US/vm-lifecycle.xml deleted file mode 100644 index 15d9f7df590..00000000000 --- a/docs/en-US/vm-lifecycle.xml +++ /dev/null @@ -1,43 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- VM Lifecycle - Virtual machines can be in the following states: - - - - basic-deployment.png: Basic two-machine &PRODUCT; deployment - - Once a virtual machine is destroyed, it cannot be recovered. All the resources used by the virtual machine will be reclaimed by the system. This includes the virtual machine’s IP address. - A stop will attempt to gracefully shut down the operating system, which typically involves terminating all the running applications. If the operating system cannot be stopped, it will be forcefully terminated. This has the same effect as pulling the power cord to a physical machine. - A reboot is a stop followed by a start. - &PRODUCT; preserves the state of the virtual machine hard disk until the machine is destroyed. - A running virtual machine may fail because of hardware or network issues. A failed virtual machine is in the down state. - The system places the virtual machine into the down state if it does not receive the heartbeat from the hypervisor for three minutes. - The user can manually restart the virtual machine from the down state. - The system will start the virtual machine from the down state automatically if the virtual machine is marked as HA-enabled.
- diff --git a/docs/en-US/vm-snapshots.xml b/docs/en-US/vm-snapshots.xml deleted file mode 100644 index 3e72fe40ff6..00000000000 --- a/docs/en-US/vm-snapshots.xml +++ /dev/null @@ -1,148 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Virtual Machine Snapshots for VMware - (VMware hosts only) - In addition to the existing &PRODUCT; ability to snapshot individual VM volumes, - you can now take a VM snapshot to preserve all the VM's data volumes as well as (optionally) its CPU/memory state. - This is useful for quick restore of a VM. - For example, you can snapshot a VM, then make changes such as software upgrades. - If anything goes wrong, simply restore the VM to its previous state using the previously saved VM snapshot. - - The snapshot is created using the VMware native snapshot facility. The VM snapshot - includes not only the data volumes, but optionally also whether the VM is running or - turned off (CPU state) and the memory contents. The snapshot is stored in &PRODUCT;'s - primary storage. - VM snapshots can have a parent/child relationship. - Each successive snapshot of the same VM is the child of the snapshot that came before it. - Each time you take an additional snapshot of the same VM, it saves only the differences - between the current state of the VM and the state stored in the most recent previous snapshot. - The previous snapshot becomes a parent, and the new snapshot is its child. - It is possible to create a long chain of these parent/child snapshots, - which amount to a "redo" record leading from the current state of the VM back to the - original. - If you need more information about VM snapshots, check out the VMware documentation - and the VMware Knowledge Base, especially - Understanding virtual machine snapshots. -
- Limitations on VM Snapshots - - If a VM has some stored snapshots, you can't attach new volume to the VM - or delete any existing volumes. - If you change the volumes on the VM, it would become impossible to restore the VM snapshot - which was created with the previous volume structure. - If you want to attach a volume to such a VM, first delete its snapshots. - - VM snapshots which include both data volumes and memory can't be kept if you change the VM's - service offering. Any existing VM snapshots of this type will be discarded. - - You can't make a VM snapshot at the same time as you are taking a volume - snapshot. - - - The "quiesce" option is not supported. This option is provided by the underlying - VMware snapshot facility so that you can choose whether to quiesce the file system - on a running virtual machine before taking the snapshot. In &PRODUCT;, the quiesce option is always - set to false; the file system is not quiesced before taking a snapshot of a running VM. - - - You should use only &PRODUCT; to create VM snapshots on VMware hosts managed by &PRODUCT;. - Any snapshots that you make directly on vSphere will not be tracked in &PRODUCT;. - -
-
- Configuring VM Snapshots - The cloud administrator can use global configuration variables to control the behavior of VM snapshots. - To set these variables, go through the Global Settings area of the &PRODUCT; UI. - - - - - Configuration Setting Name - Description - - - - - vmsnapshots.max - The maximum number of VM snapshots that can be saved for any given virtual machine in the cloud. - The total possible number of VM snapshots in the cloud is (number of VMs) * vmsnapshots.max. - If the number of snapshots for any VM ever hits the maximum, the older ones are removed - by the snapshot expunge job. - - - - vmsnapshot.create.wait - Number of seconds to wait for a snapshot job to succeed before declaring failure and issuing an error. - - - - -
-
- Using VM Snapshots - To create a VM snapshot using the &PRODUCT; UI: - - Log in to the &PRODUCT; UI as a user or administrator. - Click Instances. - Click the name of the VM you want to snapshot. - Click the Take VM Snapshot button. - - - - - - If a snapshot is already in progress, then clicking this button will have no effect. - - Provide a name and description. These will be displayed in the VM Snapshots list. - (For running VMs only) If you want to include the VM's memory in the snapshot, click the - Memory checkbox. This saves the CPU and memory state of the virtual machine. If you - don't check this box, then only the current state of the VM disk is saved. Checking - this box makes the snapshot take longer. - Click OK. - - To delete a snapshot or restore a VM to the state saved in a particular snapshot: - - Navigate to the VM as described in the earlier steps. - Click View VM Snapshots. - In the list of snapshots, click the name of the snapshot you want to work with. - Depending on what you want to do: - To delete the snapshot, click the Delete button. - - - - - - To revert to the snapshot, click the Revert button. - - - - - - - - VM snapshots are deleted automatically when a VM is destroyed. - You don't have to manually delete the snapshots in this case. -
-
diff --git a/docs/en-US/vm-storage-migration.xml b/docs/en-US/vm-storage-migration.xml deleted file mode 100644 index 51c6f34a757..00000000000 --- a/docs/en-US/vm-storage-migration.xml +++ /dev/null @@ -1,48 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- VM Storage Migration - Supported in XenServer, KVM, and VMware. - This procedure is different from moving disk volumes from one VM to another as described in - . - - You can migrate a virtual machine’s root disk volume or any additional data disk volume from one storage pool to another in the same zone. - You can use the storage migration feature to achieve some commonly desired administration goals, such as balancing the load on storage pools and increasing the reliability of virtual machines by moving them away from any storage pool that is experiencing issues. - On XenServer and VMware, live migration of VM storage is enabled through &PRODUCT; - support for XenMotion and vMotion. - Live storage migration allows VMs to be moved from one host to another, where the VMs are - not located on storage shared between the two hosts. It provides the option to live - migrate a VM’s disks along with the VM itself. It is possible to migrate a VM from one - XenServer resource pool / VMware cluster to another, or to migrate a VM whose disks are on - local storage, or even to migrate a VM’s disks from one storage repository to another, all - while the VM is running. - Because of a limitation in VMware, live migration of storage for a VM is allowed only - if the source and target storage pool are accessible to the source host; that is, the host - where the VM is running when the live migration operation is requested. - - - -
diff --git a/docs/en-US/vmware-cluster-config-dvswitch.xml b/docs/en-US/vmware-cluster-config-dvswitch.xml deleted file mode 100644 index 2aeea2a5e5c..00000000000 --- a/docs/en-US/vmware-cluster-config-dvswitch.xml +++ /dev/null @@ -1,283 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Configuring a VMware Datacenter with VMware Distributed Virtual Switch - &PRODUCT; supports VMware vNetwork Distributed Switch (VDS) for virtual network - configuration in a VMware vSphere environment. This section helps you configure VMware VDS in a - &PRODUCT; deployment. Each vCenter server instance can support up to 128 VDS instances and each - VDS instance can manage up to 500 VMware hosts. -
- About VMware Distributed Virtual Switch - VMware VDS is an aggregation of host-level virtual switches on a VMware vCenter server. - VDS abstracts the configuration of individual virtual switches that span across a large number - of hosts, and enables centralized provisioning, administration, and monitoring for your entire - datacenter from a centralized interface. In effect, a VDS acts as a single virtual switch at - the datacenter level and manages networking for a number of hosts in a datacenter from a - centralized VMware vCenter server. Each VDS maintains network runtime state for VMs as they - move across multiple hosts, enabling inline monitoring and centralized firewall services. A - VDS can be deployed with or without Virtual Standard Switch and a Nexus 1000V virtual - switch. -
-
- Prerequisites and Guidelines - - - VMware VDS is supported only on Public and Guest traffic in &PRODUCT;. - - - VMware VDS does not support multiple VDS per traffic type. If a user has many VDS - switches, only one can be used for Guest traffic and another one for Public - traffic. - - - Additional switches of any type can be added for each cluster in the same zone. While - adding the clusters with different switch type, traffic labels are overridden at the - cluster level. - - - Management and Storage network does not support VDS. Therefore, use Standard Switch - for these networks. - - - When you remove a guest network, the corresponding dvportgroup will not be removed on - the vCenter. You must manually delete them on the vCenter. - - -
-
- Preparation Checklist - For a smoother configuration of VMware VDS, note down the VDS name you have added in the - datacenter before you start: - - - - - - vds-name.png: Name of the dvSwitch as specified in the vCenter. - - - Use this VDS name in the following: - - - The switch name in the Edit traffic label dialog while configuring a public and guest - traffic during zone creation. - During a zone creation, ensure that you select VMware vNetwork Distributed Virtual Switch - when you configure guest and public traffic type. - - - - - - traffic-type.png: virtual switch type - - - - - The Public Traffic vSwitch Type field when you add a VMware VDS-enabled cluster. - - - The switch name in the traffic label while updating the switch type in a zone. - - - Traffic label format in the last case is [["Name of - vSwitch/dvSwitch/EthernetPortProfile"][,"VLAN ID"[,"vSwitch Type"]]] - The possible values for traffic labels are: - - - empty string - - - dvSwitch0 - - - dvSwitch0,200 - - - dvSwitch1,300,vmwaredvs - - - myEthernetPortProfile,,nexusdvs - - - dvSwitch0,,vmwaredvs - - - - - - - - - - Fields - Name - Description - - - - - 1 - Represents the name of the virtual / distributed virtual switch at - vCenter. - The default value depends on the type of virtual switch: - vSwitch0: If type of virtual switch is VMware - vNetwork Standard virtual switch - dvSwitch0: If type of virtual switch is VMware - vNetwork Distributed virtual switch - epp0: If type of virtual switch is Cisco Nexus - 1000v Distributed virtual switch - - - 2 - VLAN ID to be used for this traffic wherever applicable. - This field would be used for only public traffic as of now. In case of - guest traffic this field would be ignored and could be left empty for guest traffic. - By default empty string would be assumed which translates to untagged VLAN for that - specific traffic type. - - - 3 - Type of virtual switch. Specified as string. - Possible valid values are vmwaredvs, vmwaresvs, nexusdvs. 
- vmwaresvs: Represents VMware vNetwork Standard - virtual switch - vmwaredvs: Represents VMware vNetwork - distributed virtual switch - nexusdvs: Represents Cisco Nexus 1000v - distributed virtual switch. - If nothing specified (left empty), zone-level default virtual switch would be - defaulted, based on the value of global parameter you specify. - Following are the global configuration parameters: - vmware.use.dvswitch: Set to true to enable any - kind (VMware DVS and Cisco Nexus 1000v) of distributed virtual switch in a &PRODUCT; - deployment. If set to false, the virtual switch that can be used in that &PRODUCT; - deployment is Standard virtual switch. - vmware.use.nexus.vswitch: This parameter is - ignored if vmware.use.dvswitch is set to false. Set to true to enable Cisco Nexus - 1000v distributed virtual switch in a &PRODUCT; deployment. - - - - - -
-
- Enabling Virtual Distributed Switch in &PRODUCT; - To make a &PRODUCT; deployment VDS enabled, set the vmware.use.dvswitch parameter to true - by using the Global Settings page in the &PRODUCT; UI and restart the Management Server. - Unless you enable the vmware.use.dvswitch parameter, you cannot see any UI options specific to - VDS, and &PRODUCT; ignores the VDS-specific parameters that you specify. Additionally, - &PRODUCT; uses VDS for virtual network infrastructure if the value of vmware.use.dvswitch - parameter is true and the value of vmware.use.nexus.dvswitch parameter is false. Another - global parameter that defines VDS configuration is vmware.ports.per.dvportgroup. This is the - default number of ports per VMware dvPortGroup in a VMware environment. Default value is 256. - This number directly associated with the number of guest network you can create. - &PRODUCT; supports orchestration of virtual networks in a deployment with a mix of Virtual - Distributed Switch, Standard Virtual Switch and Nexus 1000v Virtual Switch. -
-
- Configuring Distributed Virtual Switch in &PRODUCT; - You can configure VDS by adding the necessary resources while a zone is created. - Alternatively, at the cluster level, you can create an additional cluster with VDS enabled - in the existing zone. Use the Add Cluster option. For information as given in . - In both these cases, you must specify the following parameters to configure VDS: - - - - - - dvSwitchConfig.png: Configuring dvSwitch - - - - - - - - - Parameters - Description - - - - - Cluster Name - Enter the name of the cluster you created in vCenter. For example, - "cloudcluster". - - - vCenter Host - Enter the name or the IP address of the vCenter host where you have - deployed the VMware VDS. - - - vCenter User name - Enter the username that &PRODUCT; should use to connect to vCenter. This - user must have all administrative privileges. - - - vCenter Password - Enter the password for the user named above. - - - vCenter Datacenter - Enter the vCenter datacenter that the cluster is in. For example, - "clouddcVM". - - - Override Public Traffic - Enable this option to override the zone-wide public traffic for the cluster - you are creating. - - - Public Traffic vSwitch Type - This option is displayed only if you enable the Override Public Traffic - option. Select VMware vNetwork Distributed Virtual Switch. - If the vmware.use.dvswitch global parameter is true, the default option will be - VMware vNetwork Distributed Virtual Switch. - - - Public Traffic vSwitch Name - Name of virtual switch to be used for the public traffic. - - - Override Guest Traffic - Enable the option to override the zone-wide guest traffic for the cluster - you are creating. - - - Guest Traffic vSwitch Type - This option is displayed only if you enable the Override Guest Traffic - option. Select VMware vNetwork Distributed Virtual Switch. - If the vmware.use.dvswitch global parameter is true, the default option will be - VMware vNetwork Distributed Virtual Switch. 
- - - Guest Traffic vSwitch Name - Name of virtual switch to be used for guest traffic. - - - - -
-
diff --git a/docs/en-US/vmware-install.xml b/docs/en-US/vmware-install.xml deleted file mode 100644 index 282cf2ec6e2..00000000000 --- a/docs/en-US/vmware-install.xml +++ /dev/null @@ -1,928 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- VMware vSphere Installation and Configuration - If you want to use the VMware vSphere hypervisor to run guest virtual machines, install - vSphere on the host(s) in your cloud. - -
- Preparation Checklist for VMware - For a smoother installation, gather the following information before you start: - - - Information listed in - - - Information listed in - - -
- vCenter Checklist - You will need the following information about vCenter. - - - - - - - - vCenter Requirement - Value - Notes - - - - - vCenter User - - This user must have admin privileges. - - - vCenter User Password - - Password for the above user. - - - vCenter Datacenter Name - - Name of the datacenter. - - - vCenter Cluster Name - - Name of the cluster. - - - - -
-
- Networking Checklist for VMware - You will need the following information about VLAN. - - - - - - - - VLAN Information - Value - Notes - - - - - ESXi VLAN - - VLAN on which all your ESXi hypervisors reside. - - - ESXI VLAN IP Address - - IP Address Range in the ESXi VLAN. One address per Virtual Router is used - from this range. - - - ESXi VLAN IP Gateway - - - - - ESXi VLAN Netmask - - - - - Management Server VLAN - - VLAN on which the &PRODUCT; Management server is - installed. - - - Public VLAN - - VLAN for the Public Network. - - - Public VLAN Gateway - - - - - Public VLAN Netmask - - - - - Public VLAN IP Address Range - - Range of Public IP Addresses available for &PRODUCT; use. These addresses - will be used for virtual router on &PRODUCT; to route private traffic to external - networks. - - - VLAN Range for Customer use - - A contiguous range of non-routable VLANs. One VLAN will be assigned for - each customer. - - - - -
-
-
- vSphere Installation Steps - - - If you haven't already, you'll need to download and purchase vSphere from the VMware - Website (https://www.vmware.com/tryvmware/index.php?p=vmware-vsphere&lp=1) and - install it by following the VMware vSphere Installation Guide. - - - Following installation, perform the following configuration, which are described in - the next few sections: - - - - - - - Required - Optional - - - - - ESXi host setup - NIC bonding - - - Configure host physical networking, virtual switch, vCenter Management - Network, and extended port range - Multipath storage - - - Prepare storage for iSCSI - - - - Configure clusters in vCenter and add hosts to them, or add hosts - without clusters to vCenter - - - - - - - -
-
- ESXi Host setup - All ESXi hosts should enable CPU hardware virtualization support in BIOS. Please note - hardware virtualization support is not enabled by default on most servers. -
-
- Physical Host Networking - You should have a plan for cabling the vSphere hosts. Proper network configuration is - required before adding a vSphere host to &PRODUCT;. To configure an ESXi host, you can use - vClient to add it as standalone host to vCenter first. Once you see the host appearing in the - vCenter inventory tree, click the host node in the inventory tree, and navigate to the - Configuration tab. - - - - - - vsphereclient.png: vSphere client - - - In the host configuration tab, click the "Hardware/Networking" link to bring up - the networking configuration page as above. -
- Configure Virtual Switch - A default virtual switch vSwitch0 is created. &PRODUCT; requires all ESXi hosts in the - cloud to use the same set of virtual switch names. If you change the default virtual switch - name, you will need to configure one or more &PRODUCT; configuration variables as - well. -
- Separating Traffic - &PRODUCT; allows you to use vCenter to configure three separate networks per ESXi - host. These networks are identified by the name of the vSwitch they are connected to. The - allowed networks for configuration are public (for traffic to/from the public internet), - guest (for guest-guest traffic), and private (for management and usually storage traffic). - You can use the default virtual switch for all three, or create one or two other vSwitches - for those traffic types. - If you want to separate traffic in this way you should first create and configure - vSwitches in vCenter according to the vCenter instructions. Take note of the vSwitch names - you have used for each traffic type. You will configure &PRODUCT; to use these - vSwitches. -
-
- Increasing Ports - By default a virtual switch on ESXi hosts is created with 56 ports. We recommend - setting it to 4088, the maximum number of ports allowed. To do that, click the - "Properties..." link for virtual switch (note this is not the Properties link - for Networking). - - - - - - vsphereclient.png: vSphere client - - - In vSwitch properties dialog, select the vSwitch and click Edit. You should see the - following dialog: - - - - - - vsphereclient.png: vSphere client - - - In this dialog, you can change the number of switch ports. After you've done - that, ESXi hosts are required to reboot in order for the setting to take effect. -
-
-
- Configure vCenter Management Network - In the vSwitch properties dialog box, you may see a vCenter management network. This - same network will also be used as the &PRODUCT; management network. &PRODUCT; requires the - vCenter management network to be configured properly. Select the management network item in - the dialog, then click Edit. - - - - - - vsphereclient.png: vSphere client - - - Make sure the following values are set: - - - VLAN ID set to the desired ID - - - vMotion enabled. - - - Management traffic enabled. - - - If the ESXi hosts have multiple VMKernel ports, and ESXi is not using the default value - "Management Network" as the management network name, you must follow these - guidelines to configure the management network port group so that &PRODUCT; can find - it: - - - Use one label for the management network port across all ESXi hosts. - - - In the &PRODUCT; UI, go to Configuration - Global Settings and set - vmware.management.portgroup to the management network label from the ESXi hosts. - - -
-
- Extend Port Range for &PRODUCT; Console Proxy - (Applies only to VMware vSphere version 4.x) - You need to extend the range of firewall ports that the console proxy works with on the - hosts. This is to enable the console proxy to work with VMware-based VMs. The default - additional port range is 59000-60000. To extend the port range, log in to the VMware ESX - service console on each host and run the following commands: - -esxcfg-firewall -o 59000-60000,tcp,in,vncextras -esxcfg-firewall -o 59000-60000,tcp,out,vncextras - -
-
- Configure NIC Bonding for vSphere - NIC bonding on vSphere hosts may be done according to the vSphere installation - guide. -
-
-
- Configuring a vSphere Cluster with Nexus 1000v Virtual Switch - &PRODUCT; supports Cisco Nexus 1000v dvSwitch (Distributed Virtual Switch) for virtual - network configuration in a VMware vSphere environment. This section helps you configure a - vSphere cluster with Nexus 1000v virtual switch in a VMware vCenter environment. For - information on creating a vSphere cluster, see -
- About Cisco Nexus 1000v Distributed Virtual Switch - The Cisco Nexus 1000V virtual switch is a software-based virtual machine access switch - for VMware vSphere environments. It can span multiple hosts running VMware ESXi 4.0 and - later. A Nexus virtual switch consists of two components: the Virtual Supervisor Module - (VSM) and the Virtual Ethernet Module (VEM). The VSM is a virtual appliance that acts as the - switch's supervisor. It controls multiple VEMs as a single network device. The VSM is - installed independent of the VEM and is deployed in redundancy mode as pairs or as a - standalone appliance. The VEM is installed on each VMware ESXi server to provide - packet-forwarding capability. It provides each virtual machine with dedicated switch ports. - This VSM-VEM architecture is analogous to a physical Cisco switch's supervisor - (standalone or configured in high-availability mode) and multiple linecards - architecture. - Nexus 1000v switch uses vEthernet port profiles to simplify network provisioning for - virtual machines. There are two types of port profiles: Ethernet port profile and vEthernet - port profile. The Ethernet port profile is applied to the physical uplink ports-the NIC - ports of the physical NIC adapter on an ESXi server. The vEthernet port profile is - associated with the virtual NIC (vNIC) that is plumbed on a guest VM on the ESXi server. The - port profiles help the network administrators define network policies which can be reused - for new virtual machines. The Ethernet port profiles are created on the VSM and are - represented as port groups on the vCenter server. -
-
- Prerequisites and Guidelines - This section discusses prerequisites and guidelines for using Nexus virtual switch in - &PRODUCT;. Before configuring Nexus virtual switch, ensure that your system meets the - following requirements: - - - A cluster of servers (ESXi 4.1 or later) is configured in the vCenter. - - - Each cluster managed by &PRODUCT; is the only cluster in its vCenter - datacenter. - - - A Cisco Nexus 1000v virtual switch is installed to serve the datacenter that - contains the vCenter cluster. This ensures that &PRODUCT; doesn't have to deal with - dynamic migration of virtual adapters or networks across other existing virtual - switches. See Cisco Nexus 1000V Installation and Upgrade Guide for guidelines on how to - install the Nexus 1000v VSM and VEM modules. - - - The Nexus 1000v VSM is not deployed on a vSphere host that is managed by - &PRODUCT;. - - - When the maximum number of VEM modules per VSM instance is reached, an additional - VSM instance is created before introducing any more ESXi hosts. The limit is 64 VEM - modules for each VSM instance. - - - &PRODUCT; expects that the Management Network of the ESXi host is configured on the - standard vSwitch and searches for it in the standard vSwitch. Therefore, ensure that you - do not migrate the management network to Nexus 1000v virtual switch during - configuration. - - - All information given in - - -
-
- Nexus 1000v Virtual Switch Preconfiguration -
- Preparation Checklist - For a smoother configuration of Nexus 1000v switch, gather the following information - before you start: - - - vCenter credentials - - - Nexus 1000v VSM IP address - - - Nexus 1000v VSM Credentials - - - Ethernet port profile names - - -
- vCenter Credentials Checklist - You will need the following information about vCenter: - - - - - - - - Nexus vSwitch Requirements - Value - Notes - - - - - vCenter IP - - The IP address of the vCenter. - - - Secure HTTP Port Number - 443 - Port 443 is configured by default; however, you can change the port - if needed. - - - vCenter User ID - - The vCenter user with administrator-level privileges. The vCenter - User ID is required when you configure the virtual switch in - &PRODUCT;. - - - vCenter Password - - The password for the vCenter user specified above. The password for - this vCenter user is required when you configure the switch in - &PRODUCT;. - - - - -
-
- Network Configuration Checklist - The following information specified in the Nexus Configure Networking screen is - displayed in the Details tab of the Nexus dvSwitch in the &PRODUCT; UI: - - - - - - - - Network Requirements - Value - Notes - - - - - Control Port Group VLAN ID - - The VLAN ID of the Control Port Group. The control VLAN is used for - communication between the VSM and the VEMs. - - - Management Port Group VLAN ID - - The VLAN ID of the Management Port Group. The management VLAN - corresponds to the mgmt0 interface that is used to establish and maintain the - connection between the VSM and VMware vCenter Server. - - - Packet Port Group VLAN ID - - The VLAN ID of the Packet Port Group. The packet VLAN forwards - relevant data packets from the VEMs to the VSM. - - - - - - The VLANs used for control, packet, and management port groups can be the - same. - - For more information, see Cisco Nexus 1000V Getting Started Guide. -
-
- VSM Configuration Checklist - You will need the following information about network configuration: - - - - - - - - VSM Configuration Parameters Value Notes - Value - Notes - - - - - Admin Name and Password - - The admin name and password to connect to the VSM appliance. You must - specify these credentials while configuring Nexus virtual - switch. - - - Management IP Address - - This is the IP address of the VSM appliance. This is the IP address - you specify in the virtual switch IP Address field while configuring Nexus - virtual switch. - - - SSL - Enable - Always enable SSL. SSH is usually enabled by default during the VSM - installation. However, check whether the SSH connection to the VSM is working, - without which &PRODUCT; fails to connect to the VSM. - - - - -
-
-
- Creating a Port Profile - - - Whether you create a Basic or Advanced zone configuration, ensure that you always - create an Ethernet port profile on the VSM after you install it and before you create - the zone. - - - The Ethernet port profile created to represent the physical network or - networks used by an Advanced zone configuration trunk all the VLANs including - guest VLANs, the VLANs that serve the native VLAN, and the - packet/control/data/management VLANs of the VSM. - - - The Ethernet port profile created for a Basic zone configuration does not - trunk the guest VLANs because the guest VMs do not get their own VLANs provisioned - on their network interfaces in a Basic zone. - - - - - An Ethernet port profile configured on the Nexus 1000v virtual switch should not - use in its set of system VLANs, or any of the VLANs configured or intended to be - configured for use towards VMs or VM resources in the &PRODUCT; environment. - - - You do not have to create any vEthernet port profiles – &PRODUCT; does that during - VM deployment. - - - Ensure that you create required port profiles to be used by &PRODUCT; for - different traffic types of &PRODUCT;, such as Management traffic, Guest traffic, - Storage traffic, and Public traffic. The physical networks configured during zone - creation should have a one-to-one relation with the Ethernet port profiles. - - - - - - - - vsphereclient.png: vSphere client - - - For information on creating a port profile, see Cisco Nexus 1000V Port Profile Configuration Guide. -
-
- Assigning Physical NIC Adapters - Assign ESXi host's physical NIC adapters, which correspond to each physical - network, to the port profiles. In each ESXi host that is part of the vCenter cluster, - observe the physical networks assigned to each port profile and note down the names of the - port profile for future use. This mapping information helps you when configuring physical - networks during the zone configuration on &PRODUCT;. These Ethernet port profile names are - later specified as VMware Traffic Labels for different traffic types when configuring - physical networks during the zone configuration. For more information on configuring - physical networks, see . -
-
- Adding VLAN Ranges - Determine the public VLAN, System VLAN, and Guest VLANs to be used by the &PRODUCT;. - Ensure that you add them to the port profile database. Corresponding to each physical - network, add the VLAN range to port profiles. In the VSM command prompt, run the - switchport trunk allowed vlan <range> command to add the VLAN ranges to the port - profile. - For example: - switchport trunk allowed vlan 1,140-147,196-203 - In this example, the allowed VLANs added are 1, 140-147, and 196-203. - You must also add all the public and private VLANs or VLAN ranges to the switch. This - range is the VLAN range you specify in your zone. - - Before you run the vlan command, ensure that the configuration mode is enabled in - Nexus 1000v virtual switch. - - For example: - If you want the VLAN 200 to be used on the switch, run the following command: - vlan 200 - If you want the VLAN range 1350-1750 to be used on the switch, run the following - command: - vlan 1350-1750 - Refer to Cisco Nexus 1000V Command Reference of specific product version. -
-
-
- Enabling Nexus Virtual Switch in &PRODUCT; - To make a &PRODUCT; deployment Nexus enabled, you must set the vmware.use.nexus.vswitch - parameter true by using the Global Settings page in the &PRODUCT; UI. Unless this parameter - is set to "true" and the management server is restarted, you cannot see any UI options - specific to Nexus virtual switch, and &PRODUCT; ignores the Nexus virtual switch specific - parameters specified in the AddTrafficTypeCmd, UpdateTrafficTypeCmd, and AddClusterCmd API - calls. - Unless the &PRODUCT; global parameter "vmware.use.nexus.vswitch" is set to - "true", &PRODUCT; by default uses VMware standard vSwitch for virtual network - infrastructure. In this release, &PRODUCT; doesn’t support configuring virtual networks in a - deployment with a mix of standard vSwitch and Nexus 1000v virtual switch. The deployment can - have either standard vSwitch or Nexus 1000v virtual switch. -
-
- Configuring Nexus 1000v Virtual Switch in &PRODUCT; - You can configure Nexus dvSwitch by adding the necessary resources while the zone is - being created. - - - - - - vsphereclient.png: vSphere client - - - After the zone is created, if you want to create an additional cluster along with Nexus - 1000v virtual switch in the existing zone, use the Add Cluster option. For information on - creating a cluster, see . - In both these cases, you must specify the following parameters to configure Nexus - virtual switch: - - - - - - - Parameters - Description - - - - - Cluster Name - Enter the name of the cluster you created in vCenter. For example, - "cloud.cluster". - - - vCenter Host - Enter the host name or the IP address of the vCenter host where you have - deployed the Nexus virtual switch. - - - vCenter User name - Enter the username that &PRODUCT; should use to connect to vCenter. This - user must have all administrative privileges. - - - vCenter Password - Enter the password for the user named above. - - - vCenter Datacenter - Enter the vCenter datacenter that the cluster is in. For example, - "cloud.dc.VM". - - - Nexus dvSwitch IP Address - The IP address of the VSM component of the Nexus 1000v virtual - switch. - - - Nexus dvSwitch Username - The admin name to connect to the VSM appliance. - - - Nexus dvSwitch Password - The corresponding password for the admin user specified - above. - - - - -
-
- Removing Nexus Virtual Switch - - - In the vCenter datacenter that is served by the Nexus virtual switch, ensure that - you delete all the hosts in the corresponding cluster. - - - Log in with Admin permissions to the &PRODUCT; administrator UI. - - - In the left navigation bar, select Infrastructure. - - - In the Infrastructure page, click View all under Clusters. - - - Select the cluster where you want to remove the virtual switch. - - - In the dvSwitch tab, click the name of the virtual switch. - - - In the Details page, click Delete Nexus dvSwitch icon. - - - - - DeleteButton.png: button to delete dvSwitch - - - - Click Yes in the confirmation dialog box. - - -
-
- -
- Storage Preparation for vSphere (iSCSI only) - Use of iSCSI requires preparatory work in vCenter. You must add an iSCSI target and create - an iSCSI datastore. - If you are using NFS, skip this section. -
- Enable iSCSI initiator for ESXi hosts - - - In vCenter, go to hosts and Clusters/Configuration, and click Storage Adapters link. - You will see: - - - - - - vsphereclient.png: vSphere client - - - - - Select iSCSI software adapter and click Properties. - - - - - - vsphereclient.png: vSphere client - - - - - Click the Configure... button. - - - - - - vsphereclient.png: vSphere client - - - - - Check Enabled to enable the initiator. - - - Click OK to save. - - -
-
- Add iSCSI target - Under the properties dialog, add the iSCSI target info: - - - - - - vsphereclient.png: vSphere client - - - Repeat these steps for all ESXi hosts in the cluster. -
-
- Create an iSCSI datastore - You should now create a VMFS datastore. Follow these steps to do so: - - - Select Home/Inventory/Datastores. - - - Right click on the datacenter node. - - - Choose Add Datastore... command. - - - Follow the wizard to create an iSCSI datastore. - - - This procedure should be done on one host in the cluster. It is not necessary to do this - on all hosts. - - - - - - vsphereclient.png: vSphere client - - -
-
- Multipathing for vSphere (Optional) - Storage multipathing on vSphere nodes may be done according to the vSphere installation - guide. -
-
-
- Add Hosts or Configure Clusters (vSphere) - Use vCenter to create a vCenter cluster and add your desired hosts to the cluster. You - will later add the entire cluster to &PRODUCT;. (see ). -
-
- Applying Hotfixes to a VMware vSphere Host - - - Disconnect the VMware vSphere cluster from &PRODUCT;. It should remain disconnected - long enough to apply the hotfix on the host. - - - Log in to the &PRODUCT; UI as root. - See . - - - Navigate to the VMware cluster, click Actions, and select Unmanage. - - - Watch the cluster status until it shows Unmanaged. - - - - - Perform the following on each of the ESXi hosts in the cluster: - - - Move each of the ESXi hosts in the cluster to maintenance mode. - - - Ensure that all the VMs are migrated to other hosts in that cluster. - - - If there is only one host in that cluster, shutdown all the VMs and move the host - into maintenance mode. - - - Apply the patch on the ESXi host. - - - Restart the host if prompted. - - - Cancel the maintenance mode on the host. - - - - - Reconnect the cluster to &PRODUCT;: - - - Log in to the &PRODUCT; UI as root. - - - Navigate to the VMware cluster, click Actions, and select Manage. - - - Watch the status to see that all the hosts come up. It might take several minutes - for the hosts to come up. - Alternatively, verify the host state is properly synchronized and updated in the - &PRODUCT; database. - - - - -
-
diff --git a/docs/en-US/vmware-requirements.xml b/docs/en-US/vmware-requirements.xml deleted file mode 100644 index d7a6d70e6a4..00000000000 --- a/docs/en-US/vmware-requirements.xml +++ /dev/null @@ -1,80 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- System Requirements for vSphere Hosts -
- Software requirements: - - - vSphere and vCenter, both version 4.1 or 5.0. - vSphere Standard is recommended. Note however that customers need to consider the CPU constraints in place with vSphere licensing. See http://www.vmware.com/files/pdf/vsphere_pricing.pdf and discuss with your VMware sales representative. - vCenter Server Standard is recommended. - - Be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor's support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches. - - Apply All Necessary Hotfixes: The lack of up-to-date hotfixes can lead to data corruption and lost VMs. -
-
- Hardware requirements: - - The host must be certified as compatible with vSphere. See the VMware Hardware Compatibility Guide at http://www.vmware.com/resources/compatibility/search.php. - All hosts must be 64-bit and must support HVM (Intel-VT or AMD-V enabled). - All hosts within a cluster must be homogeneous. That means the CPUs must be of the same type, count, and feature flags. - 64-bit x86 CPU (more cores results in better performance) - Hardware virtualization support required - 4 GB of memory - 36 GB of local disk - At least 1 NIC - Statically allocated IP Address - -
-
- vCenter Server requirements: - - Processor - 2 CPUs 2.0GHz or higher Intel or AMD x86 processors. Processor requirements may be higher if the database runs on the same machine. - Memory - 3GB RAM. RAM requirements may be higher if your database runs on the same machine. - Disk storage - 2GB. Disk requirements may be higher if your database runs on the same machine. - Microsoft SQL Server 2005 Express disk requirements. The bundled database requires up to 2GB free disk space to decompress the installation archive. - Networking - 1Gbit or 10Gbit. - - For more information, see "vCenter Server and the vSphere Client Hardware Requirements" at http://pubs.vmware.com/vsp40/wwhelp/wwhimpl/js/html/wwhelp.htm#href=install/c_vc_hw.html. -
-
- Other requirements: - - VMware vCenter Standard Edition 4.1 or 5.0 must be installed and available to manage the vSphere hosts. - vCenter must be configured to use the standard port 443 so that it can communicate with the &PRODUCT; Management Server. - You must re-install VMware ESXi if you are going to re-use a host from a previous install. - &PRODUCT; requires VMware vSphere 4.1 or 5.0. VMware vSphere 4.0 is not supported. - All hosts must be 64-bit and must support HVM (Intel-VT or AMD-V enabled). All hosts within a cluster must be homogeneous. That means the CPUs must be of the same type, count, and feature flags. - The &PRODUCT; management network must not be configured as a separate virtual network. The &PRODUCT; management network is the same as the vCenter management network, and will inherit its configuration. See . - &PRODUCT; requires ESXi. ESX is not supported. - All resources used for &PRODUCT; must be used for &PRODUCT; only. &PRODUCT; cannot share an instance of ESXi or storage with other management consoles. Do not share the same storage volumes that will be used by &PRODUCT; with a different set of ESXi servers that are not managed by &PRODUCT;. - Put all target ESXi hypervisors in a cluster in a separate Datacenter in vCenter. - The cluster that will be managed by &PRODUCT; should not contain any VMs. Do not run the management server, vCenter or any other VMs on the cluster that is designated for &PRODUCT; use. Create a separate cluster for use of &PRODUCT; and make sure that there are no VMs in this cluster. - All the required VLANS must be trunked into all network switches that are connected to the ESXi hypervisor hosts. These would include the VLANS for Management, Storage, vMotion, and guest VLANs. The guest VLAN (used in Advanced Networking; see Network Setup) is a contiguous range of VLANs that will be managed by &PRODUCT;. - -
-
diff --git a/docs/en-US/vmware-topology-req.xml b/docs/en-US/vmware-topology-req.xml deleted file mode 100644 index cf6ae4d455e..00000000000 --- a/docs/en-US/vmware-topology-req.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- VMware Topology Requirements - - - The Management Server and secondary storage VMs must be able to access vCenter and all - ESXi hosts in the zone. To allow the necessary access through the firewall, keep port 443 - open. - - - The Management Servers communicate with VMware vCenter servers on port 443 - (HTTPs). - - - The Management Servers communicate with the System VMs on port 3922 (ssh) on the - management traffic network. - - -
diff --git a/docs/en-US/vmx-settings-dev.xml b/docs/en-US/vmx-settings-dev.xml deleted file mode 100644 index a0fdf7f7825..00000000000 --- a/docs/en-US/vmx-settings-dev.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- Additional VMX Settings - A VMX (.vmx) file is the primary configuration file for a virtual machine. When a new VM is - created, information on the operating system, disk sizes, and networking is stored in this file. - The VM actively writes to its .vmx file for all the configuration changes. The VMX file is - typically located in the directory where the VM is created. In Windows Vista / Windows 7 / - Windows Server 2008, the default location is C:\Users\<your_user_name>\My - Documents\Virtual Machines\<virtual_machine_name>.vmx. In Linux, vmware-cmd -l lists the - full path to all the registered VMX files. Any manual additions to the .vmx file from ESX/ESXi - are overwritten by the entries stored in the vCenter Server database. Therefore, before you edit - a .vmx file, first remove the VM from the vCenter server's inventory and register the VM again - after editing. - The CloudStack API that supports passing some of the VMX settings is registerTemplate. The - supported parameters are rootDiskController, nicAdapter, and keyboard. In addition to these - existing VMX parameters, you can now use the keyboard.typematicMinDelay parameter in the - registerTemplate API call. This parameter controls the amount of delay for the repeated key - strokes on remote consoles. For more information on keyboard.typematicMinDelay, see keyboard.typematicMinDelay. -
diff --git a/docs/en-US/vnmc-cisco.xml b/docs/en-US/vnmc-cisco.xml deleted file mode 100644 index b0785fc953f..00000000000 --- a/docs/en-US/vnmc-cisco.xml +++ /dev/null @@ -1,400 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- External Guest Firewall Integration for Cisco VNMC (Optional) - Cisco Virtual Network Management Center (VNMC) provides centralized multi-device and policy - management for Cisco Network Virtual Services. You can integrate Cisco VNMC with &PRODUCT; to - leverage the firewall and NAT service offered by ASA 1000v Cloud Firewall. Use it in a Cisco - Nexus 1000v dvSwitch-enabled cluster in &PRODUCT;. In such a deployment, you will be able to: - - - Configure Cisco ASA 1000v firewalls. You can configure one per guest network. - - - Use Cisco ASA 1000v firewalls to create and apply security profiles that contain ACL - policy sets for both ingress and egress traffic. - - - Use Cisco ASA 1000v firewalls to create and apply Source NAT, Port Forwarding, and - Static NAT policy sets. - - - &PRODUCT; supports Cisco VNMC on Cisco Nexus 1000v dvSwitch-enabled VMware - hypervisors. -
- Using Cisco ASA 1000v Firewall, Cisco Nexus 1000v dvSwitch, and Cisco VNMC in a - Deployment -
- Guidelines - - - Cisco ASA 1000v firewall is supported only in Isolated Guest Networks. - - - Cisco ASA 1000v firewall is not supported on VPC. - - - Cisco ASA 1000v firewall is not supported for load balancing. - - - When a guest network is created with Cisco VNMC firewall provider, an additional - public IP is acquired along with the Source NAT IP. The Source NAT IP is used for the - rules, whereas the additional IP is used for the ASA outside interface. Ensure that - this additional public IP is not released. You can identify this IP as soon as the - network is in implemented state and before acquiring any further public IPs. The - additional IP is the one that is not marked as Source NAT. You can find the IP used for - the ASA outside interface by looking at the Cisco VNMC used in your guest - network. - - - Use the public IP address range from a single subnet. You cannot add IP addresses - from different subnets. - - - Only one ASA instance per VLAN is allowed because multiple VLANS cannot be trunked - to ASA ports. Therefore, you can use only one ASA instance in a guest network. - - - Only one Cisco VNMC per zone is allowed. - - - Supported only in Inline mode deployment with load balancer. - - - The ASA firewall rule is applicable to all the public IPs in the guest network. - Unlike the firewall rules created on virtual router, a rule created on the ASA device is - not tied to a specific public IP. - - - Use a version of Cisco Nexus 1000v dvSwitch that supports the vservice command. For - example: nexus-1000v.4.2.1.SV1.5.2b.bin - Cisco VNMC requires the vservice command to be available on the Nexus switch to - create a guest network in &PRODUCT;. - - -
-
- Prerequisites - - - Configure Cisco Nexus 1000v dvSwitch in a vCenter environment. - Create Port profiles for both internal and external network interfaces on Cisco - Nexus 1000v dvSwitch. Note down the inside port profile, which needs to be provided - while adding the ASA appliance to &PRODUCT;. - For information on configuration, see . - - - Deploy and configure Cisco VNMC. - For more information, see Installing Cisco Virtual Network Management Center and Configuring Cisco Virtual Network Management Center. - - - Register Cisco Nexus 1000v dvSwitch with Cisco VNMC. - For more information, see Registering a Cisco Nexus 1000V with Cisco VNMC. - - - Create Inside and Outside port profiles in Cisco Nexus 1000v dvSwitch. - For more information, see . - - - Deploy and configure Cisco ASA 1000v appliance. - For more information, see Setting Up the ASA 1000V Using VNMC. - Typically, you create a pool of ASA 1000v appliances and register them with - &PRODUCT;. - Specify the following while setting up a Cisco ASA 1000v instance: - - - VNMC host IP. - - - Ensure that you add ASA appliance in VNMC mode. - - - Port profiles for the Management and HA network interfaces. These need to be - pre-created on Cisco Nexus 1000v dvSwitch. - - - Internal and external port profiles. - - - The Management IP for Cisco ASA 1000v appliance. Specify the gateway such that - the VNMC IP is reachable. - - - Administrator credentials - - - VNMC credentials - - - - - Register Cisco ASA 1000v with VNMC. - After Cisco ASA 1000v instance is powered on, register VNMC from the ASA - console. - - -
-
- Using Cisco ASA 1000v Services - - - Ensure that all the prerequisites are met. - See . - - - Add a VNMC instance. - See . - - - Add a ASA 1000v instance. - See . - - - Create a Network Offering and use Cisco VNMC as the service provider for desired - services. - See . - - - Create an Isolated Guest Network by using the network offering you just - created. - - -
-
-
- Adding a VNMC Instance - - - Log in to the &PRODUCT; UI as administrator. - - - In the left navigation bar, click Infrastructure. - - - In Zones, click View More. - - - Choose the zone you want to work with. - - - Click the Physical Network tab. - - - In the Network Service Providers node of the diagram, click Configure. - You might have to scroll down to see this. - - - Click Cisco VNMC. - - - Click View VNMC Devices. - - - Click the Add VNMC Device and provide the following: - - - Host: The IP address of the VNMC instance. - - - Username: The user name of the account on the VNMC instance that &PRODUCT; should - use. - - - Password: The password of the account. - - - - - Click OK. - - -
-
- Adding an ASA 1000v Instance - - - Log in to the &PRODUCT; UI as administrator. - - - In the left navigation bar, click Infrastructure. - - - In Zones, click View More. - - - Choose the zone you want to work with. - - - Click the Physical Network tab. - - - In the Network Service Providers node of the diagram, click Configure. - You might have to scroll down to see this. - - - Click Cisco VNMC. - - - Click View ASA 1000v. - - - Click the Add CiscoASA1000v Resource and provide the following: - - - Host: The management IP address of the ASA 1000v - instance. The IP address is used to connect to ASA 1000V. - - - Inside Port Profile: The Inside Port Profile - configured on Cisco Nexus1000v dvSwitch. - - - Cluster: The VMware cluster to which you are - adding the ASA 1000v instance. - Ensure that the cluster is Cisco Nexus 1000v dvSwitch enabled. - - - - - Click OK. - - -
-
- Creating a Network Offering Using Cisco ASA 1000v - To have Cisco ASA 1000v support for a guest network, create a network offering as follows: - - - Log in to the &PRODUCT; UI as a user or admin. - - - From the Select Offering drop-down, choose Network Offering. - - - Click Add Network Offering. - - - In the dialog, make the following choices: - - - Name: Any desired name for the network - offering. - - - Description: A short description of the offering - that can be displayed to users. - - - Network Rate: Allowed data transfer rate in MB - per second. - - - Traffic Type: The type of network traffic that - will be carried on the network. - - - Guest Type: Choose whether the guest network is - isolated or shared. - - - Persistent: Indicate whether the guest network is - persistent or not. The network that you can provision without having to deploy a VM on - it is termed persistent network. - - - VPC: This option indicate whether the guest - network is Virtual Private Cloud-enabled. A Virtual Private Cloud (VPC) is a private, - isolated part of &PRODUCT;. A VPC can have its own virtual network topology that - resembles a traditional physical network. For more information on VPCs, see . - - - Specify VLAN: (Isolated guest networks only) - Indicate whether a VLAN should be specified when this offering is used. - - - Supported Services: Use Cisco VNMC as the service - provider for Firewall, Source NAT, Port Forwarding, and Static NAT to create an - Isolated guest network offering. - - - System Offering: Choose the system service - offering that you want virtual routers to use in this network. - - - Conserve mode: Indicate whether to use conserve - mode. In this mode, network resources are allocated only when the first virtual - machine starts in the network. - - - - - Click OK - The network offering is created. - - -
-
- Reusing ASA 1000v Appliance in new Guest Networks - You can reuse an ASA 1000v appliance in a new guest network after the necessary cleanup. - Typically, ASA 1000v is cleaned up when the logical edge firewall is cleaned up in VNMC. If - this cleanup does not happen, you need to reset the appliance to its factory settings for use - in new guest networks. As part of this, enable SSH on the appliance and store the SSH - credentials by registering on VNMC. - - - Open a command line on the ASA appliance: - - - Run the following: - ASA1000V(config)# reload - You are prompted with the following message: - System config has been modified. Save? [Y]es/[N]o:" - - - Enter N. - You will get the following confirmation message: - "Proceed with reload? [confirm]" - - - Restart the appliance. - - - - - Register the ASA 1000v appliance with the VNMC: - ASA1000V(config)# vnmc policy-agent -ASA1000V(config-vnmc-policy-agent)# registration host vnmc_ip_address -ASA1000V(config-vnmc-policy-agent)# shared-secret key where key is the shared secret for authentication of the ASA 1000V connection to the Cisco VNMC - - -
-
diff --git a/docs/en-US/volume-deletion-garbage-collection.xml b/docs/en-US/volume-deletion-garbage-collection.xml deleted file mode 100644 index 418643890f3..00000000000 --- a/docs/en-US/volume-deletion-garbage-collection.xml +++ /dev/null @@ -1,44 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Volume Deletion and Garbage Collection - The deletion of a volume does not delete the snapshots that have been created from the - volume - When a VM is destroyed, data disk volumes that are attached to the VM are not - deleted. - Volumes are permanently destroyed using a garbage collection process. The global - configuration variables expunge.delay and expunge.interval determine when the physical deletion - of volumes will occur. - - - expunge.delay: determines how old the volume must be before it is destroyed, in - seconds - - - expunge.interval: determines how often to run the garbage collection check - - - Administrators should adjust these values depending on site policies around data - retention. -
diff --git a/docs/en-US/volume-status.xml b/docs/en-US/volume-status.xml deleted file mode 100644 index 35802f98253..00000000000 --- a/docs/en-US/volume-status.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Volume Status - When a snapshot operation is triggered by means of a recurring snapshot - policy, a snapshot is skipped if a volume has remained inactive since its - last snapshot was taken. A volume is considered to be inactive if it is - either detached or attached to a VM that is not running. &PRODUCT; ensures - that at least one snapshot is taken since the volume last became inactive. - - When a snapshot is taken manually, a snapshot is always created - regardless of whether a volume has been active or not. - -
diff --git a/docs/en-US/vpc.xml b/docs/en-US/vpc.xml deleted file mode 100644 index d1f0c52861a..00000000000 --- a/docs/en-US/vpc.xml +++ /dev/null @@ -1,189 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- About Virtual Private Clouds - &PRODUCT; Virtual Private Cloud is a private, isolated part of &PRODUCT;. A VPC can have its - own virtual network topology that resembles a traditional physical network. You can launch VMs - in the virtual network that can have private addresses in the range of your choice, for example: - 10.0.0.0/16. You can define network tiers within your VPC network range, which in turn enables - you to group similar kinds of instances based on IP address range. - For example, if a VPC has the private range 10.0.0.0/16, its guest networks can have the - network ranges 10.0.1.0/24, 10.0.2.0/24, 10.0.3.0/24, and so on. - - Major Components of a VPC: - A VPC is comprised of the following network components: - - - - VPC: A VPC acts as a container for multiple isolated - networks that can communicate with each other via its virtual router. - - - Network Tiers: Each tier acts as an isolated network - with its own VLANs and CIDR list, where you can place groups of resources, such as VMs. The - tiers are segmented by means of VLANs. The NIC of each tier acts as its gateway. - - - Virtual Router: A virtual router is automatically - created and started when you create a VPC. The virtual router connect the tiers and direct - traffic among the public gateway, the VPN gateways, and the NAT instances. For each tier, a - corresponding NIC and IP exist in the virtual router. The virtual router provides DNS and - DHCP services through its IP. - - - Public Gateway: The traffic to and from the Internet - routed to the VPC through the public gateway. In a VPC, the public gateway is not exposed to - the end user; therefore, static routes are not support for the public gateway. - - - Private Gateway: All the traffic to and from a private - network routed to the VPC through the private gateway. For more information, see . - - - VPN Gateway: The VPC side of a VPN connection. 
- - - Site-to-Site VPN Connection: A hardware-based VPN - connection between your VPC and your datacenter, home network, or co-location facility. For - more information, see . - - - Customer Gateway: The customer side of a VPN - Connection. For more information, see . - - - NAT Instance: An instance that provides Port Address - Translation for instances to access the Internet via the public gateway. For more - information, see . - - - Network ACL: Network ACL is a group of Network ACL - items. Network ACL items are nothing but numbered rules that are evaluated in order, - starting with the lowest numbered rule. These rules determine whether traffic is allowed in - or out of any tier associated with the network ACL. For more information, see . - - - - Network Architecture in a VPC - In a VPC, the following four basic options of network architectures are present: - - - - VPC with a public gateway only - - - VPC with public and private gateways - - - VPC with public and private gateways and site-to-site VPN access - - - VPC with a private gateway only and site-to-site VPN access - - - - Connectivity Options for a VPC - You can connect your VPC to: - - - - The Internet through the public gateway. - - - The corporate datacenter by using a site-to-site VPN connection through the VPN - gateway. - - - Both the Internet and your corporate datacenter by using both the public gateway and a - VPN gateway. - - - - VPC Network Considerations - Consider the following before you create a VPC: - - - - A VPC, by default, is created in the enabled state. - - - A VPC can be created in Advance zone only, and can't belong to more than one zone at a - time. - - - The default number of VPCs an account can create is 20. However, you can change it by - using the max.account.vpcs global parameter, which controls the maximum number of VPCs an - account is allowed to create. - - - The default number of tiers an account can create within a VPC is 3. 
You can configure - this number by using the vpc.max.networks parameter. - - - Each tier should have an unique CIDR in the VPC. Ensure that the tier's CIDR should be - within the VPC CIDR range. - - - A tier belongs to only one VPC. - - - All network tiers inside the VPC should belong to the same account. - - - When a VPC is created, by default, a SourceNAT IP is allocated to it. The Source NAT IP - is released only when the VPC is removed. - - - A public IP can be used for only one purpose at a time. If the IP is a sourceNAT, it - cannot be used for StaticNAT or port forwarding. - - - The instances can only have a private IP address that you provision. To communicate with - the Internet, enable NAT to an instance that you launch in your VPC. - - - Only new networks can be added to a VPC. The maximum number of networks per VPC is - limited by the value you specify in the vpc.max.networks parameter. The default value is - three. - - - The load balancing service can be supported by only one tier inside the VPC. - - - If an IP address is assigned to a tier: - - - That IP can't be used by more than one tier at a time in the VPC. For example, if - you have tiers A and B, and a public IP1, you can create a port forwarding rule by using - the IP either for A or B, but not for both. - - - That IP can't be used for StaticNAT, load balancing, or port forwarding rules for - another guest network inside the VPC. - - - - - Remote access VPN is not supported in VPC networks. - - -
diff --git a/docs/en-US/vpn.xml b/docs/en-US/vpn.xml deleted file mode 100644 index 1f8098ca962..00000000000 --- a/docs/en-US/vpn.xml +++ /dev/null @@ -1,62 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Remote Access VPN - &PRODUCT; account owners can create virtual private networks (VPN) to access their virtual - machines. If the guest network is instantiated from a network offering that offers the Remote - Access VPN service, the virtual router (based on the System VM) is used to provide the service. - &PRODUCT; provides a L2TP-over-IPsec-based remote access VPN service to guest virtual networks. - Since each network gets its own virtual router, VPNs are not shared across the networks. VPN - clients native to Windows, Mac OS X and iOS can be used to connect to the guest networks. The - account owner can create and manage users for their VPN. &PRODUCT; does not use its account - database for this purpose but uses a separate table. The VPN user database is shared across all - the VPNs created by the account owner. All VPN users get access to all VPNs created by the - account owner. - - Make sure that not all traffic goes through the VPN. That is, the route installed by the - VPN should be only for the guest network and not for all traffic. - - - - - Road Warrior / Remote Access. Users want to be able to - connect securely from a home or office to a private network in the cloud. Typically, the IP - address of the connecting client is dynamic and cannot be preconfigured on the VPN - server. - - - Site to Site. In this scenario, two private subnets are - connected over the public Internet with a secure VPN tunnel. The cloud user’s subnet (for - example, an office network) is connected through a gateway to the network in the cloud. The - address of the user’s gateway must be preconfigured on the VPN server in the cloud. Note - that although L2TP-over-IPsec can be used to set up Site-to-Site VPNs, this is not the - primary intent of this feature. For more information, see - - - - - - -
diff --git a/docs/en-US/whatis.xml b/docs/en-US/whatis.xml deleted file mode 100644 index 5103b2ec583..00000000000 --- a/docs/en-US/whatis.xml +++ /dev/null @@ -1,41 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- What Is &PRODUCT;? - &PRODUCT; is an open source software platform that pools computing resources to build public, private, and hybrid Infrastructure as a Service (IaaS) clouds. &PRODUCT; manages the network, storage, and compute nodes that make up a cloud infrastructure. Use &PRODUCT; to deploy, manage, and configure cloud computing environments. - Typical users are service providers and enterprises. With &PRODUCT;, you can: - - - Set up an on-demand, elastic cloud computing service. Service providers can sell self service virtual machine instances, storage volumes, and networking configurations over the Internet. - - Set up an on-premise private cloud for use by employees. Rather than managing virtual machines in the same way as physical machines, with &PRODUCT; an enterprise can offer self-service virtual machines to users without involving IT departments. - - - - - - 1000-foot-view.png: Overview of &PRODUCT; - -
diff --git a/docs/en-US/whats-in-this-adminguide.xml b/docs/en-US/whats-in-this-adminguide.xml deleted file mode 100644 index 68717dd596e..00000000000 --- a/docs/en-US/whats-in-this-adminguide.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Who Should Read This - If you have already installed &PRODUCT; or you want to learn more about the ongoing operation and maintenance of a &PRODUCT;-powered cloud, read this documentation. It will help you start using, configuring, and managing the ongoing operation of your cloud. -
diff --git a/docs/en-US/whats-new.xml b/docs/en-US/whats-new.xml deleted file mode 100644 index 04733c71a75..00000000000 --- a/docs/en-US/whats-new.xml +++ /dev/null @@ -1,55 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - What's New in the API? - The following describes any new major features of each &PRODUCT; version as it applies to - API usage. -
- What's New in the API for 4.2 - - - -
-
- What's New in the API for 4.1 - - - - - - -
-
- What's New in the API for 4.0 - - -
-
- What's New in the API for 3.0 - - - - - - -
-
diff --git a/docs/en-US/who-should-read-installation.xml b/docs/en-US/who-should-read-installation.xml deleted file mode 100644 index eb55ee476ec..00000000000 --- a/docs/en-US/who-should-read-installation.xml +++ /dev/null @@ -1,28 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Who Should Read This - For those who have already gone through a design phase and planned a more sophisticated deployment, or those who are ready to start scaling up a trial installation. With the following procedures, you can start using the more powerful features of &PRODUCT;, such as advanced VLAN networking, high availability, additional network elements such as load balancers and firewalls, and support for multiple hypervisors including Citrix XenServer, KVM, and VMware vSphere. -
diff --git a/docs/en-US/windows-installation.xml b/docs/en-US/windows-installation.xml deleted file mode 100644 index 29c84b4e278..00000000000 --- a/docs/en-US/windows-installation.xml +++ /dev/null @@ -1,29 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Windows OS Installation - Download the installer, CloudInstanceManager.msi, from Download page and run the installer in the - newly created Windows VM. -
diff --git a/docs/en-US/work-with-usage.xml b/docs/en-US/work-with-usage.xml deleted file mode 100644 index 00172934644..00000000000 --- a/docs/en-US/work-with-usage.xml +++ /dev/null @@ -1,38 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - Working with Usage - The Usage Server is an optional, separately-installed part of &PRODUCT; that provides - aggregated usage records which you can use to create billing integration for &PRODUCT;. The - Usage Server works by taking data from the events log and creating summary usage records that - you can access using the listUsageRecords API call. - The usage records show the amount of resources, such as VM run time or template storage - space, consumed by guest instances. - The Usage Server runs at least once per day. It can be configured to run multiple times per - day. - - - - - diff --git a/docs/en-US/working-with-documentation.xml b/docs/en-US/working-with-documentation.xml deleted file mode 100644 index 67748427299..00000000000 --- a/docs/en-US/working-with-documentation.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Preparing and Building &PRODUCT; Documentation - This chapter describes how to install publican, how to write new documentation and build a guide as well as how to build a translated version of the documentation using transifex - - - - - diff --git a/docs/en-US/working-with-hosts.xml b/docs/en-US/working-with-hosts.xml deleted file mode 100644 index d1fc74fd207..00000000000 --- a/docs/en-US/working-with-hosts.xml +++ /dev/null @@ -1,39 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Working With Hosts -
- Adding Hosts - Additional hosts can be added at any time to provide more capacity for guest VMs. For requirements and instructions, see . -
- - - - - - - - -
diff --git a/docs/en-US/working-with-iso.xml b/docs/en-US/working-with-iso.xml deleted file mode 100644 index 9872106ceec..00000000000 --- a/docs/en-US/working-with-iso.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Working with ISOs - &PRODUCT; supports ISOs and their attachment to guest VMs. An ISO is a read-only file that has an ISO/CD-ROM style file system. Users can upload their own ISOs and mount them on their guest VMs. - ISOs are uploaded based on a URL. HTTP is the supported protocol. Once the ISO is available via HTTP specify an upload URL such as http://my.web.server/filename.iso. - ISOs may be public or private, like templates.ISOs are not hypervisor-specific. That is, a guest on vSphere can mount the exact same image that a guest on KVM can mount. - ISO images may be stored in the system and made available with a privacy level similar to templates. ISO images are classified as either bootable or not bootable. A bootable ISO image is one that contains an OS image. &PRODUCT; allows a user to boot a guest VM off of an ISO image. Users can also attach ISO images to guest VMs. For example, this enables installing PV drivers into Windows. ISO images are not hypervisor-specific. - - - -
diff --git a/docs/en-US/working-with-snapshots.xml b/docs/en-US/working-with-snapshots.xml deleted file mode 100644 index 674b23254fb..00000000000 --- a/docs/en-US/working-with-snapshots.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Working with Snapshots - (Supported for the following hypervisors: XenServer, VMware vSphere, and KVM) - &PRODUCT; supports snapshots of disk volumes. Snapshots are a point-in-time capture of virtual machine disks. Memory and CPU states are not captured. - Snapshots may be taken for volumes, including both root and data disks. The administrator places a limit on the number of stored snapshots per user. Users can create new volumes from the snapshot for recovery of particular files and they can create templates from snapshots to boot from a restored disk. - Users can create snapshots manually or by setting up automatic recurring snapshot policies. Users can also create disk volumes from snapshots, which may be attached to a VM like any other disk volume. Snapshots of both root disks and data disks are supported. However, &PRODUCT; does not currently support booting a VM from a recovered root disk. A disk recovered from snapshot of a root disk is treated as a regular data disk; the data on recovered disk can be accessed by attaching the disk to a VM. - A completed snapshot is copied from primary storage to secondary storage, where it is stored until deleted or purged by newer snapshot. - - - - - - -
diff --git a/docs/en-US/working-with-system-vm.xml b/docs/en-US/working-with-system-vm.xml deleted file mode 100644 index 073d0772561..00000000000 --- a/docs/en-US/working-with-system-vm.xml +++ /dev/null @@ -1,40 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - Working with System Virtual Machines - &PRODUCT; uses several types of system virtual machines to perform tasks in the cloud. In - general &PRODUCT; manages these system VMs and creates, starts, and stops them as needed based - on scale and immediate needs. However, the administrator should be aware of them and their roles - to assist in debugging issues. - - You can configure the system.vm.random.password parameter to create a random system VM - password to ensure higher security. If you reset the value for system.vm.random.password to - true and restart the Management Server, a random password is generated and stored encrypted in - the database. You can view the decrypted password under the system.vm.password global - parameter on the &PRODUCT; UI or by calling the listConfigurations API. - - - - - - - - diff --git a/docs/en-US/working-with-templates.xml b/docs/en-US/working-with-templates.xml deleted file mode 100755 index c66fd0cf4f9..00000000000 --- a/docs/en-US/working-with-templates.xml +++ /dev/null @@ -1,45 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Working with Templates - A template is a reusable configuration for virtual machines. When users launch VMs, they can choose from a list of templates in &PRODUCT;. - Specifically, a template is a virtual disk image that includes one of a variety of operating systems, optional additional software such as office applications, and settings such as access control to determine who can use the template. Each template is associated with a particular type of hypervisor, which is specified when the template is added to &PRODUCT;. - &PRODUCT; ships with a default template. 
In order to present more choices to users, &PRODUCT; administrators and users can create templates and add them to &PRODUCT;. - - - - - - - - - - - - - - - - diff --git a/docs/en-US/working-with-usage-data.xml b/docs/en-US/working-with-usage-data.xml deleted file mode 100644 index 5324617ab23..00000000000 --- a/docs/en-US/working-with-usage-data.xml +++ /dev/null @@ -1,35 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - Working With Usage Data - The Usage Server provides aggregated usage records which you can use to create billing integration for the &PRODUCT; platform. The Usage Server works by taking data from the events log and creating summary usage records that you can access using the listUsageRecords API call. - The usage records show the amount of resources, such as VM run time or template storage space, consumed by guest instances. In the special case of bare metal instances, no template storage resources are consumed, but records showing zero usage are still included in the Usage Server's output. - The Usage Server runs at least once per day. It can be configured to run multiple times per day. Its behavior is controlled by configuration settings as described in the &PRODUCT; Administration Guide. - - - - - - diff --git a/docs/en-US/working-with-volumes.xml b/docs/en-US/working-with-volumes.xml deleted file mode 100644 index 5de5e6c7bd8..00000000000 --- a/docs/en-US/working-with-volumes.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- Working With Volumes - A volume provides storage to a guest VM. The volume can provide for a root disk or an - additional data disk. &PRODUCT; supports additional volumes for guest VMs. - Volumes are created for a specific hypervisor type. A volume that has been attached to guest - using one hypervisor type (e.g, XenServer) may not be attached to a guest that is using another - hypervisor type, for example:vSphere, KVM. This is because the different hypervisors use different - disk image formats. - &PRODUCT; defines a volume as a unit of storage available to a guest VM. Volumes are either - root disks or data disks. The root disk has "/" in the file system and is usually the boot - device. Data disks provide for additional storage, for example: "/opt" or "D:". Every guest VM - has a root disk, and VMs can also optionally have a data disk. End users can mount multiple data - disks to guest VMs. Users choose data disks from the disk offerings created by administrators. - The user can create a template from a volume as well; this is the standard procedure for private - template creation. Volumes are hypervisor-specific: a volume from one hypervisor type may not be - used on a guest of another hypervisor type. - - &PRODUCT; supports attaching up to 13 data disks to a VM on XenServer hypervisor versions - 6.0 and above. For the VMs on other hypervisor types, the data disk limit is 6. - - - - - - - - - -
diff --git a/docs/en-US/writing-new-documentation.xml b/docs/en-US/writing-new-documentation.xml deleted file mode 100644 index 7557359fd09..00000000000 --- a/docs/en-US/writing-new-documentation.xml +++ /dev/null @@ -1,100 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Writing &PRODUCT; Documentation - &PRODUCT; documentation is written in DocBook xml format. Each guide defined with a publican configuration file refers to a DocBook book. - These books are defined in xml files in docs/en-US, for instance if we look at the Developers guide, its configuration file contains: - - xml_lang: en-US - type: Book - docname: Developers_Guide - brand: cloudstack - chunk_first: 1 - chunk_section_depth: 1 - - The docname key gives you the basename of the DocBook file located in the en-US directory that contains the description of the book. - Looking closely at Developers_Guide.xml we see that it contains book tags and several references to other xml files. These are the chapters of the book, currently they are: - - - - - - - - - - - - - - ]]> - - All these xml files are written in DocBook format. - - DocBook format is well documented, refer to the documentation for any questions about DocBook tags - - When writing documentation, you therefore need to located the book,chapter and section of the content you want to write/correct. - Or create a new book,chapter,section. - You will then learn much more about DocBook tagging. In order to write this chapter about documentation, I added the working-with-documentation.xmlfile describing a chapter in the Developer book and I created several sections within that chapter like so: - - - Preparing and Building &PRODUCT; Documentation - This chapter describes how to install publican, how to write new documentation and build a guide as well as how to build a translated version of the documentation using transifex - - - - - - ]]> - - - Note the id witin the chapter tag, it represents the basename of the xml file describing the chapter. - For translation purposes it is important that this basename be less than 50 characters long. - - This chapter also refers to xml files which contains each section. 
While you could embed the sections directly in the chapter file and as a matter of fact also write the chapters within a single book file. Breaking things up in smaller files at the granularity of the section, allows us to re-use any section to build different books. - For completeness here is an example of a section: - - - Building &PRODUCT; Documentation - To build a specific guide, go to the source tree of the documentation in /docs and identify the guide you want to build. - Currently there are four guides plus the release notes, all defined in publican configuration files: - - publican-adminguide.cfg - publican-devguide.cfg - publican-installation.cfg - publican-plugin-niciranvp.cfg - publican-release-notes.cfg - - To build the Developer guide for example, do the following: - publican build --config=publican-devguide.cfg --formats=pdf --langs=en-US - A pdf file will be created in tmp/en-US/pdf, you may choose to build the guide in a different format like html. In that case just replace the format value. -
- ]]> - - Happy Publicating and DocBooking. - diff --git a/docs/en-US/xenserver-maintenance-mode.xml b/docs/en-US/xenserver-maintenance-mode.xml deleted file mode 100644 index b947278a9bb..00000000000 --- a/docs/en-US/xenserver-maintenance-mode.xml +++ /dev/null @@ -1,51 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - -
- XenServer and Maintenance Mode - For XenServer, you can take a server offline temporarily by using the Maintenance Mode feature in XenCenter. When you place a server into Maintenance Mode, all running VMs are automatically migrated from it to another host in the same pool. If the server is the pool master, a new master will also be selected for the pool. While a server is Maintenance Mode, you cannot create or start any VMs on it. - To place a server in Maintenance Mode: - - In the Resources pane, select the server, then do one of the following: - - Right-click, then click Enter Maintenance Mode on the shortcut menu. - - On the Server menu, click Enter Maintenance Mode. - - - Click Enter Maintenance Mode. - - The server's status in the Resources pane shows when all running VMs have been successfully migrated off the server. - To take a server out of Maintenance Mode: - - In the Resources pane, select the server, then do one of the following: - - Right-click, then click Exit Maintenance Mode on the shortcut menu. - - On the Server menu, click Exit Maintenance Mode. - - - Click Exit Maintenance Mode. - - -
diff --git a/docs/en-US/xenserver-topology-req.xml b/docs/en-US/xenserver-topology-req.xml deleted file mode 100644 index 12b9b077fe7..00000000000 --- a/docs/en-US/xenserver-topology-req.xml +++ /dev/null @@ -1,24 +0,0 @@ - - -%BOOK_ENTITIES; -]> - -
- XenServer Topology Requirements - The Management Servers communicate with XenServer hosts on ports 22 (ssh), 80 (HTTP), and 443 (HTTPs). -
diff --git a/docs/en-US/zone-add.xml b/docs/en-US/zone-add.xml deleted file mode 100644 index 4137b671ee2..00000000000 --- a/docs/en-US/zone-add.xml +++ /dev/null @@ -1,48 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - -
- Adding a Zone - When you add a new zone, you will be prompted to configure the zone’s physical network and add the first pod, cluster, host, primary storage, and secondary storage. - - Log in to the &PRODUCT; UI as the root administrator. See . - In the left navigation, choose Infrastructure. - On Zones, click View More. - Click Add Zone. The zone creation wizard will appear. - Choose one of the following network types: - - Basic. For AWS-style networking. Provides a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering). - Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support. - - - The rest of the steps differ depending on whether you chose Basic or Advanced. Continue with the steps that apply to you: - - - - - - - - -
diff --git a/docs/pot/Admin_Guide.pot b/docs/pot/Admin_Guide.pot deleted file mode 100644 index d3acd9b8e29..00000000000 --- a/docs/pot/Admin_Guide.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "&PRODUCT; Administrator's Guide" -msgstr "" - -#. Tag: productname -#, no-c-format -msgid "Apache CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Administration Guide for &PRODUCT;." -msgstr "" - diff --git a/docs/pot/Author_Group.pot b/docs/pot/Author_Group.pot deleted file mode 100644 index 7e9c48f48bd..00000000000 --- a/docs/pot/Author_Group.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: firstname -#, no-c-format -msgid "Apache" -msgstr "" - -#. Tag: surname -#, no-c-format -msgid "CloudStack" -msgstr "" - diff --git a/docs/pot/Book_Info.pot b/docs/pot/Book_Info.pot deleted file mode 100644 index a2f07956e9c..00000000000 --- a/docs/pot/Book_Info.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "&PRODUCT; Guide" -msgstr "" - -#. Tag: subtitle -#, no-c-format -msgid "Revised August 9, 2012 10:48 pm Pacific" -msgstr "" - -#. Tag: productname -#, no-c-format -msgid "Apache CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Complete technical documentation of &PRODUCT;." -msgstr "" - diff --git a/docs/pot/Book_Info_Release_Notes_4-0.pot b/docs/pot/Book_Info_Release_Notes_4-0.pot deleted file mode 100644 index ea3ff8bf1ca..00000000000 --- a/docs/pot/Book_Info_Release_Notes_4-0.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Version 4.0.0-incubating Release Notes" -msgstr "" - -#. Tag: subtitle -#, no-c-format -msgid "Revised October 17, 2012 19:49 UTC" -msgstr "" - -#. Tag: productname -#, no-c-format -msgid "Apache CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Release notes for the Apache CloudStack 4.0.0-incubating release." -msgstr "" - diff --git a/docs/pot/CloudStack_Nicira_NVP_Guide.pot b/docs/pot/CloudStack_Nicira_NVP_Guide.pot deleted file mode 100644 index 5320c4b66a8..00000000000 --- a/docs/pot/CloudStack_Nicira_NVP_Guide.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "&PRODUCT; Plugin Guide for the Nicira NVP Plugin" -msgstr "" - -#. Tag: productname -#, no-c-format -msgid "Apache CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Plugin Guide for the Nicira NVP Plugin." -msgstr "" - diff --git a/docs/pot/Common_Content/Legal_Notice.pot b/docs/pot/Common_Content/Legal_Notice.pot deleted file mode 100644 index f059a2bc57f..00000000000 --- a/docs/pot/Common_Content/Legal_Notice.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: para -#, no-c-format -msgid "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://www.apache.org/licenses/LICENSE-2.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Apache CloudStack is an effort undergoing incubation at The Apache Software Foundation (ASF)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Incubation is required of all newly accepted projects until a further review indicates that the infrastructure, communications, and decision making process have stabilized in a manner consistent with other successful ASF projects. While incubation status is not necessarily a reflection of the completeness or stability of the code, it does indicate that the project has yet to be fully endorsed by the ASF." -msgstr "" - diff --git a/docs/pot/Developers_Guide.pot b/docs/pot/Developers_Guide.pot deleted file mode 100644 index c5706c2e7e6..00000000000 --- a/docs/pot/Developers_Guide.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "&PRODUCT; Developer's Guide" -msgstr "" - -#. Tag: productname -#, no-c-format -msgid "Apache CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This guide shows how to develop &PRODUCT;, use the API for operation and integration, access the usage data and use &PRODUCT; specific tools to ease development, testing and integration." -msgstr "" - diff --git a/docs/pot/Installation_Guide.pot b/docs/pot/Installation_Guide.pot deleted file mode 100644 index ee6bb9da0e2..00000000000 --- a/docs/pot/Installation_Guide.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "&PRODUCT; Installation Guide" -msgstr "" - -#. Tag: productname -#, no-c-format -msgid "Apache CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Installation Guide for &PRODUCT;." -msgstr "" - diff --git a/docs/pot/LDAPserver-for-user-authentication.pot b/docs/pot/LDAPserver-for-user-authentication.pot deleted file mode 100644 index fbac556906c..00000000000 --- a/docs/pot/LDAPserver-for-user-authentication.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using an LDAP Server for User Authentication" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can use an external LDAP server such as Microsoft Active Directory or ApacheDS to authenticate &PRODUCT; end-users. Just map &PRODUCT; accounts to the corresponding LDAP accounts using a query filter. The query filter is written using the query syntax of the particular LDAP server, and can include special wildcard characters provided by &PRODUCT; for matching common values such as the user’s email address and name. &PRODUCT; will search the external LDAP directory tree starting at a specified base directory and return the distinguished name (DN) and password of the matching user. This information along with the given password is used to authenticate the user.." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To set up LDAP authentication in &PRODUCT;, call the &PRODUCT; API command ldapConfig and provide the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hostname or IP address and listening port of the LDAP server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Base directory and query filter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Search user DN credentials, which give &PRODUCT; permission to search on the LDAP server" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "SSL keystore and password, if SSL is used" -msgstr "" - diff --git a/docs/pot/Preface.pot b/docs/pot/Preface.pot deleted file mode 100644 index 0684f6ca3e2..00000000000 --- a/docs/pot/Preface.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:55\n" -"PO-Revision-Date: 2013-02-02T20:11:55\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Preface" -msgstr "" - diff --git a/docs/pot/Release_Notes.pot b/docs/pot/Release_Notes.pot deleted file mode 100644 index e95dc9e5985..00000000000 --- a/docs/pot/Release_Notes.pot +++ /dev/null @@ -1,4478 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Submitting Feedback and Getting Help" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Apache CloudStack project has mailing lists for users and developers. These are the official channels of communication for the project and are the best way to get answers about using and contributing to CloudStack. It's a good idea to subscribe to the cloudstack-users mailing list if you've deployed or are deploying CloudStack into production, and even for test deployments." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The CloudStack developer's mailing list (cloudstack-dev) is for discussions about CloudStack development, and is the best list for discussing possible bugs in CloudStack. Anyone contributing to CloudStack should be on this mailing list." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can also report bugs in CloudStack using the Apache Defect Tracking System." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To posts to the lists, you'll need to be subscribed. See the CloudStack Web site for instructions." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Upgrade Instructions" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Upgrade from 3.0.2 to 4.0.0-incubating" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Perform the following to upgrade from version 3.0.2 to version 4.0.0-incubating. Note that some of the steps here are only required if you're using a specific hypervisor. The steps that are hypervisor-specific are called out with a note." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that you query your IP address usage records and process them or make a backup. During the upgrade you will lose the old IP address usage records." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Starting in 3.0.2, the usage record format for IP addresses is the same as the rest of the usage types. Instead of a single record with the assignment and release dates, separate records are generated per aggregation period with start and end dates. After upgrading, any existing IP address usage records in the old format will no longer be available." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following upgrade instructions apply only if you're using VMware hosts. If you're not using VMware hosts, skip this step and move on to step 3: stopping all usage servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In each zone that includes VMware hosts, you need to add a new system VM template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "While running the existing 3.0.2 system, log in to the UI as root administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select view, click Templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Register template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Register template dialog box is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Register template dialog box, specify the following values (do not change these):" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Field" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Value" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "systemvm-vmware-3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the zone where this hypervisor is used" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OVA" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OS Type" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Debian GNU/Linux 5.0 (32-bit)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Extractable" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "no" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password Enabled" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Featured" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Watch the screen to be sure that the template downloads successfully and enters the READY state. Do not proceed until this is successful." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop all Usage Servers if running. Run this on all Usage Server hosts." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-usage stop" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop the Management Servers. Run this on all Management Server hosts." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management stop" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the MySQL master, take a backup of the MySQL databases. 
We recommend performing this step even in test upgrades. If there is an issue, this will assist with debugging." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the following commands, it is assumed that you have set the root password on the database, which is a CloudStack recommended best practice. Substitute your own MySQL root password." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mysqldump -u root -pmysql_password cloud > cloud-backup.dmp\n" -"# mysqldump -u root -pmysql_password cloud_usage > cloud-usage-backup.dmp" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Either build RPM/DEB packages as detailed in the Installation Guide, or use one of the community provided yum/apt repositories to gain access to the &PRODUCT; binaries." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After you have configured an appropriate yum or apt repository, you may execute the one of the following commands as appropriate for your environment in order to upgrade &PRODUCT;:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# yum update cloud-*" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# apt-get update\n" -"# apt-get upgrade cloud-*" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You will, of course, have to agree to the changes suggested by Yum or APT." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the upgrade output includes a message similar to the following, then some custom content was found in your old components.xml, and you need to merge the two files:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "warning: /etc/cloud/management/components.xml created as /etc/cloud/management/components.xml.rpmnew " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Instructions follow in the next step." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you have made changes to your copy of /etc/cloud/management/components.xml the changes will be preserved in the upgrade. 
However, you need to do the following steps to place these changes in a new version of the file which is compatible with version 4.0.0-incubating." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make a backup copy of /etc/cloud/management/components.xml. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mv /etc/cloud/management/components.xml /etc/cloud/management/components.xml-backup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy /etc/cloud/management/components.xml.rpmnew to create a new /etc/cloud/management/components.xml:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cp -ap /etc/cloud/management/components.xml.rpmnew /etc/cloud/management/components.xml" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Merge your changes from the backup file into the new components.xml." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# vi /etc/cloud/management/components.xml" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you have more than one management server node, repeat the upgrade steps on each node." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start the first Management Server. Do not start any other Management Server nodes yet." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait until the databases are upgraded. Ensure that the database upgrade is complete. After confirmation, start the other Management Servers one at a time by running the same command on each node." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Failing to restart the Management Server indicates a problem in the upgrade. Having the Management Server restarted without any issues indicates that the upgrade is successfully completed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start all Usage Servers (if they were running on your previous version). Perform this on each Usage Server host." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "# service cloud-usage start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Additional steps are required for each KVM host. These steps will not affect running guests in the cloud. These steps are required only for clouds using KVM as hosts and only on the KVM hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure a yum or apt respository containing the &PRODUCT; packages as outlined in the Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop the running agent." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# service cloud-agent stop" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Update the agent software with one of the following command sets as appropriate for your environment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# yum update cloud-*" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# apt-get update" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# apt-get upgrade cloud-*" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start the agent." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-agent start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit /etc/cloud/agent/agent.properties to change the resource parameter from \"com.cloud.agent.resource.computing.LibvirtComputingResource\" to \"com.cloud.hypervisor.kvm.resource.LibvirtComputingResource\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start the cloud agent and cloud management services." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the Management Server is up and running, log in to the CloudStack UI and restart the virtual router for proper functioning of all the features." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the CloudStack UI as administrator, and check the status of the hosts. All hosts should come to Up state (except those that you know to be offline). 
You may need to wait 20 or 30 minutes, depending on the number of hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Troubleshooting: If login fails, clear your browser cache and reload the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Do not proceed to the next step until the hosts show in Up state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are upgrading from 3.0.2, perform the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that the admin port is set to 8096 by using the \"integration.api.port\" global parameter." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This port is used by the cloud-sysvmadm script at the end of the upgrade procedure. For information about how to set this parameter, see \"Setting Global Configuration Parameters\" in the Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you don't want the admin port to remain open, you can set it to null after the upgrade is done and restart the management server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the cloud-sysvmadm script to stop, then start, all Secondary Storage VMs, Console Proxy VMs, and virtual routers. Run the script once on each management server. Substitute your own IP address of the MySQL instance, the MySQL user to connect as, and the password to use for that user. In addition to those parameters, provide the -c and -r arguments. For example:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# nohup cloud-sysvmadm -d 192.168.1.5 -u cloud -p password -c -r > sysvm.log 2>&1 &" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# tail -f sysvm.log" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This might take up to an hour or more to run, depending on the number of accounts in the system." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If needed, upgrade all Citrix XenServer hypervisor hosts in your cloud to a version supported by CloudStack 4.0.0-incubating. The supported versions are XenServer 5.6 SP2 and 6.0.2. Instructions for upgrade can be found in the CloudStack 4.0.0-incubating Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now apply the XenServer hotfix XS602E003 (and any other needed hotfixes) to XenServer v6.0.2 hypervisor hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disconnect the XenServer cluster from CloudStack." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar of the CloudStack UI, select Infrastructure. Under Clusters, click View All. Select the XenServer cluster and click Actions - Unmanage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This may fail if there are hosts not in one of the states Up, Down, Disconnected, or Alert. You may need to fix that before unmanaging this cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait until the status of the cluster has reached Unmanaged. Use the CloudStack UI to check on the status. When the cluster is in the unmanaged state, there is no connection to the hosts in the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To clean up the VLAN, log in to one XenServer host and run:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/cloud-clean-vlan.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now prepare the upgrade by running the following on one XenServer host:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/cloud-prepare-upgrade.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you see a message like \"can't eject CD\", log in to the VM and unmount the CD, then run this script again." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Upload the hotfix to the XenServer hosts. Always start with the Xen pool master, then the slaves. 
Using your favorite file copy utility (e.g. WinSCP), copy the hotfixes to the host. Place them in a temporary folder such as /tmp." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the Xen pool master, upload the hotfix with this command:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "xe patch-upload file-name=XS602E003.xsupdate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make a note of the output from this command, which is a UUID for the hotfix file. You'll need it in another step later." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) If you are applying other hotfixes as well, you can repeat the commands in this section with the appropriate hotfix number. For example, XS602E004.xsupdate." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Manually live migrate all VMs on this host to another host. First, get a list of the VMs on this host:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# xe vm-list" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then use this command to migrate each VM. Replace the example host name and VM name with your own:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# xe vm-migrate live=true host=host-name vm=VM-name" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Troubleshooting" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you see a message like \"You attempted an operation on a VM which requires PV drivers to be installed but the drivers were not detected,\" run:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/make_migratable.sh b6cf79c8-02ee-050b-922f-49583d9f1a14." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Apply the hotfix. First, get the UUID of this host:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe host-list" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then use the following command to apply the hotfix. 
Replace the example host UUID with the current host ID, and replace the hotfix UUID with the output from the patch-upload command you ran on this machine earlier. You can also get the hotfix UUID by running xe patch-list." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "xe patch-apply host-uuid=host-uuid uuid=hotfix-uuid" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy the following files from the CloudStack Management Server to the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy from here..." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "...to here" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/xenserver60/NFSSR.py" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/sm/NFSSR.py" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/setupxenserver.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/setupxenserver.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/make_migratable.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/make_migratable.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Only for hotfixes XS602E005 and XS602E007) You need to apply a new Cloud Support Pack." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Download the CSP software onto the XenServer host from one of the following links:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For hotfix XS602E005: http://coltrane.eng.hq.xensource.com/release/XenServer-6.x/XS-6.0.2/hotfixes/XS602E005/56710/xe-phase-2/xenserver-cloud-supp.tgz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For hotfix XS602E007: http://coltrane.eng.hq.xensource.com/release/XenServer-6.x/XS-6.0.2/hotfixes/XS602E007/57824/xe-phase-2/xenserver-cloud-supp.tgz" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Extract the file:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# tar xf xenserver-cloud-supp.tgz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following script:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe-install-supplemental-pack xenserver-cloud-supp.iso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the XenServer host is part of a zone that uses basic networking, disable Open vSwitch (OVS):" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe-switch-network-backend bridge" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reboot this XenServer host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the message \"mv: cannot stat `/etc/cron.daily/logrotate': No such file or directory\" appears, you can safely ignore it." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "for pbd in `xe pbd-list currently-attached=false| grep ^uuid | awk '{print $NF}'`; do xe pbd-plug uuid=$pbd ; " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On each slave host in the Xen pool, repeat these steps, starting from \"manually live migrate VMs.\"" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Troubleshooting Tip" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If passwords which you know to be valid appear not to work after upgrade, or other UI issues are seen, try clearing your browser cache and reloading the UI page." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Upgrade from 2.2.14 to 4.0.0-incubating" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that you query your IPaddress usage records and process them; for example, issue invoices for any usage that you have not yet billed users for." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Starting in 3.0.2, the usage record format for IP addresses is the same as the rest of the usage types. 
Instead of a single record with the assignment and release dates, separate records are generated per aggregation period with start and end dates. After upgrading to 4.0.0-incubating, any existing IP address usage records in the old format will no longer be available." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are using version 2.2.0 - 2.2.13, first upgrade to 2.2.14 by using the instructions in the 2.2.14 Release Notes." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "KVM Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If KVM hypervisor is used in your cloud, be sure you completed the step to insert a valid username and password into the host_details table on each KVM node as described in the 2.2.14 Release Notes. This step is critical, as the database will be encrypted after the upgrade to 4.0.0-incubating." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "While running the 2.2.14 system, log in to the UI as root administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Using the UI, add a new System VM template for each hypervisor type that is used in your cloud. In each zone, add a system VM template for each hypervisor used in that zone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Register template dialog box, specify the following values depending on the hypervisor type (do not change these):" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: systemvm-xenserver-3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description: systemvm-xenserver-3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL: http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone: Choose the zone where this hypervisor is used" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor: XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format: VHD" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "OS Type: Debian GNU/Linux 5.0 (32-bit)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Extractable: no" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password Enabled: no" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public: no" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Featured: no" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: systemvm-kvm-3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description: systemvm-kvm-3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL: http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor: KVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format: QCOW2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: systemvm-vmware-3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description: systemvm-vmware-3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL: http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor: VMware" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format: OVA" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Watch the screen to be sure that the template downloads successfully and enters the READY state. Do not proceed until this is successful" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "WARNING: If you use more than one type of hypervisor in your cloud, be sure you have repeated these steps to download the system VM template for each hypervisor type. Otherwise, the upgrade will fail." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mysqldump -u root -pmysql_password cloud > cloud-backup.dmp\n" -"# mysqldump -u root -pmysql_password cloud_usage > cloud-usage-backup.dmp\n" -"" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you have made changes to your existing copy of the file components.xml in your previous-version CloudStack installation, the changes will be preserved in the upgrade. However, you need to do the following steps to place these changes in a new version of the file which is compatible with version 4.0.0-incubating." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How will you know whether you need to do this? If the upgrade output in the previous step included a message like the following, then some custom content was found in your old components.xml, and you need to merge the two files:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make a backup copy of your /etc/cloud/management/components.xml file. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mv /etc/cloud/management/components.xml /etc/cloud/management/components.xml-backup" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cp -ap /etc/cloud/management/components.xml.rpmnew /etc/cloud/management/components.xml" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Merge your changes from the backup file into the new components.xml file." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# vi /etc/cloud/management/components.xml\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you have made changes to your existing copy of the /etc/cloud/management/db.properties file in your previous-version CloudStack installation, the changes will be preserved in the upgrade. However, you need to do the following steps to place these changes in a new version of the file which is compatible with version 4.0.0-incubating." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make a backup copy of your file /etc/cloud/management/db.properties. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mv /etc/cloud/management/db.properties /etc/cloud/management/db.properties-backup" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Copy /etc/cloud/management/db.properties.rpmnew to create a new /etc/cloud/management/db.properties:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cp -ap /etc/cloud/management/db.properties.rpmnew etc/cloud/management/db.properties" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Merge your changes from the backup file into the new db.properties file." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# vi /etc/cloud/management/db.properties" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the management server node, run the following command. It is recommended that you use the command-line flags to provide your own encryption keys. See Password and Key Encryption in the Installation Guide." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cloud-setup-encryption -e encryption_type -m management_server_key -k database_key" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When used without arguments, as in the following example, the default encryption type and keys will be used:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For encryption_type, use file or web to indicate the technique used to pass in the database encryption password. Default: file." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For management_server_key, substitute the default key that is used to encrypt confidential parameters in the properties file. Default: password. It is highly recommended that you replace this with a more secure value" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For database_key, substitute the default key that is used to encrypt confidential parameters in the CloudStack database. Default: password. It is highly recommended that you replace this with a more secure value." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat steps 10 - 14 on every management server node. 
If you provided your own encryption key in step 14, use the same key on all other management servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait until the databases are upgraded. Ensure that the database upgrade is complete. You should see a message like \"Complete! Done.\" After confirmation, start the other Management Servers one at a time by running the same command on each node." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-usage start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(KVM only) Additional steps are required for each KVM host. These steps will not affect running guests in the cloud. These steps are required only for clouds using KVM as hosts and only on the KVM hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure your CloudStack package repositories as outlined in the Installation Guide" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-agent stop" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Update the agent software with one of the following command sets as appropriate." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" # apt-get update\n" -"# apt-get upgrade cloud-*\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy the contents of the agent.properties file to the new agent.properties file by using the following command" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "sed -i 's/com.cloud.agent.resource.computing.LibvirtComputingResource/com.cloud.hypervisor.kvm.resource.LibvirtComputingResource/g' /etc/cloud/agent/agent.properties" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the CloudStack UI as admin, and check the status of the hosts. All hosts should come to Up state (except those that you know to be offline). You may need to wait 20 or 30 minutes, depending on the number of hosts." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Do not proceed to the next step until the hosts show in the Up state. If the hosts do not come to the Up state, contact support." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following script to stop, then start, all Secondary Storage VMs, Console Proxy VMs, and virtual routers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the command once on one management server. Substitute your own IP address of the MySQL instance, the MySQL user to connect as, and the password to use for that user. In addition to those parameters, provide the \"-c\" and \"-r\" arguments. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# nohup cloud-sysvmadm -d 192.168.1.5 -u cloud -p password -c -r > sysvm.log 2>&1 &\n" -"# tail -f sysvm.log" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After the script terminates, check the log to verify correct execution:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# tail -f sysvm.log" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The content should be like the following:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"Stopping and starting 1 secondary storage vm(s)...\n" -"Done stopping and starting secondary storage vm(s)\n" -"Stopping and starting 1 console proxy vm(s)...\n" -"Done stopping and starting console proxy vm(s).\n" -"Stopping and starting 4 running routing vm(s)...\n" -"Done restarting router(s).\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you would like additional confirmation that the new system VM templates were correctly applied when these system VMs were rebooted, SSH into the System VM and check the version." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use one of the following techniques, depending on the hypervisor." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "XenServer or KVM:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "SSH in by using the link local IP address of the system VM. For example, in the command below, substitute your own path to the private key used to log in to the system VM and your own link local IP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following commands on the XenServer or KVM host on which the system VM is present:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# ssh -i private-key-path link-local-ip -p 3922\n" -"# cat /etc/cloudstack-release" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The output should be like the following:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "Cloudstack Release 4.0.0-incubating Mon Oct 9 15:10:04 PST 2012" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "ESXi" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SSH in using the private IP address of the system VM. For example, in the command below, substitute your own path to the private key used to log in to the system VM and your own private IP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following commands on the Management Server:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# ssh -i private-key-path private-ip -p 3922\n" -"# cat /etc/cloudstack-release\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Apply the XenServer hotfix XS602E003 (and any other needed hotfixes) to XenServer v6.0.2 hypervisor hosts." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "/opt/xensource/bin/cloud-clean-vlan.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare the upgrade by running the following on one XenServer host:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "/opt/xensource/bin/cloud-prepare-upgrade.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you see a message like \"can't eject CD\", log in to the VM and umount the CD, then run this script again." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Upload the hotfix to the XenServer hosts. Always start with the Xen pool master, then the slaves. Using your favorite file copy utility (e.g. WinSCP), copy the hotfixes to the host. Place them in a temporary folder such as /root or /tmp." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "xe patch-upload file-name=XS602E003.xsupdate" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe vm-list" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe vm-migrate live=true host=host-name vm=VM-name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# xe host-list" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "xe patch-apply host-uuid=host-uuid uuid=hotfix-uuid" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/xenserver60/NFSSR.py" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/sm/NFSSR.py" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/setupxenserver.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/setupxenserver.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/make_migratable.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/make_migratable.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# tar xf xenserver-cloud-supp.tgz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# xe-install-supplemental-pack xenserver-cloud-supp.iso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# xe-switch-network-backend bridge" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/setupxenserver.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "for pbd in `xe pbd-list currently-attached=false| grep ^uuid | awk '{print $NF}'`; do xe pbd-plug uuid=$pbd ; " -msgstr "" - -#. 
Tag: title -#, no-c-format -msgid "Version 4.0.0-incubating" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "What’s New in 4.0.0-incubating" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Apache CloudStack 4.0.0-incubating includes the following new features:" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Inter-VLAN Routing" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Inter-VLAN Routing is the capability to route network traffic between VLANs. This feature enables you to set up Virtual Private Clouds (VPC) that can hold multi-tier applications. These tiers are deployed on different VLANs that can communicate with each other. You can provision VLANs to the tiers your create, and VMs can be deployed on different tiers, such as Web, Application, or Database. The VLANs are connected to a virtual router, which facilitates communication between the VMs. In effect, you can segment VMs by means of VLANs into different networks that can host multi-tier applications. Such segmentation by means of VLANs logically separate application VMs for higher security and lower broadcasts, while remaining physically connected to the same device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This feature is supported on XenServer and VMware hypervisors." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A Site-to-Site VPN connection helps you establish a secure connection from an enterprise datacenter to the cloud infrastructure. This allows users to access the guest VMs by establishing a VPN connection to the virtual router of the account from a device in the datacenter of the enterprise. Having this facility eliminates the need to establish VPN connections to individual VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The supported endpoints on the remote datacenters are:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cisco ISR with IOS 12.4 or later" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Juniper J-Series routers with JunOS 9.5 or later" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Local Storage Support for Data Volumes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can now create data volumes on local storage. The data volume is placed on the same XenServer host as the VM instance that is attached to the data volume. These local data volumes can be attached to virtual machines, detached, re-attached, and deleted just as with the other types of data volume. In earlier releases of CloudStack, only the root disk could be placed in local storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Local storage is ideal for scenarios where persistence of data volumes and HA is not required. Some of the benefits include reduced disk I/O latency and cost reduction from using inexpensive local disks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In order for local volumes to be used, the feature must be enabled for the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can create a data disk offering for local storage. When a user creates a new VM, they can select this disk offering in order to cause the data disk volume to be placed in local storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can not migrate a VM that has a volume in local storage to a different host, nor migrate the volume itself away to a different host. If you want to put a host into maintenance mode, you must first stop any VMs with local data volumes on that host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Local storage support for volumes is available for XenServer, KVM, and VMware hypervisors." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Tags" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A tag is a key-value pair that stores metadata about a resource in the cloud. Tags are useful for categorizing resources. 
For example, you can tag a user VM with a value that indicates the user's city of residence. In this case, the key would be \"city\" and the value might be \"Toronto\" or \"Tokyo.\" You can then request CloudStack to find all resources that have a given tag; for example, VMs for users in a given city." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can tag a user virtual machine, volume, snapshot, guest network, template, ISO, firewall rule, port forwarding rule, public IP address, security group, load balancer rule, project, VPC, network ACL, or static route. You can not tag a remote access VPN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can work with tags through the UI or through the new API commands createTags, deleteTags, and listTags. You can define multiple tags for each resource. There is no limit on the number of tags you can define. Each tag can be up to 255 characters long. Users can define tags on the resources they own, and administrators can define tags on any resources in the cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new optional input parameter, \"tags,\" has been added to many of the list* API commands. The following example shows how to use this new parameter to find all the volumes having tag region=canada OR tag city=Toronto:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=listVolumes\n" -"&listAll=true\n" -"&tags[0].key=region\n" -"&tags[0].value=canada\n" -"&tags[1].key=city\n" -"&tags[1].value=Toronto" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following API commands have the new \"tags\" input parameter:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVirtualMachines" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVolumes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSnapshots" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listTemplates" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listIsos" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listFirewallRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPortForwardingRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPublicIpAddresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSecurityGroups" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listLoadBalancerRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listProjects" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVPCs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworkACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listStaticRoutes" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "AWS API Changes for Tags" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Some changes have been made to the Amazon Web Services API compatibility support in order to accommodate the new tagging feature." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New APIs:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ec2-create-tags" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add tags to one or more resources." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ec2-delete-tags" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Remove tags from one or more resources." -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-tags" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Show currently defined tags." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Changed APIs:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Changed API" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-images" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Output now shows tags defined for each image." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ec2-describe-instances" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The following filters can now be passed in to limit the output result set: tag-key, tag-value and tag:key" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ec2-describe-snapshots" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-volumes" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Secure Console Access on XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "With the addition of Secure Console feature, users can now securely access the VM consoles on the XenServer hypervisor. You can either SSH or use the View Console option in the Management Server to securely connect to the VMs on the XenServer host. The Management Server uses the xapi API to stream the VM consoles. However, there is no change in the way you can access the console of a VM. This feature is supported on XenServer 5.6 and 6.0 versions." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Stopped VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This release supports creating VMs without starting them on the backend. You can determine whether the VM needs to be started as part of the VM deployment. A VM can be deployed in two ways: create and start a VM (the default method); create a VM and leave it in the stopped state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new request parameter, startVM, is introduced in the deployVm API to support the stopped VM feature. The possible values are:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "true - The VM starts as a part of the VM deployment" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "false - The VM is left in stopped state at the end of the VM deployment" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Uploading an Existing Volume to a Virtual Machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Existing data can now be made accessible to a virtual machine. This is called uploading a volume to the VM. 
For example, this is useful to upload data from a local file system and attach it to a VM. Root administrators, domain administrators, and end users can all upload existing volumes to VMs. The upload is performed by using HTTP. The uploaded volume is placed in the zone's secondary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This functionality is supported for the following hypervisors:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor : Disk Image Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer : VHD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware : OVA" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM : QCOW2" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Dedicated High-Availability Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "One or more hosts can now be designated for use only by high-availability (HA) enabled VMs that are restarted due to a host failure. Setting up a pool of such dedicated HA hosts as the recovery destination for all HA-enabled VMs make it easier to determine which VMs are restarted as part of the high-availability function. You can designate a host as a dedicated-HA restart node only if the Dedicated HA Hosts feature is enabled by setting the appropriate global configuration parameter." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Support for Amazon Web Services API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This release supports Amazon Web Services APIs, including Elastic Compute Cloud (EC2) API. Fidelity with the EC2 API and the installation experience for this functionality are both enhanced. In prior releases, users were required to install a separate component called CloudBridge, in addition to installing the Management Server. For new installations of CloudStack 4.0.0-incubating, this software is installed automatically along with CloudStack and runs in a more closely integrated fashion. 
The feature is disabled by default, but can be easily enabled by setting the appropriate global configuration parameter and performing a few setup steps." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "The Nicira NVP Plugin" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Nicira NVP plug-in allows CloudStack to use the Nicira solution for virtualized network as a provider for CloudStack networks and services. In CloudStack 4.0.0-incubating this plug-in supports the Connectivity service. This service is responsible for creating Layer 2 networks supporting the networks created by guests. When a tenant creates a new network, instead of a traditional VLAN, a logical network will be created by sending the appropriate calls to the Nicira NVP Controller. The plug-in has been tested with Nicira NVP versions 2.1.0, 2.2.0 and 2.2.1." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Support for CAStor Cluster" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack 4.0.0-incubating supports using a CAStor cluster as the back-end storage system for a CloudStack S3 front-end. The CAStor back-end storage for CloudStack extends the existing storage classes and allows the storage configuration attribute to point to a CAStor cluster. This feature makes use of the CloudStack server's local disk to spool files before writing them to CAStor when handling the PUT operations. However, a file must be successfully written into the CAStor cluster prior to the return of a success code to the S3 client to ensure that the transaction outcome is correctly reported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The S3 multipart file upload is not supported in this release. You are prompted with proper error message if a multipart upload is attempted." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Clustered Logical Volume Manager Support for KVM" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "This release adds Clustered Logical Volume Manager (CLVM) storage support for KVM hosts. With this support, you can use CLVM as primary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The CLVM support for KVM allows root and data disks (primary storage) to reside on Linux logical volumes. The administrators are required to configure CLVM on the KVM hosts independent of CloudStack. When the volume groups are available, an administrator can simply add primary storage of type CLVM, providing the volume group name. Then CloudStack creates and manages logical volumes as needed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CLVM also supports Snapshots. CloudStack creates an LVM snapshot, copy the applicable logical volume to the secondary storage in the qcow2 format, and then delete the LVM snapshot." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Rados Block Device Support for KVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can now use Rados Block Device (RBD) to run instances on Apache CloudStack 4.0.0-incubating. This can be done by adding a RBD pool as primary storage. Before using RBD, ensure that Qemu is compiled with RBD enabled, and the libvirt version is at least 0.10 with RBD enabled on the KVM host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a disk offering for RBD so that you can ensure that StoragePoolAllocator chooses the RBD pool to deploy instances." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Issues Fixed in 4.0.0-incubating" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Many bugs include a defect number that reflects the bug number that was held in the bug tracker run by Citrix (bugs.cloudstack.org). The Apache CloudStack project now uses Jira to manage its bugs, so some of the bugs that are referenced here may not be available to view. However, we are still including them for completeness." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Defect" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Many" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vSphere 5.0 now has GA support. Formerly only Beta support was provided." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16135" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Creating volumes after upgrading from snapshot taken in 2.2.14 no longer deletes the snapshot physically from the secondary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16122" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a site-to-site VPN setup, alerts are generated when the VPC virtual router is rebooted with multiple vpn connections." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16022" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If host connection fails due to a database error, host now disconnects and the Managerment Server id is removed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16011" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name of network offering is no longer truncated due to too-narrow field width in Add Guest Network dialog box." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15978" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the virtual router and its host go down, the high availability mechanism now works for the virtual router." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15921" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The 2.2.x security group script now accounts for the VMs created in the version 2.1 timeframe." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15919" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A level parameter is added to the listVolumes command; therefore queries return the response more quickly." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15904" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Upgrade from version 2.2.14 to CloudStack-3.0.5-0.2944-rhel5 works as expected. 
The upgrade script, /usr/share/cloud/setup/db/schema-2214to30-cleanup.sql, works as expected." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15879" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The database upgrade from version 3.0.4 to 3.0.5 works as expected." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15807" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network label for OVM now available in UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15779" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the thumbnail is requested, the console session will not be terminated." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15778" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fetching a VM thumbnail now gets a thumbnail of appropriate visual dimensions." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15734" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM Snapshots no longer shows incorrect disk usage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15733" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The domainId parameter for the listNetworks command now lists the resources belonging to the domain specified." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15676" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stopping the router no longer fails with the null pointer exception." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15648" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If creating a volume from a snapshot fails, the error is reported on the UI but the volume is stuck in the creating state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15646" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createFirewallRule API no longer causes null pointer exception." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15628" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In a KVM host, the high availability mechanism no longer takes a long time to migrate VMs to another KVM host if there are multiple storage pools." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15627" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Metadata instance-id and vm-id for existing VMs stays the same after upgrade." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15621" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Solved difficulty with allocating disk volumes when running multiple VM deployment in parallel." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15603" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack now stop the VMs when destroyVM command is called." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15586" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Vlan for an account no longer fails if multiple physical networks are present." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15582" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The dns-name filter is now supported for ec2-describe-instances in the Amazon Web Services API compatibility commands. The filter maps to the name of a user VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15503" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "An IP address which has static NAT rules can now be released. Subsequently, restarting this network after it was shutdown can succeed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15464" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Can now delete static route whose state is set to Revoke." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15443" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Creating a firewall rule no longer fails with an internal server error." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15398" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Corrected technique for programming DNS on the user VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15356" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Internal DNS 2 entry now correctly shown in UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15335" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The CloudBridge S3 Engine now connects to the database by using the deciphered password in the db.properties file." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15318" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "UI now correctly prevents the user from stopping a VM that is in the Starting state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15307" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fixed Japanese localization of instance statuses in the Instances menu." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15278" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The deployment planner no longer takes long time to locate a suitable host to deploy VMs when large number of clusters are present." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15274" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Creating a VLAN range using Zone ID without network ID now succeeds." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15243" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now check to be sure source NAT and VPN have same provider." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15232" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that networks using external load balancer/firewall in 2.2.14 or earlier can properly upgrade." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15200" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "No exception when trying to attach the same volume while attaching the first volume is in progress." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15173" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Additional cluster can no longer be added with same VSM IP address as another cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15167" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "AWS API calls now honor the admin account's ability to view or act on the resources owned by the regular users." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15163" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The minimum limit is not honored when there is not enough capacity to deploy all the VMs and the ec2-run-instances command with the -n >n1 -n2> option is used to deploy multiple VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15157" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Can now add/enable service providers for multiple physical networks through the UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15145" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "AWS API call ec2-register has better error handling for negative cases." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15122" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Filters now supported for AWS API call ec2-describe-availability-zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15120" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Actions column in UI of Volume page now shows action links." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15099" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Buttons no longer overlap text on Account Deletion confirmation page in UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15095" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensures you can not create a VM with a CPU frequency greater than the host CPU frequency." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15094" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CPU cap now set properly in VMware." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "CS-15077" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NullPointerException is no longer observed while executing the command to list the public IP in a basic zone created with the default shared NetScaler EIP and ELB network offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15044" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "UI now provides option to view the list of instances which are part of the guest network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15026" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "UI in Deploy VM dialog now lists only templates or ISOs depending on which is selected in previous dialog." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14989" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In KVM, the Create Instance wizard now shows only templates from the current (KVM) zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14986, CS-14985" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Listing filters works as expected in the ec2-describe-volumes and ec2-describe-snapshots commands." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14964" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Automatically starting the Console Proxy no longer fails due to its missing volume on the primary storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14907" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "User is now correctly prevented from trying to download an uploaded volume which has not yet been moved to primary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14879" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a user VM is stopped or terminated, the static NAT associated with this VM is now disabled. This public IP address is no longer owned by this account and can be associated to any other user VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14854" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Only the admin user can change the template permission to Public, so this option is removed from the UI for domain Admins and regular Users." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14817" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "While checking if network has any external provider, CloudStack will consider all providers in the network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14796" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When deploying a VM with ec2-run-instances, userdata is now encoded." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14770" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The API returns the keypair information when a VM is deployed with sshkey. This affects the API commands related to virtual machines (deployVirtualMachine, listVirtualMachines, ... *VirtualMachine), as well as the corresponding AWS APIs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14724" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "UI no longer displays the dropdown list of isolation method choices if sdn.ovs.controller is false." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14345" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Logout API returns XML header." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host IPs now associated with appropriate IPs according to traffic type." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14253" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Can now delete and re-create port forwarding rule on same firewall." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14452" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Data disk volumes are now automatically copied from one cluster to another." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13539" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Windows VM can get IP after reboot." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "CS-13537" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When user tries to delete a domain that contains sub-domains, an error message is now sent to convey the reason for the delete failure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13153" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "System VMs support HTTP proxy." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12642" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Added Close button to Select Project list view popup in UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12510" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Deleting and reinserting host_details no longer causes deadlocks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12407" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "F5 and Netscaler - when dedicated is selected, capacity field is disabled." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12111" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Email validation for edit user form." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-10928" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network read/write values now always positive numbers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15376, CS-15373" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The AWS APIs (EC2 and S3) now listen on the 7080 port and send request to CloudStack on the 8080 port just as any other clients of CloudStack." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13944" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The CloudStack 2.2.x to 3.0.x database upgrade for multiple physical networks is now supported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15300" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The admin accounts of a domain now honour the limits imposed on that domain just like the regular accounts do. 
A domain admin now is not allowed to create an unlimited number of instances, volumes, snapshots, and so on." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15396" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The CloudStack database now contain the UUD information after the 2.2.14 to 3.0.4 upgrade." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15450" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Upgrade from 2.2.14 to 3.0.4 no longer fails on a VMware host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15449" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Running cloudstack-aws-api-register no longer fails with the \"User registration failed with error: [Errno 113] No route to host\" error." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15455" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The iptable rules are configured to open the awsapi port (7080) as part of the installation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15429" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "While creating an instance with data volume, disk offering also is considered while checking the account limit on volume resources." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15414" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After the 2.2.14 to 3.0.4 upgrade, the value of the global parameter xen.guest.network.device is now decrypted before setting the traffic label." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15382" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "During 2.2.14 to 3.0.4 upgrade, the hosts no longer go to the Alert state if destroyed networks existed with non-existent tags prior to upgrade." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15323" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack supports the following Citrix XenServer hotfixes: XS602E003, XS602E004, and XS602E005." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15430" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Create snapshot now fails if creating a snapshot exceeds the snapshot resource limit for a domain admin or a user account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14256" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Virtual Router no longer remains in starting state for subdomain or user on a KVM 3.0.1 prerlease host on RHEL 6.2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-7495" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Implemented a variety of Xen management host improvements." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-8105" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NFS v4 for primary storage now works as expected on KVM hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-9989" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The error messages returned during VM deployment failure will have much more details than before." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12584" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can no longer add security groups not supported by the hypervisor in use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12705" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When creating a Network offering by using SRX as the service provider for SourceNAT servcies, an option is given in the CloudStack UI now to set the source_nat type to \"per Zone\"/\"per account\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12782" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Assigning a VM from Basic to Advanced zone no longer ignores the network ID. A warning message is displayed for VM movements across zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12591" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Broadcast Address on the Second Public IP NIC is now corrected." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13272" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When a user is deleted, all the associated properties, such as IPs and virtual routers, are now deleted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13377" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Creating template from a root disk of a stopped instance now provides an option to make it a \"Featured template\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13500" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reaching the first guest VM by using its public IP from the second guest VM no longer fails." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13853" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The default gateway can no longer be 0.0.0.0 in the Secondary Storage VM (SSVM)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13863" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The queryAsyncJobResult command in XML format now returns the correct UUIDs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13867" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Corrected CSP xenserver-cloud-supp.tgz for XenServer 5.6 and 6.0." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13904" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Labels and values for the service offerings CPU and memory are now consistent." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13998" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The SSVM kernel panic issue is fixed on XenServer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14090" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The issue is fixed where running the VMware snapshots randomly fails with the ArrayIndexOutOfBoundsException error." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14021" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The java.lang.OutOfMemoryError is fixed on the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14025" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The Python Eggs are provided to easily package the test client for each branch of CloudStack." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14068" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Resetting the VM password through the CloudStack UI no longer causes any error." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14156" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The pod which has the administrator's virtual router is no longer selected while creating the virtual routers for guests." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14182" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The users can now delete their ISOs as normal users." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14185" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The listOSTypes API now filters out the types of operating system by using the keywords." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14204" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The cloud-setup-bonding.sh command no longer generates the \"command not found\" error." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14214" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Specify VLAN option cannot be enabled now for an isolated Network offering with SourceNAT enabled." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14234" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Sending project invite email to an account now requires SMTP configured in CloudStack." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14237" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The garbage collector of the primary storage no longer fails when the first host in the cluster is not up." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14241" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Custom Volume Disk Offering is now matching the Global configuration value." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "CS-14270" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The listNetworks API no longer assumes that the broadcast type is always VLAN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14319" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The internal name of the VM is no longer present in the error message that is displayed to a domain administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14321" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The listVolumes API call now returns a valid value for the isExtractable parameter for the ISO-derived disk and data disk volumes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14323" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Invalid API calls will now give valid response in json/xml format." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14339" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Custom Disk Size will now allow values larger than 100GB." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14357" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The ConsoleProxyLoadReportCommand is no longer fired continuously." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14421" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fixed the issue of virtual router deployments. The DHCP entries can now be assigned to the router." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14555" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unzipped downloaded template MD5SUM will no longer override the zipped template MD5SUM in the database." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14598" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The complete screen of the running VM is now displayed in the console proxy." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14600" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Windows or Linux based consoles are no longer lost upon rebooting VMs." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "CS-14784" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Multiple subnets with the same VLAN now work as expected." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13303, 14874, 13897, 13944, 14088, 14190" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A variety of upgrade issues have been fixed in release 3.0.3." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15080" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Setting a private network on a VLAN for VMWare environment is now supported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15168" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The console proxy now works as expected and no exception is shown in the log after upgrading from version 2.2.14 to 3.0.2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15172" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Version 3.0.2 now accepts the valid public key." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Known Issues in 4.0.0-incubating" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Issue ID" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CLOUDSTACK-301" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Nexus 1000v DVS integration is not functional" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This source code release includes some partial functionality to support the Cisco Nexus 1000v Distributed Virtual Switch within a VMware hypervisor environment. The functionality is not complete at this time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CLOUDSTACK-368" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OVM - cannot create guest VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This source code release has regressed from the CloudStack 2.2.x code and is unable to support Oracle VM (OVM)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CLOUDSTACK-279" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Deleting a project fails when executed by the regular user. This works as expected for root/domain admin. To workaround, perform either of the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use the account cleanup thread which will eventually complete the project deletion." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Execute the call as the root/domain admin on behalf of the regular user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16067" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The command=listTags&key=city command does not work as expected. The command does not return tags for the resources of the account with the tag, city" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16063" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The current values of volumes and snapshots are incorrect when using KVM as a host. To fix this, the database upgrade codes, volumes.size and snapshots.size, should be changed to show the virtual sizes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16058" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Null pointer Exception while deleting the host after moving the host to maintenance state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16045" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Only the root administrator can handle the API keys. The domain administrators are not allowed to create, delete, or retrieve API keys for the users in their domain." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16019" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CIDR list in the Add VPN Customer Gateway dialog does not prompt the user that they can provide a comma separated CIDRs if multiple CIDRs have to be supplied." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-16015" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Deleting a network is not supported when its network providers are disabled." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "CS-16012" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unable to delete a zone in the UI because the necessary cleanup cannot be completed. When the hosts are removed, the expunge process fails to delete the volumes as no hosts are present to send the commands to. Therefore, the storage pool removal fails, and zone can't be cleaned and deleted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name of network offering might be truncated due to too-narrow field width in Add Guest Network dialog box." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15789" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Invalid global setting prevents management server to restart. For example, if you configure the \"project.invite.timeout\" parameter to \"300\" and attempt to restart management server, it fails without throwing a warning or setting the value to the default." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15749" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restarting VPC is resulting in intermittent connection loss to the port forwarding and StaticNAT rules." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15690" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IpAssoc command failed as a part of starting the virtual router, but the final start result is reported as succes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15672, CS-15635" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The FQDN of the VM is not configured if it is deployed as a part of default shared network and isolated guest network (DefaultIsolatedNetworkOfferingWithSourceNatService)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15634" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The FQDN of a VM that is deployed as a part of both a shared network and default isolated guest network has the suffix of the shared network instead of the default isolated guest network." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "CS-15576" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stopping a VM on XenServer creates a backlog of API commands. For example, the Attach volume calls become delayed while waiting for the stopVirtualMachine command to be executed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15569" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Misleading error message in the exception when creating a StaticNAT rule fails in a VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15566" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "External device such as Netscaler is not supported in VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15557" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Intermittent traffic loss in the VPN connection if Juniper is the remote router and the life time is 300 seconds." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15361" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Egress rules are not working in NetScaler loadbalancer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15105" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The cloud-sysvmadm script does not work if the integration.api.port parameter is set to any port other than 8096." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15092" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Connecting to the guest VMs through SSH is extremely slow, and it results in connection timeout." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15037" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hairpin NAT is not supported when NetScaler is used for EIP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15009" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The port_profile table will not be populated with port profile information. In this release, CloudStack directly connects to the VSM for all the port profile operations; therefore, no port profile information is cached." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14939" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Adding a VMware cluster is not supported when the Management Network is migrated to the Distributed Virtual Switch environment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14780" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You are allowed to ping the elastic IP address of the VM even though no ingress rule is set that allows the ICMP protocol." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14756" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Installing KVM on RHEL 6.2 will result in unreliable network performance. Workaround: blacklist vhost-net. Edit /etc/modprobe.d/blacklist-kvm.conf and include vhost-net." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14346" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The UpdateVirtualMachine API call does not check whether the VM is stopped. Therefore, stop the VM manually before issuing this call." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14303 (was 14537)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP addresses for a shared network are still being consumed even if no services are defined for that network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14296 (was 14530)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OVM: Network traffic labels are not supported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14291 (was 14523)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The EIP/ELB network offering for basic zones does not support multiple NetScalers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14275 (was 14506)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "F5: Unable to properly remove a F5 device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14201 (was 14430)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "VMWare: Template sizes are being reported different depending on whether the primary storage is using ISCSI or NFS." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13758 (was 13963)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vSphere: template download from templates created off of the root volume does not work properly." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13733 (was 13935)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vSphere: detaching an ISO from a restored VM instance fails." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13682 (was 13883)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Multiple NetScalers are not supported in Basic Networking." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13599 (was 13359)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Programming F5/NetScaler rules can be better optimized." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13337 (was 13518)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Security Groups are not supported in Advanced Networking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-13173 (was 13336)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vSphere: cross cluster volume migration does not work properly." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12714 (was 12840)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Capacity view is not available for pods or clusters." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-12624 (was 12741)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vSphere: maintenance mode will not live migrate system VM to another host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15476" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The 2.2.14 to 4.0.0-incubating upgrade fails if multiple untagged physical networks exist before the upgrade." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15407" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "After the 2.2.14 to 4.0.0-incubating upgrade, VLAN allocation on multiple physical networks does not happen as expected." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To workaround this issue, follow the instructions given below:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Revert to your 2.2.14 setup." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop all the VMs with the isolated virtual networks in your cloud setup." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run following query to find if any networks still have the NICs allocated:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check if any virtual guest networks have the NICs allocated:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "#SELECT DISTINCT op.id from `cloud`.`op_networks` op JOIN `cloud`.`networks` n on op.id=n.id WHERE nics_count != 0 AND guest_type = 'Virtual'; " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If this returns any network IDs, then ensure the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VMs are stopped." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "No new VM is started." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Shutdown the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Remove the NICs count for the virtual network IDs returned in step (a), and set the NIC count to 0:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "UPDATE `cloud`.`op_networks` SET nics_count = 0 WHERE id = enter id of virtual network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server, and wait for all the networks to shut down." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Networks shutdown is determined by the network.gc.interval and network.gc.wait parameters." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that all the networks are shut down and all the guest VNETs are free." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Run the upgrade script." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This allocates all your guest VNET ranges to the first physical network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By using the updatePhysicalNetwork API, reconfigure the VNET ranges for each physical network as desired." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start all the VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-14680" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack and LDAP user validation cannot happen simultaneously because the user password is hashed and stored in the database, and LDAP requires the passwords in plain text." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To work with the LDAP user, the MD5 hash should be disabled in the login process by commenting the following variable in sharedFunctions.js file available at /usr/share/cloud/management/webapps/client/scripts, and restart the cloud-management service." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "var md5HashedLogin = false;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "However, if md5HashedLogin is set to false, the end user can login with the LDAP credentials but not with the CloudStack user credentials." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CS-15130" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Data disk volumes are not automatically copied from one cluster to another." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "API Changes from 3.0.2 to 4.0.0-incubating" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "New API Commands in 4.0.0-incubating" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createCounter (Adds metric counter)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteCounter (Deletes a counter)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listCounters (List the counters)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "createCondition (Creates a condition)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteCondition (Removes a condition)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listConditions (List Conditions for the specific user)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createTags. Add tags to one or more resources. Example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=createTags\n" -"&resourceIds=1,10,12\n" -"&resourceType=userVm\n" -"&tags[0].key=region\n" -"&tags[0].value=canada\n" -"&tags[1].key=city\n" -"&tags[1].value=Toronto" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteTags. Remove tags from one or more resources. Example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=deleteTags\n" -"&resourceIds=1,12\n" -"&resourceType=Snapshot\n" -"&tags[0].key=city" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listTags (Show currently defined resource tags)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVPC (Creates a VPC)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVPCs (Lists VPCs)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVPC (Deletes a VPC)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateVPC (Updates a VPC)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "restartVPC (Restarts a VPC)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVPCOffering (Creates VPC offering)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateVPCOffering (Updates VPC offering)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVPCOffering (Deletes VPC offering)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVPCOfferings (Lists VPC offerings)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createPrivateGateway (Creates a private gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPrivateGateways (List private gateways)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "deletePrivateGateway (Deletes a Private gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createNetworkACL (Creates a ACL rule the given network (the network has to belong to VPC))" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteNetworkACL (Deletes a Network ACL)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworkACLs (Lists all network ACLs)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createStaticRoute (Creates a static route)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteStaticRoute (Deletes a static route)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listStaticRoutes (Lists all static routes)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVpnCustomerGateway (Creates site to site vpn customer gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVpnGateway (Creates site to site vpn local gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVpnConnection (Create site to site vpn connection)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVpnCustomerGateway (Delete site to site vpn customer gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVpnGateway (Delete site to site vpn gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVpnConnection (Delete site to site vpn connection)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateVpnCustomerGateway (Update site to site vpn customer gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "resetVpnConnection (Reset site to site vpn connection)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVpnCustomerGateways (Lists site to site vpn customer gateways)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVpnGateways (Lists site 2 site vpn gateways)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVpnConnections (Lists site to site vpn connection gateways)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "markDefaultZoneForAccount (Marks a default zone for the current account)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "uploadVolume (Uploads a data disk)" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Changed API Commands in 4.0.0-incubating" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "API Commands" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "copyTemplate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "prepareTemplate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "registerTemplate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateTemplate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createProject" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "activateProject" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "suspendProject" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateProject" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listProjectAccounts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "migrateVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "attachVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "detachVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "uploadVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createSecurityGroup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "registerIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "copyIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createIpForwardingRule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listIpForwardingRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createLoadBalancerRule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateLoadBalancerRule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createSnapshot" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The commands in this list have a single new response parameter, and no other changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameter: tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Many other commands also have the new tags(*) parameter in addition to other changes; those commands are listed separately." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rebootVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "attachIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "detachIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listLoadBalancerRuleInstances" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "resetPasswordForVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "changeServiceForVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "recoverVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "migrateVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deployVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "assignVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "restoreVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "stopVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "destroyVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The commands in this list have two new response parameters, and no other changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: keypair, tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The commands in this list have the following new parameters, and no other changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameter: tags (optional)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listF5LoadBalancerNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetscalerLoadBalancerNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSrxFirewallNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateNetwork" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The commands in this list have three new response parameters, and no other changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: canusefordeploy, vpcid, tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createZone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateZone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameter: localstorageenabled (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameter: localstorageenabled" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listZones" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rebootRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "changeServiceForRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "destroyRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "stopRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: vpcid, nic(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateAccount" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "disableAccount" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listAccounts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "markDefaultZoneForAccount" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "enableAccount" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: vpcavailable, vpclimit, vpctotal" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listRouters" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: forvpc (optional), vpcid (optional)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listNetworkOfferings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: forvpc (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: forvpc" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: details (optional), tags (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addTrafficMonitor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: excludezones (optional), includezones (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createNetwork" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: vpcid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: tags (optional), vpcid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: vpcid, tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: canusefordeploy (optional), forvpc (optional), tags (optional), vpcid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "restartNetwork" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "enableStaticNat" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameter: networkid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createDiskOffering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameter: storagetype (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameter: storagetype" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listDiskOfferings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateDiskOffering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createFirewallRule" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Changed request parameters: ipaddressid (old version - optional, new version - required)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: isoid (optional), tags (optional), templateid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateStorageNetworkIpRange" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: id, endip, gateway, netmask, networkid, podid, startip, vlan, zoneid" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "reconnectHost" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new response parameter is added: hahost." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addCluster" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following request parameters are added:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vsmipaddress (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vsmpassword (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vsmusername (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following parameter is made mandatory: podid" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new response parameter is added: status" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "prepareHostForMaintenance" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addSecondaryStorage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new response parameter is added: defaultzoneid" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "cancelHostMaintenance" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new response parameter is added: hahost" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addSwift" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSwifts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listExternalLoadBalancers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listCapabilities" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "A new response parameter is added: customdiskofferingmaxsize" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new request parameter is added: startvm (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteStoragePool" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new request parameter is added: forced (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addHost" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateHost" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listHosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new request parameter is added: hahost (optional)" -msgstr "" - diff --git a/docs/pot/Revision_History.pot b/docs/pot/Revision_History.pot deleted file mode 100644 index 3f213ced877..00000000000 --- a/docs/pot/Revision_History.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Revision History" -msgstr "" - -#. Tag: firstname -#, no-c-format -msgid "Jessica" -msgstr "" - -#. Tag: surname -#, no-c-format -msgid "Tomechak" -msgstr "" - -#. Tag: member -#, no-c-format -msgid "Initial creation of book by publican" -msgstr "" - diff --git a/docs/pot/Revision_History_Install_Guide.pot b/docs/pot/Revision_History_Install_Guide.pot deleted file mode 100644 index 632f931b123..00000000000 --- a/docs/pot/Revision_History_Install_Guide.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Revision History" -msgstr "" - -#. Tag: firstname -#, no-c-format -msgid "Jessica" -msgstr "" - -#. Tag: surname -#, no-c-format -msgid "Tomechak" -msgstr "" - -#. Tag: firstname -#, no-c-format -msgid "Radhika" -msgstr "" - -#. Tag: surname -#, no-c-format -msgid "PC" -msgstr "" - -#. 
Tag: firstname -#, no-c-format -msgid "Wido" -msgstr "" - -#. Tag: surname -#, no-c-format -msgid "den Hollander" -msgstr "" - -#. Tag: member -#, no-c-format -msgid "Initial publication" -msgstr "" - diff --git a/docs/pot/SSL-keystore-path-and-password.pot b/docs/pot/SSL-keystore-path-and-password.pot deleted file mode 100644 index 54955c82243..00000000000 --- a/docs/pot/SSL-keystore-path-and-password.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "SSL Keystore Path and Password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the LDAP server requires SSL, you need to enable it in the ldapConfig command by setting the parameters ssl, truststore, and truststorepass. Before enabling SSL for ldapConfig, you need to get the certificate which the LDAP server is using and add it to a trusted keystore. 
You will need to know the path to the keystore and the password." -msgstr "" - diff --git a/docs/pot/VPN-user-usage-record-format.pot b/docs/pot/VPN-user-usage-record-format.pot deleted file mode 100644 index fd5669107a3..00000000000 --- a/docs/pot/VPN-user-usage-record-format.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "VPN User Usage Record Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "account – name of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "accountid – ID of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "domainid – ID of the domain in which this account resides" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "zoneid – Zone where the usage occurred" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "description – A string describing what the usage record is tracking" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usagetype – A number representing the usage type (see Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rawusage – A number representing the actual usage in hours" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usageid – VPN user ID" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record" -msgstr "" - diff --git a/docs/pot/about-clusters.pot b/docs/pot/about-clusters.pot deleted file mode 100644 index 8247f2d16c4..00000000000 --- a/docs/pot/about-clusters.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Clusters" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "A cluster provides a way to group hosts. To be precise, a cluster is a XenServer server pool, a set of KVM servers, , or a VMware cluster preconfigured in vCenter. The hosts in a cluster all have identical hardware, run the same hypervisor, are on the same subnet, and access the same shared primary storage. Virtual machine instances (VMs) can be live-migrated from one host to another within the same cluster, without interrupting service to the user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A cluster is the third-largest organizational unit within a &PRODUCT; deployment. Clusters are contained within pods, and pods are contained within zones. Size of the cluster is limited by the underlying hypervisor, although the &PRODUCT; recommends less in most cases; see Best Practices." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A cluster consists of one or more hosts and one or more primary storage servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; allows multiple clusters in a cloud deployment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Even when local storage is used exclusively, clusters are still required organizationally, even if there is just one host per cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When VMware is used, every VMware cluster is managed by a vCenter server. Administrator must register the vCenter server with &PRODUCT;. There may be multiple vCenter servers per zone. Each vCenter server may manage multiple VMware clusters." -msgstr "" - diff --git a/docs/pot/about-hosts.pot b/docs/pot/about-hosts.pot deleted file mode 100644 index 9a5e67c03cc..00000000000 --- a/docs/pot/about-hosts.pot +++ /dev/null @@ -1,100 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A host is a single computer. Hosts provide the computing resources that run the guest virtual machines. Each host has hypervisor software installed on it to manage the guest VMs. For example, a Linux KVM-enabled server, a Citrix XenServer server, and an ESXi server are hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The host is the smallest organizational unit within a &PRODUCT; deployment. Hosts are contained within clusters, clusters are contained within pods, and pods are contained within zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hosts in a &PRODUCT; deployment:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the CPU, memory, storage, and networking resources needed to host the virtual machines" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Interconnect using a high bandwidth TCP/IP network and connect to the Internet" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "May reside in multiple data centers across different geographic locations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "May have different capacities (different CPU speeds, different amounts of RAM, etc.), although the hosts within a cluster must all be homogeneous" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Additional hosts can be added at any time to provide more capacity for guest VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; automatically detects the amount of CPU and memory resources provided by the Hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hosts are not visible to the end user. An end user cannot determine which host their guest has been assigned to." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For a host to function in &PRODUCT;, you must do the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install hypervisor software on the host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Assign an IP address to the host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure the host is connected to the &PRODUCT; Management Server" -msgstr "" - diff --git a/docs/pot/about-password-encryption.pot b/docs/pot/about-password-encryption.pot deleted file mode 100644 index 2219780d907..00000000000 --- a/docs/pot/about-password-encryption.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Password and Key Encryption" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; stores several sensitive passwords and secret keys that are used to provide security. These values are always automatically encrypted:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Database secret key" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Database password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SSH keys" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Compute node root password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "User API secret key" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VNC password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; uses the Java Simplified Encryption (JASYPT) library. The data values are encrypted and decrypted using a database secret key, which is stored in one of &PRODUCT;’s internal properties files along with the database password. The other encrypted values listed above, such as SSH keys, are in the &PRODUCT; internal database." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Of course, the database secret key itself can not be stored in the open – it must be encrypted. How then does &PRODUCT; read it? A second secret key must be provided from an external source during Management Server startup. This key can be provided in one of two ways: loaded from a file or provided by the &PRODUCT; administrator. The &PRODUCT; database has a new configuration setting that lets it know which of these methods will be used. If the encryption type is set to \"file,\" the key must be in a file in a known location. If the encryption type is set to \"web,\" the administrator runs the utility com.cloud.utils.crypt.EncryptionSecretKeySender, which relays the key to the Management Server over a known port." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The encryption type, database secret key, and Management Server secret key are set during &PRODUCT; installation. They are all parameters to the &PRODUCT; database setup script (cloud-setup-databases). The default values are file, password, and password. It is, of course, highly recommended that you change these to more secure keys." -msgstr "" - diff --git a/docs/pot/about-physical-networks.pot b/docs/pot/about-physical-networks.pot deleted file mode 100644 index 734803c236c..00000000000 --- a/docs/pot/about-physical-networks.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Physical Networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Part of adding a zone is setting up the physical network. One or (in an advanced zone) more physical networks can be associated with each zone. The network corresponds to a NIC on the hypervisor host. Each physical network can carry one or more types of network traffic. The choices of traffic type for each network vary depending on whether you are creating a zone with basic networking or advanced networking." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A physical network is the actual network hardware and wiring in a zone. A zone can have multiple physical networks. An administrator can:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add/Remove/Update physical networks in a zone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure VLANs on the physical network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure a name so the network can be recognized by hypervisors" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure the service providers (firewalls, load balancers, etc.) available on a physical network" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Configure the IP addresses trunked to a physical network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify what type of traffic is carried on the physical network, as well as other properties like network speed" -msgstr "" - diff --git a/docs/pot/about-pods.pot b/docs/pot/about-pods.pot deleted file mode 100644 index f80760c0654..00000000000 --- a/docs/pot/about-pods.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Pods" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A pod often represents a single rack. Hosts in the same pod are in the same subnet." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A pod is the second-largest organizational unit within a &PRODUCT; deployment. Pods are contained within zones. Each zone can contain one or more pods." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Pods are not visible to the end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A pod consists of one or more clusters of hosts and one or more primary storage servers." -msgstr "" - diff --git a/docs/pot/about-primary-storage.pot b/docs/pot/about-primary-storage.pot deleted file mode 100644 index c2ba526788d..00000000000 --- a/docs/pot/about-primary-storage.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Primary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Primary storage is associated with a cluster, and it stores the disk volumes for all the VMs running on hosts in that cluster. You can add multiple primary storage servers to a cluster. At least one is required. It is typically located close to the hosts for increased performance." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "&PRODUCT; is designed to work with all standards-compliant iSCSI and NFS servers that are supported by the underlying hypervisor, including, for example:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Dell EqualLogicâ„¢ for iSCSI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Appliances filers for NFS and iSCSI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Scale Computing for NFS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you intend to use only local disk for your installation, you can skip to Add Secondary Storage." -msgstr "" - diff --git a/docs/pot/about-secondary-storage.pot b/docs/pot/about-secondary-storage.pot deleted file mode 100644 index 4543890df21..00000000000 --- a/docs/pot/about-secondary-storage.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "About Secondary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Secondary storage is associated with a zone, and it stores the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Templates — OS images that can be used to boot VMs and can include additional configuration information, such as installed applications" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO images — disc images containing data or bootable media for operating systems" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disk volume snapshots — saved copies of VM data which can be used for data recovery or to create new templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The items in zone-based NFS secondary storage are available to all hosts in the zone. &PRODUCT; manages the allocation of guest virtual disks to particular primary storage devices." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To make items in secondary storage available to all hosts throughout the cloud, you can add OpenStack Object Storage (Swift, swift.openstack.org) in addition to the zone-based NFS secondary storage. When using Swift, you configure Swift storage for the entire &PRODUCT;, then set up NFS secondary storage for each zone as usual. The NFS storage in each zone acts as a staging area through which all templates and other secondary storage data pass before being forwarded to Swift. The Swift storage acts as a cloud-wide resource, making templates and other data available to any zone in the cloud. There is no hierarchy in the Swift storage, just one Swift container per storage object. Any secondary storage in the whole cloud can pull a container from Swift at need. It is not necessary to copy templates and snapshots from one zone to another, as would be required when using zone NFS alone. Everything is available everywhere." 
-msgstr "" - diff --git a/docs/pot/about-security-groups.pot b/docs/pot/about-security-groups.pot deleted file mode 100644 index 2acdbb4b484..00000000000 --- a/docs/pot/about-security-groups.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Security Groups" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Security groups provide a way to isolate traffic to VMs. A security group is a group of VMs that filter their incoming and outgoing traffic according to a set of rules, called ingress and egress rules. These rules filter network traffic according to the IP address that is attempting to communicate with the VM. Security groups are particularly useful in zones that use basic networking, because there is a single guest network for all guest VMs. In &PRODUCT; 3.0.3 - 3.0.5, security groups are supported only in zones that use basic networking." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a zone that uses advanced networking, you can instead define multiple guest networks to isolate traffic to VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each &PRODUCT; account comes with a default security group that denies all inbound traffic and allows all outbound traffic. The default security group can be modified so that all new VMs inherit some other desired set of rules." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Any &PRODUCT; user can set up any number of additional security groups. When a new VM is launched, it is assigned to the default security group unless another user-defined security group is specified. A VM can be a member of any number of security groups. Once a VM is assigned to a security group, it remains in that group for its entire lifetime; you can not move a running VM from one security group to another." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can modify a security group by deleting or adding any number of ingress and egress rules. When you do, the new rules apply to all VMs in the group, whether running or stopped." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If no ingress rules are specified, then no traffic will be allowed in, except for responses to any traffic that has been allowed out through an egress rule." -msgstr "" - diff --git a/docs/pot/about-virtual-networks.pot b/docs/pot/about-virtual-networks.pot deleted file mode 100644 index 7d396bf51e6..00000000000 --- a/docs/pot/about-virtual-networks.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Virtual Networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A virtual network is a logical construct that enables multi-tenancy on a single physical network. In &PRODUCT; a virtual network can be shared or isolated." -msgstr "" - diff --git a/docs/pot/about-working-with-vms.pot b/docs/pot/about-working-with-vms.pot deleted file mode 100644 index 1b566121ba8..00000000000 --- a/docs/pot/about-working-with-vms.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Working with Virtual Machines" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides administrators with complete control over the lifecycle of all guest VMs executing in the cloud. &PRODUCT; provides several guest management operations for end users and administrators. VMs may be stopped, started, rebooted, and destroyed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest VMs have a name and group. VM names and groups are opaque to &PRODUCT; and are available for end users to organize their VMs. Each VM can have three names for use in different contexts. Only two of these names can be controlled by the user:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Instance name – a unique, immutable ID that is generated by &PRODUCT; and can not be modified by the user. This name conforms to the requirements in IETF RFC 1123." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Display name – the name displayed in the &PRODUCT; web UI. Can be set by the user. Defaults to instance name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name – host name that the DHCP server assigns to the VM. Can be set by the user. Defaults to instance name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest VMs can be configured to be Highly Available (HA). An HA-enabled VM is monitored by the system. If the system detects that the VM is down, it will attempt to restart the VM, possibly on a different host. For more information, see HA-Enabled Virtual Machines on" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Each new VM is allocated one public IP address. When the VM is started, &PRODUCT; automatically creates a static NAT between this public IP address and the private IP address of the VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If elastic IP is in use (with the NetScaler load balancer), the IP address initially allocated to the new VM is not marked as elastic. The user must replace the automatically configured IP with a specifically acquired elastic IP, and set up the static NAT mapping between this new IP and the guest VM’s private IP. The VM’s original IP address is then released and returned to the pool of available public IPs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; cannot distinguish a guest VM that was shut down by the user (such as with the “shutdown†command in Linux) from a VM that shut down unexpectedly. If an HA-enabled VM is shut down from inside the VM, &PRODUCT; will restart it. To shut down an HA-enabled VM, you must go through the &PRODUCT; UI or API." -msgstr "" - diff --git a/docs/pot/about-zones.pot b/docs/pot/about-zones.pot deleted file mode 100644 index c9111a7e484..00000000000 --- a/docs/pot/about-zones.pot +++ /dev/null @@ -1,100 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Zones" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A zone is the largest organizational unit within a &PRODUCT; deployment. A zone typically corresponds to a single datacenter, although it is permissible to have multiple zones in a datacenter. The benefit of organizing infrastructure into zones is to provide physical isolation and redundancy. For example, each zone can have its own power supply and network uplink, and the zones can be widely separated geographically (though this is not required)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A zone consists of:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "One or more pods. Each pod contains one or more clusters of hosts and one or more primary storage servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Secondary storage, which is shared by all the pods in the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zones are visible to the end user. When a user starts a guest VM, the user must select a zone for their guest. Users might also be required to copy their private templates to additional zones to enable creation of guest VMs using their templates in those zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zones can be public or private. Public zones are visible to all users. This means that any user may create a guest in that zone. Private zones are reserved for a specific domain. Only users in that domain or its subdomains may create guests in that zone." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Hosts in the same zone are directly accessible to each other without having to go through a firewall. Hosts in different zones can access each other through statically configured VPN tunnels." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For each zone, the administrator must decide the following." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How many pods to place in a zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How many clusters to place in each pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How many hosts to place in each cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How many primary storage servers to place in each cluster and total capacity for the storage servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How much secondary storage to deploy in a zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you add a new zone, you will be prompted to configure the zone’s physical network and add the first pod, cluster, host, primary storage, and secondary storage." -msgstr "" - diff --git a/docs/pot/accept-membership-invite.pot b/docs/pot/accept-membership-invite.pot deleted file mode 100644 index 977962aa115..00000000000 --- a/docs/pot/accept-membership-invite.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Accepting a Membership Invitation" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you have received an invitation to join a &PRODUCT; project, and you want to accept the invitation, follow these steps:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose Invitations." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you see the invitation listed onscreen, click the Accept button." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Invitations listed on screen were sent to you using your &PRODUCT; account name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you received an email invitation, click the Enter Token button, and provide the project ID and unique ID code (token) from the email." -msgstr "" - diff --git a/docs/pot/accessing-vms.pot b/docs/pot/accessing-vms.pot deleted file mode 100644 index 2ca92867a87..00000000000 --- a/docs/pot/accessing-vms.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Accessing VMs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Any user can access their own virtual machines. The administrator can access all VMs running in the cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To access a VM through the &PRODUCT; UI:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Instances, then click the name of a running VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the View Console button ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To access a VM directly over the network:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VM must have some port open to incoming traffic. For example, in a basic zone, a new VM might be assigned to a security group which allows incoming traffic. This depends on what security group you picked when creating the VM. In other cases, you can open a port by setting up a port forwarding policy. See IP Forwarding and Firewalling." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If a port is open but you can not access the VM using ssh, it’s possible that ssh is not already enabled on the VM. 
This will depend on whether ssh is enabled in the template you picked when creating the VM. Access the VM through the &PRODUCT; UI and enable ssh on the machine using the commands for the VM’s operating system." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the network has an external firewall device, you will need to create a firewall rule to allow access. See IP Forwarding and Firewalling." -msgstr "" - diff --git a/docs/pot/accounts-users-domains.pot b/docs/pot/accounts-users-domains.pot deleted file mode 100644 index 542934b8c62..00000000000 --- a/docs/pot/accounts-users-domains.pot +++ /dev/null @@ -1,100 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Accounts, Users, and Domains" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Accounts" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "An account typically represents a customer of the service provider or a department in a large organization. Multiple users can exist in an account." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Domains" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Accounts are grouped by domains. Domains usually contain multiple accounts that have some logical relationship to each other and a set of delegated administrators with some authority over the domain and its subdomains. For example, a service provider with several resellers could create a domain for each reseller." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For each account created, the Cloud installation creates three different types of user accounts: root administrator, domain administrator, and user." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Users" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Users are like aliases in the account. Users in the same account are not isolated from each other, but they are isolated from users in other accounts. Most installations need not surface the notion of users; they just have one user per account. The same user cannot belong to multiple accounts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Username is unique in a domain across accounts in that domain. The same username can exist in other domains, including sub-domains. Domain name can repeat only if the full pathname from root is unique. For example, you can create root/d1, as well as root/foo/d1, and root/sales/d1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Administrators are accounts with special privileges in the system. There may be multiple administrators in the system. Administrators can create or delete other administrators, and change the password for any user in the system." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Domain Administrators" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Domain administrators can perform administrative operations for users who belong to that domain. Domain administrators do not have visibility into physical servers or other domains." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Root Administrator" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Root administrators have complete access to the system, including managing templates, service offerings, customer care administrators, and domains" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The resources belong to the account, not individual users in that account. For example, billing, resource limits, and so on are maintained by the account, not the users. A user can operate on any resource in the account provided the user has privileges for that operation. The privileges are determined by the role." -msgstr "" - diff --git a/docs/pot/accounts.pot b/docs/pot/accounts.pot deleted file mode 100644 index 1c37079b7e8..00000000000 --- a/docs/pot/accounts.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Accounts" -msgstr "" - diff --git a/docs/pot/acquire-new-ip-address.pot b/docs/pot/acquire-new-ip-address.pot deleted file mode 100644 index 16b58478319..00000000000 --- a/docs/pot/acquire-new-ip-address.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Acquiring a New IP Address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Click the name of the network where you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Acquire New IP, and click Yes in the confirmation dialog." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You are prompted for confirmation because, typically, IP addresses are a limited resource. Within a few moments, the new IP address should appear with the state Allocated. You can now use the IP address in port forwarding or static NAT rules." -msgstr "" - diff --git a/docs/pot/acquire-new-ip-for-vpc.pot b/docs/pot/acquire-new-ip-for-vpc.pot deleted file mode 100644 index b11b61eb072..00000000000 --- a/docs/pot/acquire-new-ip-for-vpc.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Acquiring a New IP Address for a VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you acquire an IP address, all IP addresses are allocated to VPC, not to the guest networks within the VPC. The IPs are associated to the guest network only when the first port-forwarding, load balancing, or Static NAT rule is created for the IP or the network. IP can't be associated to more than one network at a time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to deploy the VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP Addresses page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Acquire New IP, and click Yes in the confirmation dialog." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You are prompted for confirmation because, typically, IP addresses are a limited resource. 
Within a few moments, the new IP address should appear with the state Allocated. You can now use the IP address in port forwarding, load balancing, and static NAT rules." -msgstr "" - diff --git a/docs/pot/add-additional-guest-network.pot b/docs/pot/add-additional-guest-network.pot deleted file mode 100644 index a673cbc9e07..00000000000 --- a/docs/pot/add-additional-guest-network.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding an Additional Guest Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add guest network. Provide the following information:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: The name of the network. 
This will be user-visible." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Display Text: The description of the network. This will be user-visible." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone. The name of the zone this network applies to. Each zone is a broadcast domain, and therefore each zone has a different IP range for the guest network. The administrator must configure the IP range for each zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network offering: If the administrator has configured multiple network offerings, select the one you want to use for this network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest Gateway: The gateway that the guests should use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest Netmask: The netmask in use on the subnet the guests will use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Create." -msgstr "" - diff --git a/docs/pot/add-clusters-kvm-xenserver.pot b/docs/pot/add-clusters-kvm-xenserver.pot deleted file mode 100644 index 97fb78c96ab..00000000000 --- a/docs/pot/add-clusters-kvm-xenserver.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Add Cluster: KVM or XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These steps assume you have already installed the hypervisor on the hosts and logged in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone in which you want to add the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Compute tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Clusters node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the hypervisor type for this cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the pod in which you want to create the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Enter a name for the cluster. This can be text of your choosing and is not used by &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - diff --git a/docs/pot/add-clusters-ovm.pot b/docs/pot/add-clusters-ovm.pot deleted file mode 100644 index 25ad8ce2c01..00000000000 --- a/docs/pot/add-clusters-ovm.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Add Cluster: OVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To add a Cluster of hosts that run Oracle VM (OVM):" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add a companion non-OVM cluster to the Pod. This cluster provides an environment where the &PRODUCT; System VMs can run. You should have already installed a non-OVM hypervisor on at least one Host to prepare for this step. Depending on which hypervisor you used:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For VMWare, follow the steps in Add Cluster: vSphere. When finished, return here and continue with the next step." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For KVM or XenServer, follow the steps in . When finished, return here and continue with the next step" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone in which you want to add the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Compute tab. In the Pods node, click View All. Select the same pod you used in step 1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View Clusters, then click Add Cluster." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The Add Cluster dialog is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Hypervisor, choose OVM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Cluster, enter a name for the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add." -msgstr "" - diff --git a/docs/pot/add-clusters-vsphere.pot b/docs/pot/add-clusters-vsphere.pot deleted file mode 100644 index 2c4ebe4e3aa..00000000000 --- a/docs/pot/add-clusters-vsphere.pot +++ /dev/null @@ -1,125 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Add Cluster: vSphere" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host management for vSphere is done through a combination of vCenter and the &PRODUCT; admin UI. &PRODUCT; requires that all hosts be in a &PRODUCT; cluster, but the cluster may consist of a single host. 
As an administrator you must decide if you would like to use clusters of one host or of multiple hosts. Clusters of multiple hosts allow for features like live migration. Clusters also require shared storage such as NFS or iSCSI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For vSphere servers, we recommend creating the cluster of hosts in vCenter and then adding the entire cluster to &PRODUCT;. Follow these requirements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Do not put more than 8 hosts in a vSphere cluster" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure the hypervisor hosts do not have any VMs already running before you add them to &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To add a vSphere cluster to &PRODUCT;:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create the cluster of hosts in vCenter. Follow the vCenter instructions to do this. You will create a cluster that looks something like this in vCenter." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone in which you want to add the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Compute tab, and click View All on Pods. Choose the pod to which you want to add the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View Clusters." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Hypervisor, choose VMware." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following information in the dialog. The fields below make reference to values from vCenter." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cluster Name. Enter the name of the cluster you created in vCenter. For example, \"cloud.cluster.2.2.1\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Host. 
Enter the hostname or IP address of the vCenter server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Username. Enter the username that &PRODUCT; should use to connect to vCenter. This user must have all administrative privileges." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Password. Enter the password for the user named above" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Datacenter. Enter the vCenter datacenter that the cluster is in. For example, \"cloud.dc.VM\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There might be a slight delay while the cluster is provisioned. It will automatically display in the UI" -msgstr "" - diff --git a/docs/pot/add-gateway-vpc.pot b/docs/pot/add-gateway-vpc.pot deleted file mode 100644 index a73b92ad54d..00000000000 --- a/docs/pot/add-gateway-vpc.pot +++ /dev/null @@ -1,145 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Adding a Private Gateway to a VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A private gateway can be added by the root admin only. The VPC private network has 1:1 relationship with the NIC of the physical network. No gateways with duplicated VLAN and IP are allowed in the same data center." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to configure load balancing rules." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Private Gateways." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Gateways page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add new gateway:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Physical Network: The physical network you have created in the zone." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "IP Address: The IP address associated with the VPC gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway: The gateway through which the traffic is routed to and from the VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Netmask: The netmask associated with the VPC gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN: The VLAN associated with the VPC gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The new gateway appears in the list. You can repeat these steps to add more gateway for this VPC." -msgstr "" - diff --git a/docs/pot/add-ingress-egress-rules.pot b/docs/pot/add-ingress-egress-rules.pot deleted file mode 100644 index c0d3e4eef12..00000000000 --- a/docs/pot/add-ingress-egress-rules.pot +++ /dev/null @@ -1,125 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Adding Ingress and Egress Rules to a Security Group" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select view, choose Security Groups, then click the security group you want ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To add an ingress rule, click the Ingress Rules tab and fill out the following fields to specify what network traffic is allowed into VM instances in this security group. If no ingress rules are specified, then no traffic will be allowed in, except for responses to any traffic that has been allowed out through an egress rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add by CIDR/Account. Indicate whether the source of the traffic will be defined by IP address (CIDR) or an existing security group in a &PRODUCT; account (Account). Choose Account if you want to allow incoming traffic from all VMs in another security group" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol. The networking protocol that sources will use to send traffic to the security group. TCP and UDP are typically used for data exchange and end-user communications. ICMP is typically used to send error messages or network monitoring data." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start Port, End Port. (TCP, UDP only) A range of listening ports that are the destination for the incoming traffic. If you are opening a single port, use the same number in both fields." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ICMP Type, ICMP Code. (ICMP only) The type of message and error code that will be accepted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CIDR. (Add by CIDR only) To accept only traffic from IP addresses within a particular address block, enter a CIDR or a comma-separated list of CIDRs. 
The CIDR is the base IP address of the incoming traffic. For example, 192.168.0.0/22. To allow all CIDRs, set to 0.0.0.0/0." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Account, Security Group. (Add by Account only) To accept only traffic from another security group, enter the &PRODUCT; account and name of a security group that has already been defined in that account. To allow traffic between VMs within the security group you are editing now, enter the same name you used in step 7." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following example allows inbound HTTP access from anywhere:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To add an egress rule, click the Egress Rules tab and fill out the following fields to specify what type of traffic is allowed to be sent out of VM instances in this security group. If no egress rules are specified, then all traffic will be allowed out. Once egress rules are specified, the following types of traffic are allowed out: traffic specified in egress rules; queries to DNS and DHCP servers; and responses to any traffic that has been allowed in through an ingress rule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add by CIDR/Account. Indicate whether the destination of the traffic will be defined by IP address (CIDR) or an existing security group in a &PRODUCT; account (Account). Choose Account if you want to allow outgoing traffic to all VMs in another security group." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol. The networking protocol that VMs will use to send outgoing traffic. TCP and UDP are typically used for data exchange and end-user communications. ICMP is typically used to send error messages or network monitoring data." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start Port, End Port. (TCP, UDP only) A range of listening ports that are the destination for the outgoing traffic. If you are opening a single port, use the same number in both fields." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "ICMP Type, ICMP Code. (ICMP only) The type of message and error code that will be sent" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CIDR. (Add by CIDR only) To send traffic only to IP addresses within a particular address block, enter a CIDR or a comma-separated list of CIDRs. The CIDR is the base IP address of the destination. For example, 192.168.0.0/22. To allow all CIDRs, set to 0.0.0.0/0." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Account, Security Group. (Add by Account only) To allow traffic to be sent to another security group, enter the &PRODUCT; account and name of a security group that has already been defined in that account. To allow traffic between VMs within the security group you are editing now, enter its name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add." -msgstr "" - diff --git a/docs/pot/add-iso.pot b/docs/pot/add-iso.pot deleted file mode 100644 index eb1e630c7e0..00000000000 --- a/docs/pot/add-iso.pot +++ /dev/null @@ -1,215 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding an ISO" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To make additional operating system or other software available for use with guest VMs, you can add an ISO. The ISO is typically thought of as an operating system image, but you can also add ISOs for other types of software, such as desktop applications that you want to be installed as part of a template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose ISOs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add ISO." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Add ISO screen, provide the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: Short name for the ISO image. For example, CentOS 6.2 64-bit." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description: Display test for the ISO image. For example, CentOS 6.2 64-bit." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL: The URL that hosts the ISO image. The Management Server must be able to access this location via HTTP. If needed you can place the ISO image directly on the Management Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone: Choose the zone where you want the ISO to be available, or All Zones to make it available throughout &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Bootable: Whether or not a guest could boot off this ISO image. 
For example, a CentOS ISO is bootable, a Microsoft Office ISO is not bootable." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OS Type: This helps &PRODUCT; and the hypervisor perform certain operations and make assumptions that improve the performance of the guest. Select one of the following." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the operating system of your desired ISO image is listed, choose it." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the OS Type of the ISO is not listed or if the ISO is not bootable, choose Other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(XenServer only) If you want to boot from this ISO in PV mode, choose Other PV (32-bit) or Other PV (64-bit)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(KVM only) If you choose an OS that is PV-enabled, the VMs created from this ISO will have a SCSI (virtio) root disk. If the OS is not PV-enabled, the VMs will have an IDE root disk. The PV-enabled types are:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fedora 13" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fedora 12" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fedora 11" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fedora 10" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fedora 9" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Other PV" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Debian GNU/Linux" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CentOS 5.3" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CentOS 5.4" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CentOS 5.5" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Red Hat Enterprise Linux 5.3" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Red Hat Enterprise Linux 5.4" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Red Hat Enterprise Linux 5.5" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Red Hat Enterprise Linux 6" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "It is not recommended to choose an older version of the OS than the version in the image. For example, choosing CentOS 5.4 to support a CentOS 6.2 image will usually not work. In these cases, choose Other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Extractable: Choose Yes if the ISO should be available for extraction." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public: Choose Yes if this ISO should be available to other users." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Featured: Choose Yes if you would like this ISO to be more prominent for users to select. The ISO will appear in the Featured ISOs list. Only an administrator can make an ISO Featured." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server will download the ISO. Depending on the size of the ISO, this may take a long time. The ISO status column will display Ready once it has been successfully downloaded into secondary storage. Clicking Refresh updates the download percentage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Important: Wait for the ISO to finish downloading. If you move on to the next task and try to use the ISO right away, it will appear to fail. The entire ISO must be available before &PRODUCT; can work with it." -msgstr "" - diff --git a/docs/pot/add-load-balancer-rule.pot b/docs/pot/add-load-balancer-rule.pot deleted file mode 100644 index b2f82e0dd6c..00000000000 --- a/docs/pot/add-load-balancer-rule.pot +++ /dev/null @@ -1,110 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Load Balancer Rule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the network where you want to load balance the traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP address for which you want to create the rule, then click the Configuration tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Load Balancing node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a Basic zone, you can also create a load balancing rule without acquiring or selecting an IP address. &PRODUCT; internally assign an IP when you create the load balancing rule, which is listed in the IP Addresses page when the rule is created." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To do that, select the name of the network, then click Add Load Balancer tab. Continue with ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fill in the following:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Name: A name for the load balancer rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Port: The port receiving incoming traffic to be balanced." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private Port: The port that the VMs will use to receive the traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Algorithm: Choose the load balancing algorithm you want &PRODUCT; to use. &PRODUCT; supports a variety of well-known algorithms. If you are not familiar with these choices, you will find plenty of information about them on the Internet." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stickiness: (Optional) Click Configure and choose the algorithm for the stickiness policy. See Sticky Session Policies for Load Balancer Rules." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add VMs, then select two or more VMs that will divide the load of incoming traffic, and click Apply." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The new load balancer rule appears in the list. You can repeat these steps to add more load balancer rules for this IP address." -msgstr "" - diff --git a/docs/pot/add-loadbalancer-rule-vpc.pot b/docs/pot/add-loadbalancer-rule-vpc.pot deleted file mode 100644 index 15b5d76a5c2..00000000000 --- a/docs/pot/add-loadbalancer-rule-vpc.pot +++ /dev/null @@ -1,180 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding Load Balancing Rules on a VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A &PRODUCT; user or administrator may create load balancing rules that balance traffic received at a public IP to one or more VMs that belong to a network tier that provides load balancing service in a VPC. A user creates a rule, specifies an algorithm, and assigns the rule to a set of VMs within a VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to configure load balancing rules." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP Addresses page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP address for which you want to create the rule, then click the Configuration tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Load Balancing node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the tier to which you want to apply the rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a VPC, the load balancing service is supported only on a single tier." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: A name for the load balancer rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Port: The port that receives the incoming traffic to be balanced." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private Port: The port that the VMs will use to receive the traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Algorithm. Choose the load balancing algorithm you want &PRODUCT; to use. &PRODUCT; supports the following well-known algorithms:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Round-robin" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Least connections" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Source" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stickiness. (Optional) Click Configure and choose the algorithm for the stickiness policy. See Sticky Session Policies for Load Balancer Rules." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Add VMs: Click Add VMs, then select two or more VMs that will divide the load of incoming traffic, and click Apply." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The new load balancing rule appears in the list. You can repeat these steps to add more load balancing rules for this IP address." -msgstr "" - diff --git a/docs/pot/add-members-to-projects.pot b/docs/pot/add-members-to-projects.pot deleted file mode 100644 index f3a581792de..00000000000 --- a/docs/pot/add-members-to-projects.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding Members to a Project" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New members can be added to a project by the project’s administrator, the domain administrator of the domain where the project resides or any parent domain, or the &PRODUCT; root administrator. 
There are two ways to add members in &PRODUCT;, but only one way is enabled at a time:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If invitations have been enabled, you can send invitations to new members." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If invitations are not enabled, you can add members directly through the UI." -msgstr "" - diff --git a/docs/pot/add-more-clusters.pot b/docs/pot/add-more-clusters.pot deleted file mode 100644 index d2d1d8ddafe..00000000000 --- a/docs/pot/add-more-clusters.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Add More Clusters (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You need to tell &PRODUCT; about the hosts that it will manage. Hosts exist inside clusters, so before you begin adding hosts to the cloud, you must add at least one cluster." 
-msgstr "" - diff --git a/docs/pot/add-password-management-to-templates.pot b/docs/pot/add-password-management-to-templates.pot deleted file mode 100644 index 0a0df6f65f6..00000000000 --- a/docs/pot/add-password-management-to-templates.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding Password Management to Your Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides an optional password reset feature that allows users to set a temporary admin or root password as well as reset the existing admin or root password from the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To enable the Reset Password feature, you will need to download an additional script to patch your template. 
When you later upload the template into &PRODUCT;, you can specify whether reset admin/root password feature should be enabled for this template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The password management feature works always resets the account password on instance boot. The script does an HTTP call to the virtual router to retrieve the account password that should be set. As long as the virtual router is accessible the guest will have access to the account password that should be used. When the user requests a password reset the management server generates and sends a new password to the virtual router for the account. Thus an instance reboot is necessary to effect any password changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the script is unable to contact the virtual router during instance boot it will not set the password but boot will continue normally." -msgstr "" - diff --git a/docs/pot/add-portforward-rule-vpc.pot b/docs/pot/add-portforward-rule-vpc.pot deleted file mode 100644 index 1700043e3b6..00000000000 --- a/docs/pot/add-portforward-rule-vpc.pot +++ /dev/null @@ -1,155 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Port Forwarding Rule on a VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to deploy the VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose an existing IP address or acquire a new IP address. Click the name of the IP address in the list." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP Addresses page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP address for which you want to create the rule, then click the Configuration tab." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In the Port Forwarding node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the tier to which you want to apply the rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Port: The port to which public traffic will be addressed on the IP address you acquired in the previous step." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private Port: The port on which the instance is listening for forwarded public traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol: The communication protocol in use between the two ports." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TCP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "UDP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add VM: Click Add VM. Select the name of the instance to which this rule applies, and click Apply." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can test the rule by opening an ssh session to the instance." -msgstr "" - diff --git a/docs/pot/add-primary-storage.pot b/docs/pot/add-primary-storage.pot deleted file mode 100644 index bac6723efee..00000000000 --- a/docs/pot/add-primary-storage.pot +++ /dev/null @@ -1,145 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding Primary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that nothing stored on the server. Adding the server to CloudStack will destroy any existing data." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you create a new zone, the first primary storage is added as part of that procedure. You can add primary storage servers at any time, such as when adding a new cluster or adding more servers to an existing cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone in which you want to add the primary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Compute tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Primary Storage node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Primary Storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following information in the dialog. The information required varies depending on your choice in Protocol." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pod. The pod for the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cluster. The cluster for the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. The name of the storage device" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol. 
For XenServer, choose either NFS, iSCSI, or PreSetup. For KVM, choose NFS or SharedMountPoint. For vSphere choose either VMFS (iSCSI or FiberChannel) or NFS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Server (for NFS, iSCSI, or PreSetup). The IP address or DNS name of the storage device" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Server (for VMFS). The IP address or DNS name of the vCenter server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path (for NFS). In NFS this is the exported path from the server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path (for VMFS). In vSphere this is a combination of the datacenter name and the datastore name. The format is \"/\" datacenter name \"/\" datastore name. For example, \"/cloud.dc.VM/cluster1datastore\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path (for SharedMountPoint). With KVM this is the path on each host that is where this primary storage is mounted. For example, \"/mnt/primary\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SR Name-Label (for PreSetup). Enter the name-label of the SR that has been set up outside &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Target IQN (for iSCSI). In iSCSI this is the IQN of the target. For example, iqn.1986-03.com.sun:02:01ec9bb549-1271378984" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Lun # (for iSCSI). In iSCSI this is the LUN number. For example, 3." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tags (optional). The comma-separated list of tags for this storage device. It should be an equivalent set or superset of the tags on your disk offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The tag sets on primary storage across clusters in a Zone must be identical. For example, if cluster A provides primary storage that has tags T1 and T2, all other clusters in the Zone must also provide primary storage that has tags T1 and T2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." 
-msgstr "" - diff --git a/docs/pot/add-projects-members-from-ui.pot b/docs/pot/add-projects-members-from-ui.pot deleted file mode 100644 index 26515261a2b..00000000000 --- a/docs/pot/add-projects-members-from-ui.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding Project Members From the UI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The steps below tell how to add a new member to a project if the invitations feature is not enabled in the cloud. If the invitations feature is enabled cloud,as described in , use the procedure in ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose Projects." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Click the name of the project you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Accounts tab. The current members of the project are listed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Type the account name of the new member you want to add, and click Add Account. You can add only people who have an account in this cloud and within the same domain as the project." -msgstr "" - diff --git a/docs/pot/add-secondary-storage.pot b/docs/pot/add-secondary-storage.pot deleted file mode 100644 index d75c1dc56cc..00000000000 --- a/docs/pot/add-secondary-storage.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding Secondary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure there is nothing stored on the server. Adding the server to CloudStack will destroy any existing data." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you create a new zone, the first secondary storage is added as part of that procedure. You can add secondary storage servers at any time to add more servers to an existing zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are going to use Swift for cloud-wide secondary storage, you must add the Swift storage to &PRODUCT; before you add the local zone secondary storage servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To prepare for local zone secondary storage, you should have created and mounted an NFS share during Management Server installation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure you prepared the system VM template during Management Server installation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4. Now that the secondary storage server for per-zone storage is prepared, add it to &PRODUCT;. Secondary storage is added as part of the procedure for adding a new zone." -msgstr "" - diff --git a/docs/pot/add-security-group.pot b/docs/pot/add-security-group.pot deleted file mode 100644 index 38a117412a9..00000000000 --- a/docs/pot/add-security-group.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Security Group" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A user or administrator can define a new security group." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select view, choose Security Groups." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Security Group." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide a name and description." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The new security group appears in the Security Groups Details tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To make the security group useful, continue to Adding Ingress and Egress Rules to a Security Group." -msgstr "" - diff --git a/docs/pot/add-tier.pot b/docs/pot/add-tier.pot deleted file mode 100644 index f51b6eae721..00000000000 --- a/docs/pot/add-tier.pot +++ /dev/null @@ -1,125 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding Tiers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tiers are distinct locations within a VPC that act as isolated networks, which do not have access to other tiers by default. Tiers are set up on different VLANs that can communicate with each other by using a virtual router. Tiers provide inexpensive, low latency network connectivity to other tiers within the VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPC that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The end users can see their own VPCs, while root and domain admin can see any VPC they are authorized to see." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC for which you want to set up tiers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Add new tier dialog is displayed, as follows:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you have already created tiers, the VPC diagram is displayed. Click Create Tier to add a new tier." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the fields are mandatory." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: A unique name for the tier you create." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Offering: The following default network offerings are listed: DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, DefaultIsolatedNetworkOfferingForVpcNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a VPC, only one tier can be created by using LB-enabled network offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway: The gateway for the tier you create. Ensure that the gateway is within the Super CIDR range that you specified while creating the VPC, and is not overlapped with the CIDR of any existing tier within the VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Netmask: The netmask for the tier you create." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example, if the VPC CIDR is 10.0.0.0/16 and the network tier CIDR is 10.0.1.0/24, the gateway of the tier is 10.0.1.1, and the netmask of the tier is 255.255.255.0." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Continue with configuring access control list for the tier." -msgstr "" - diff --git a/docs/pot/add-vm-to-tier.pot b/docs/pot/add-vm-to-tier.pot deleted file mode 100644 index 0b997934bb8..00000000000 --- a/docs/pot/add-vm-to-tier.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Deploying VMs to the Tier" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to deploy the VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Add VM button of the tier for which you want to add a VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Add Instance page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Follow the on-screen instruction to add an instance. 
For information on adding an instance, see Adding Instances section in the Installation Guide." -msgstr "" - diff --git a/docs/pot/add-vpc.pot b/docs/pot/add-vpc.pot deleted file mode 100644 index 626b41369bb..00000000000 --- a/docs/pot/add-vpc.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Virtual Private Cloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When creating the VPC, you simply provide the zone and a set of IP addresses for the VPC network address space. You specify this set of addresses in the form of a Classless Inter-Domain Routing (CIDR) block." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add VPC. The Add VPC page is displayed as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following information:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: A short name for the VPC that you are creating." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description: A brief description of the VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone: Choose the zone where you want the VPC to be available." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Super CIDR for Guest Networks: Defines the CIDR range for all the tiers (guest networks) within a VPC. When you create a tier, ensure that its CIDR is within the Super CIDR value you enter. The CIDR must be RFC1918 compliant." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DNS domain for Guest Networks: If you want to assign a special domain name, specify the DNS suffix. This parameter is applied to all the tiers within the VPC. That implies, all the tiers you create in the VPC belong to the same DNS domain. If the parameter is not specified, a DNS domain name is generated automatically." -msgstr "" - diff --git a/docs/pot/added-API-commands-4-0.pot b/docs/pot/added-API-commands-4-0.pot deleted file mode 100644 index 1a477c316a2..00000000000 --- a/docs/pot/added-API-commands-4-0.pot +++ /dev/null @@ -1,259 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Added API Commands in 4.0.0-incubating" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createCounter (Adds metric counter)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteCounter (Deletes a counter)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listCounters (List the counters)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createCondition (Creates a condition)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteCondition (Removes a condition)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listConditions (List Conditions for the specific user)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createTags. Add tags to one or more resources. Example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=createTags\n" -"&resourceIds=1,10,12\n" -"&resourceType=userVm\n" -"&tags[0].key=region\n" -"&tags[0].value=canada\n" -"&tags[1].key=city\n" -"&tags[1].value=Toronto " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteTags. Remove tags from one or more resources. Example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=deleteTags\n" -"&resourceIds=1,12\n" -"&resourceType=Snapshot\n" -"&tags[0].key=city " -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listTags (Show currently defined resource tags)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVPC (Creates a VPC)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVPCs (Lists VPCs)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVPC (Deletes a VPC)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateVPC (Updates a VPC)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "restartVPC (Restarts a VPC)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVPCOffering (Creates VPC offering)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateVPCOffering (Updates VPC offering)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVPCOffering (Deletes VPC offering)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVPCOfferings (Lists VPC offerings)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createPrivateGateway (Creates a private gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPrivateGateways (List private gateways)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deletePrivateGateway (Deletes a Private gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createNetworkACL (Creates a ACL rule the given network (the network has to belong to VPC))" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteNetworkACL (Deletes a Network ACL)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworkACLs (Lists all network ACLs)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createStaticRoute (Creates a static route)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteStaticRoute (Deletes a static route)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listStaticRoutes (Lists all static routes)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVpnCustomerGateway (Creates site to site vpn customer gateway)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "createVpnGateway (Creates site to site vpn local gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVpnConnection (Create site to site vpn connection)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVpnCustomerGateway (Delete site to site vpn customer gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVpnGateway (Delete site to site vpn gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteVpnConnection (Delete site to site vpn connection)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateVpnCustomerGateway (Update site to site vpn customer gateway)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "resetVpnConnection (Reset site to site vpn connection)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVpnCustomerGateways (Lists site to site vpn customer gateways)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVpnGateways (Lists site 2 site vpn gateways)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVpnConnections (Lists site to site vpn connection gateways)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "enableCiscoNexusVSM (Enables Nexus 1000v dvSwitch in &PRODUCT;.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "disableCiscoNexusVSM (Disables Nexus 1000v dvSwitch in &PRODUCT;.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteCiscoNexusVSM (Deletes Nexus 1000v dvSwitch in &PRODUCT;.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listCiscoNexusVSMs (Lists the control VLAN ID, packet VLAN ID, and data VLAN ID, as well as the IP address of the Nexus 1000v dvSwitch.)" -msgstr "" - diff --git a/docs/pot/added-API-commands.pot b/docs/pot/added-API-commands.pot deleted file mode 100644 index fd6cb1e4a47..00000000000 --- a/docs/pot/added-API-commands.pot +++ /dev/null @@ -1,375 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:56\n" -"PO-Revision-Date: 2013-02-02T20:11:56\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Added API commands in 3.0" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Added in 3.0.2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "changeServiceForSystemVm" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Changes the service offering for a system VM (console proxy or secondary storage). The system VM must be in a \"Stopped\" state for this command to take effect." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Added in 3.0.1" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Added in 3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "assignVirtualMachine (Move a user VM to another user under same domain.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "restoreVirtualMachine (Restore a VM to original template or specific snapshot)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createLBStickinessPolicy (Creates a Load Balancer stickiness policy )" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "deleteLBStickinessPolicy (Deletes a LB stickiness policy.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listLBStickinessPolicies (Lists LBStickiness policies.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ldapConfig (Configure the LDAP context for this site.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addSwift (Adds Swift.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSwifts (List Swift.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "migrateVolume (Migrate volume)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateStoragePool (Updates a storage pool.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "authorizeSecurityGroupEgress (Authorizes a particular egress rule for this security group)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "revokeSecurityGroupEgress (Deletes a particular egress rule from this security group)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createNetworkOffering (Creates a network offering.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteNetworkOffering (Deletes a network offering.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createProject (Creates a project)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteProject (Deletes a project)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateProject (Updates a project)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "activateProject (Activates a project)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "suspendProject (Suspends a project)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listProjects (Lists projects and provides detailed information for listed projects)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addAccountToProject (Adds acoount to a project)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteAccountFromProject (Deletes account from the project)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listProjectAccounts (Lists project's accounts)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listProjectInvitations (Lists an account's invitations to join projects)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateProjectInvitation (Accepts or declines project invitation)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteProjectInvitation (Deletes a project invitation)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateHypervisorCapabilities (Updates a hypervisor capabilities.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listHypervisorCapabilities (Lists all hypervisor capabilities.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createPhysicalNetwork (Creates a physical network)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deletePhysicalNetwork (Deletes a Physical Network.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPhysicalNetworks (Lists physical networks)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updatePhysicalNetwork (Updates a physical network)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSupportedNetworkServices (Lists all network services provided by &PRODUCT; or for the given Provider.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addNetworkServiceProvider (Adds a network serviceProvider to a physical network)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteNetworkServiceProvider (Deletes a Network Service Provider.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworkServiceProviders (Lists network serviceproviders for a given physical network.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateNetworkServiceProvider (Updates a network serviceProvider of a physical network)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addTrafficType (Adds traffic type to a physical network)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "deleteTrafficType (Deletes traffic type of a physical network)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listTrafficTypes (Lists traffic types of a given physical network.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateTrafficType (Updates traffic type of a physical network)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listTrafficTypeImplementors (Lists implementors of implementor of a network traffic type or implementors of all network traffic types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createStorageNetworkIpRange (Creates a Storage network IP range.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteStorageNetworkIpRange (Deletes a storage network IP Range.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listStorageNetworkIpRange (List a storage network IP range.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateStorageNetworkIpRange (Update a Storage network IP range, only allowed when no IPs in this range have been allocated.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listUsageTypes (List Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addF5LoadBalancer (Adds a F5 BigIP load balancer device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "configureF5LoadBalancer (configures a F5 load balancer device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteF5LoadBalancer ( delete a F5 load balancer device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listF5LoadBalancers (lists F5 load balancer devices)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listF5LoadBalancerNetworks (lists network that are using a F5 load balancer device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addSrxFirewall (Adds a SRX firewall device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteSrxFirewall ( delete a SRX firewall device)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listSrxFirewalls (lists SRX firewall devices in a physical network)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSrxFirewallNetworks (lists network that are using SRX firewall device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addNetscalerLoadBalancer (Adds a netscaler load balancer device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteNetscalerLoadBalancer ( delete a netscaler load balancer device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "configureNetscalerLoadBalancer (configures a netscaler load balancer device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetscalerLoadBalancers (lists netscaler load balancer devices)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetscalerLoadBalancerNetworks (lists network that are using a netscaler load balancer device)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVirtualRouterElement (Create a virtual router element.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "configureVirtualRouterElement (Configures a virtual router element.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVirtualRouterElements (Lists all available virtual router elements.)" -msgstr "" - diff --git a/docs/pot/added-error-codes.pot b/docs/pot/added-error-codes.pot deleted file mode 100644 index 5062c9ebbac..00000000000 --- a/docs/pot/added-error-codes.pot +++ /dev/null @@ -1,330 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Added &PRODUCT; Error Codes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can now find the &PRODUCT;-specific error code in the exception response for each type of exception. The following list of error codes is added to the new class named CSExceptionErrorCode. These codes are applicable in &PRODUCT; 3.0.3 and later versions." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4250 : \"com.cloud.utils.exception.CloudRuntimeException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4255 : \"com.cloud.utils.exception.ExceptionUtil\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4260 : \"com.cloud.utils.exception.ExecutionException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4265 : \"com.cloud.utils.exception.HypervisorVersionChangedException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4270 : \"com.cloud.utils.exception.RuntimeCloudException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4275 : \"com.cloud.exception.CloudException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4280 : \"com.cloud.exception.AccountLimitException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4285 : \"com.cloud.exception.AgentUnavailableException\"" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "4290 : \"com.cloud.exception.CloudAuthenticationException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4295 : \"com.cloud.exception.CloudExecutionException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4300 : \"com.cloud.exception.ConcurrentOperationException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4305 : \"com.cloud.exception.ConflictingNetworkSettingsException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4310 : \"com.cloud.exception.DiscoveredWithErrorException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4315 : \"com.cloud.exception.HAStateException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4320 : \"com.cloud.exception.InsufficientAddressCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4325 : \"com.cloud.exception.InsufficientCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4330 : \"com.cloud.exception.InsufficientNetworkCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4335 : \"com.cloud.exception.InsufficientServerCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4340 : \"com.cloud.exception.InsufficientStorageCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4345 : \"com.cloud.exception.InternalErrorException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4350 : \"com.cloud.exception.InvalidParameterValueException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4355 : \"com.cloud.exception.ManagementServerException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4360 : \"com.cloud.exception.NetworkRuleConflictException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4365 : \"com.cloud.exception.PermissionDeniedException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4370 : \"com.cloud.exception.ResourceAllocationException\"" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "4375 : \"com.cloud.exception.ResourceInUseException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4380 : \"com.cloud.exception.ResourceUnavailableException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4385 : \"com.cloud.exception.StorageUnavailableException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4390 : \"com.cloud.exception.UnsupportedServiceException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4395 : \"com.cloud.exception.VirtualMachineMigrationException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4400 : \"com.cloud.exception.AccountLimitException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4405 : \"com.cloud.exception.AgentUnavailableException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4410 : \"com.cloud.exception.CloudAuthenticationException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4415 : \"com.cloud.exception.CloudException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4420 : \"com.cloud.exception.CloudExecutionException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4425 : \"com.cloud.exception.ConcurrentOperationException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4430 : \"com.cloud.exception.ConflictingNetworkSettingsException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4435 : \"com.cloud.exception.ConnectionException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4440 : \"com.cloud.exception.DiscoveredWithErrorException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4445 : \"com.cloud.exception.DiscoveryException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4450 : \"com.cloud.exception.HAStateException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4455 : \"com.cloud.exception.InsufficientAddressCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4460 : \"com.cloud.exception.InsufficientCapacityException\"" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "4465 : \"com.cloud.exception.InsufficientNetworkCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4470 : \"com.cloud.exception.InsufficientServerCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4475 : \"com.cloud.exception.InsufficientStorageCapacityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4480 : \"com.cloud.exception.InsufficientVirtualNetworkCapcityException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4485 : \"com.cloud.exception.InternalErrorException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4490 : \"com.cloud.exception.InvalidParameterValueException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4495 : \"com.cloud.exception.ManagementServerException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4500 : \"com.cloud.exception.NetworkRuleConflictException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4505 : \"com.cloud.exception.PermissionDeniedException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4510 : \"com.cloud.exception.ResourceAllocationException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4515 : \"com.cloud.exception.ResourceInUseException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4520 : \"com.cloud.exception.ResourceUnavailableException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4525 : \"com.cloud.exception.StorageUnavailableException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4530 : \"com.cloud.exception.UnsupportedServiceException\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4535 : \"com.cloud.exception.VirtualMachineMigrationException\"" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "9999 : \"org.apache.cloudstack.api.ServerApiException\"" -msgstr "" - diff --git a/docs/pot/adding-IP-addresses-for-the-public-network.pot b/docs/pot/adding-IP-addresses-for-the-public-network.pot deleted file mode 100644 index ef9a609f5c1..00000000000 --- a/docs/pot/adding-IP-addresses-for-the-public-network.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding IP Addresses for the Public Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These instructions assume you have already logged in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. In Zones, click View More, then click the desired zone ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Network tab." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In the Public node of the diagram, click Configure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP Ranges tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following information:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway. The gateway in use for these IP addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Netmask. The netmask associated with this IP range" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN. The VLAN that will be used for public traffic" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start IP/End IP. A range of IP addresses that are assumed to be accessible from the Internet and will be allocated for access to guest networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add." -msgstr "" - diff --git a/docs/pot/additional-installation-options.pot b/docs/pot/additional-installation-options.pot deleted file mode 100644 index 1e00eaed152..00000000000 --- a/docs/pot/additional-installation-options.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Additional Installation Options" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The next few sections describe &PRODUCT; features above and beyond the basic deployment options." -msgstr "" - diff --git a/docs/pot/admin-alerts.pot b/docs/pot/admin-alerts.pot deleted file mode 100644 index 7935a2b3655..00000000000 --- a/docs/pot/admin-alerts.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Administrator Alerts" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The system provides alerts and events to help with the management of the cloud. Alerts are notices to an administrator, generally delivered by e-mail, notifying the administrator that an error has occurred in the cloud. Alert behavior is configurable." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Events track all of the user and administrator actions in the cloud. For example, every guest VM start creates an associated event. Events are stored in the Management Server’s database." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Emails will be sent to administrators under the following circumstances:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server cluster runs low on CPU, memory, or storage resources" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server loses heartbeat from a Host for more than 3 minutes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Host cluster runs low on CPU, memory, or storage resources" -msgstr "" - diff --git a/docs/pot/admin-guide.pot b/docs/pot/admin-guide.pot deleted file mode 100644 index b571c79a78c..00000000000 --- a/docs/pot/admin-guide.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Administrator Guide" -msgstr "" - diff --git a/docs/pot/adv-zone-topology-req.pot b/docs/pot/adv-zone-topology-req.pot deleted file mode 100644 index 2b0fd2d948c..00000000000 --- a/docs/pot/adv-zone-topology-req.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Advanced Zone Topology Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "With Advanced Networking, separate subnets must be used for private and public networks." 
-msgstr "" - diff --git a/docs/pot/advanced-zone-configuration.pot b/docs/pot/advanced-zone-configuration.pot deleted file mode 100644 index 2c0543140b4..00000000000 --- a/docs/pot/advanced-zone-configuration.pot +++ /dev/null @@ -1,345 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Advanced Zone Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After you select Advanced in the Add Zone wizard and click Next, you will be asked to enter the following details. Then click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. A name for the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DNS 1 and 2. These are DNS servers for use by guest VMs in the zone. These DNS servers will be accessed via the public network you will add later. The public IP addresses for the zone must have a route to the DNS server named here." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Internal DNS 1 and Internal DNS 2. These are DNS servers for use by system VMs in the zone(these are VMs used by &PRODUCT; itself, such as virtual routers, console proxies,and Secondary Storage VMs.) These DNS servers will be accessed via the management traffic network interface of the System VMs. The private IP address you provide for the pods must have a route to the internal DNS server named here." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Domain. (Optional) If you want to assign a special domain name to the guest VM network, specify the DNS suffix." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest CIDR. This is the CIDR that describes the IP addresses in use in the guest virtual networks in this zone. For example, 10.1.1.0/24. As a matter of good practice you should set different CIDRs for different zones. This will make it easier to set up VPNs between networks in different zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor. (Introduced in version 3.0.1) Choose the hypervisor for the first cluster in the zone. You can add clusters with different hypervisors later, after you finish adding the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. A public zone is available to all users. A zone that is not public will be assigned to a particular domain. Only users in that domain will be allowed to create guest VMs in this zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose which traffic types will be carried by the physical network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The traffic types are management, public, guest, and storage traffic. For more information about the types, roll over the icons to display their tool tips, or see . This screen starts out with one network already configured. If you have multiple physical networks, you need to add more. Drag and drop traffic types onto a greyed-out network and it will become active. 
You can move the traffic icons from one network to another; for example, if the default traffic types shown for Network 1 do not match your actual setup, you can move them down. You can also change the network names if desired." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Introduced in version 3.0.1) Assign a network traffic label to each traffic type on each physical network. These labels must match the labels you have already defined on the hypervisor host. To assign each label, click the Edit button under the traffic type icon within each physical network. A popup dialog appears where you can type the label, then click OK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These traffic labels will be defined only for the hypervisor selected for the first cluster. For all other hypervisors, the labels can be configured after the zone is created." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure the IP range for public Internet traffic. Enter the following details, then click Add. If desired, you can repeat this step to add more public Internet IP ranges. When done, click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway. The gateway in use for these IP addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Netmask. The netmask associated with this IP range." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN. The VLAN that will be used for public traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start IP/End IP. A range of IP addresses that are assumed to be accessible from the Internet and will be allocated for access to guest networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new zone, &PRODUCT; adds the first pod for you. You can always add more pods later. For an overview of what a pod is, see ." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To configure the first pod, enter the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pod Name. A name for the pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reserved system gateway. The gateway for the hosts in that pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reserved system netmask. The network prefix that defines the pod's subnet. Use CIDR notation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start/End Reserved System IP. The IP range in the management network that &PRODUCT; uses to manage various system VMs, such as Secondary Storage VMs, Console Proxy VMs, and DHCP. For more information, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify a range of VLAN IDs to carry guest traffic for each physical network (see VLAN Allocation Example ), then click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new pod, &PRODUCT; adds the first cluster for you. You can always add more clusters later. For an overview of what a cluster is, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure the first cluster, enter the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor. (Version 3.0.0 only; in 3.0.1, this field is read only) Choose the type of hypervisor software that all hosts in this cluster will run. If you choose VMware, additional fields appear so you can give information about a vSphere cluster. For vSphere servers, we recommend creating the cluster of hosts in vCenter and then adding the entire cluster to &PRODUCT;. See Add Cluster: vSphere ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cluster name. Enter a name for the cluster. This can be text of your choosing and is not used by &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new cluster, &PRODUCT; adds the first host for you. You can always add more hosts later. For an overview of what a host is, see ." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you deploy &PRODUCT;, the hypervisor host must not have any VMs already running." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before you can configure the host, you need to install the hypervisor software on the host. You will need to know which version of the hypervisor software version is supported by &PRODUCT; and what additional configuration is required to ensure the host will work with &PRODUCT;. To find these installation details, see:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Citrix XenServer Installation for &PRODUCT;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware vSphere Installation and Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM Installation and Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure the first host, enter the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host Name. The DNS name or IP address of the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Username. Usually root." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password. This is the password for the user named above (from your XenServer or KVM install)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host Tags. (Optional) Any labels that you use to categorize hosts for ease of maintenance. For example, you can set to the cloud's HA tag (set in the ha.tag global configuration parameter) if you want this host to be used only for VMs with the \"high availability\" feature enabled. For more information, see HA-Enabled Virtual Machines as well as HA for Hosts, both in the Administration Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new cluster, &PRODUCT; adds the first primary storage server for you. You can always add more servers later. For an overview of what primary storage is, see ." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To configure the first primary storage server, enter the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. The name of the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol. For XenServer, choose either NFS, iSCSI, or PreSetup. For KVM, choose NFS, SharedMountPoint, CLVM, and RBD. For vSphere choose either VMFS (iSCSI or FiberChannel) or NFS. The remaining fields in the screen vary depending on what you choose here." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NFS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Server. The IP address or DNS name of the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path. The exported path from the server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tags (optional). The comma-separated list of tags for this storage device. It should be an equivalent set or superset of the tags on your disk offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The tag sets on primary storage across clusters in a Zone must be identical. For example, if cluster A provides primary storage that has tags T1 and T2, all other clusters in the Zone must also provide primary storage that has tags T1 and T2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "iSCSI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Target IQN. The IQN of the target. For example, iqn.1986-03.com.sun:02:01ec9bb549-1271378984." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Lun. The LUN number. For example, 3." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "preSetup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SR Name-Label. Enter the name-label of the SR that has been set up outside &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SharedMountPoint" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path. The path on each host that is where this primary storage is mounted. For example, \"/mnt/primary\"." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMFS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Server. The IP address or DNS name of the vCenter server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path. A combination of the datacenter name and the datastore name. The format is \"/\" datacenter name \"/\" datastore name. For example, \"/cloud.dc.VM/cluster1datastore\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new zone, &PRODUCT; adds the first secondary storage server for you. For an overview of what secondary storage is, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before you can fill out this screen, you need to prepare the secondary storage by setting up NFS shares and installing the latest &PRODUCT; System VM template. See Adding Secondary Storage :" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NFS Server. The IP address of the server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Launch." -msgstr "" - diff --git a/docs/pot/advanced-zone-guest-ip-addresses.pot b/docs/pot/advanced-zone-guest-ip-addresses.pot deleted file mode 100644 index c0ed786071b..00000000000 --- a/docs/pot/advanced-zone-guest-ip-addresses.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Advanced Zone Guest IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When advanced networking is used, the administrator can create additional networks for use by the guests. These networks can span the zone and be available to all accounts, or they can be scoped to a single account, in which case only the named account may create guests that attach to these networks. The networks are defined by a VLAN ID, IP range, and gateway. The administrator may provision thousands of these networks if desired." -msgstr "" - diff --git a/docs/pot/advanced-zone-network-traffic-types.pot b/docs/pot/advanced-zone-network-traffic-types.pot deleted file mode 100644 index 3b24de1289e..00000000000 --- a/docs/pot/advanced-zone-network-traffic-types.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Advanced Zone Network Traffic Types" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When advanced networking is used, there can be multiple physical networks in the zone. Each physical network can carry one or more traffic types, and you need to let &PRODUCT; know which type of network traffic you want each network to carry. The traffic types in an advanced zone are:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest. When end users run VMs, they generate guest traffic. The guest VMs communicate with each other over a network that can be referred to as the guest network. This network can be isolated or shared. In an isolated guest network, the administrator needs to reserve VLAN ranges to provide isolation for each &PRODUCT; account’s network (potentially a large number of VLANs). In a shared guest network, all guest VMs share a single network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Management. When &PRODUCT;’s internal resources communicate with each other, they generate management traffic. This includes communication between hosts, system VMs (VMs used by &PRODUCT; to perform various tasks in the cloud), and any other component that communicates directly with the &PRODUCT; Management Server. You must configure the IP range for the system VMs to use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. Public traffic is generated when VMs in the cloud access the Internet. Publicly accessible IPs must be allocated for this purpose. 
End users can use the &PRODUCT; UI to acquire these IPs to implement NAT between their guest network and the public network, as described in “Acquiring a New IP Address†in the Administration Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage. Traffic such as VM templates and snapshots, which is sent between the secondary storage VM and secondary storage servers. &PRODUCT; uses a separate Network Interface Controller (NIC) named storage NIC for storage network traffic. Use of a storage NIC that always operates on a high bandwidth network allows fast template and snapshot copying. You must configure the IP range to use for the storage network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These traffic types can each be on a separate physical network, or they can be combined with certain restrictions. When you use the Add Zone wizard in the UI to create a new zone, you are guided into making only valid choices." -msgstr "" - diff --git a/docs/pot/advanced-zone-physical-network-configuration.pot b/docs/pot/advanced-zone-physical-network-configuration.pot deleted file mode 100644 index ba06ced75ce..00000000000 --- a/docs/pot/advanced-zone-physical-network-configuration.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Advanced Zone Physical Network Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Within a zone that uses advanced networking, you need to tell the Management Server how the physical network is set up to carry different kinds of traffic in isolation." -msgstr "" - diff --git a/docs/pot/advanced-zone-public-ip-addresses.pot b/docs/pot/advanced-zone-public-ip-addresses.pot deleted file mode 100644 index 54d3ef2933e..00000000000 --- a/docs/pot/advanced-zone-public-ip-addresses.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Advanced Zone Public IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When advanced networking is used, the administrator can create additional networks for use by the guests. These networks can span the zone and be available to all accounts, or they can be scoped to a single account, in which case only the named account may create guests that attach to these networks. The networks are defined by a VLAN ID, IP range, and gateway. The administrator may provision thousands of these networks if desired." -msgstr "" - diff --git a/docs/pot/alerts.pot b/docs/pot/alerts.pot deleted file mode 100644 index ea65bdc3ed6..00000000000 --- a/docs/pot/alerts.pot +++ /dev/null @@ -1,165 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Alerts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following is the list of alert type numbers. The current alerts can be found by calling listAlerts." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "MEMORY = 0" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "CPU = 1" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "STORAGE =2" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "STORAGE_ALLOCATED = 3" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "PUBLIC_IP = 4" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "PRIVATE_IP = 5" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "HOST = 6" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "USERVM = 7" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DOMAIN_ROUTER = 8" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "CONSOLE_PROXY = 9" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "ROUTING = 10// lost connection to default route (to the gateway)" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "STORAGE_MISC = 11 // lost connection to default route (to the gateway)" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "USAGE_SERVER = 12 // lost connection to default route (to the gateway)" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "MANAGMENT_NODE = 13 // lost connection to default route (to the gateway)" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DOMAIN_ROUTER_MIGRATE = 14" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "CONSOLE_PROXY_MIGRATE = 15" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "USERVM_MIGRATE = 16" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "VLAN = 17" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "SSVM = 18" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "USAGE_SERVER_RESULT = 19" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "STORAGE_DELETE = 20;" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "UPDATE_RESOURCE_COUNT = 21; //Generated when we fail to update the resource count" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "USAGE_SANITY_RESULT = 22;" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DIRECT_ATTACHED_PUBLIC_IP = 23;" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "LOCAL_STORAGE = 24;" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "RESOURCE_LIMIT_EXCEEDED = 25; //Generated when the resource limit exceeds the limit. Currently used for recurring snapshots only" -msgstr "" - diff --git a/docs/pot/allocators.pot b/docs/pot/allocators.pot deleted file mode 100644 index 9394ee6806c..00000000000 --- a/docs/pot/allocators.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Allocators" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; enables administrators to write custom allocators that will choose the Host to place a new guest and the storage host from which to allocate guest virtual disk images." -msgstr "" - diff --git a/docs/pot/api-calls.pot b/docs/pot/api-calls.pot deleted file mode 100644 index b8e2c8fb6db..00000000000 --- a/docs/pot/api-calls.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Calling the &PRODUCT; API" -msgstr "" - diff --git a/docs/pot/api-overview.pot b/docs/pot/api-overview.pot deleted file mode 100644 index 51a559d7ffb..00000000000 --- a/docs/pot/api-overview.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "&PRODUCT; API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; API is a low level API that has been used to implement the &PRODUCT; web UIs. It is also a good basis for implementing other popular APIs such as EC2/S3 and emerging DMTF standards." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Many &PRODUCT; API calls are asynchronous. These will return a Job ID immediately when called. This Job ID can be used to query the status of the job later. Also, status calls on impacted resources will provide some indication of their state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The API has a REST-like query basis and returns results in XML or JSON." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "See the Developer’s Guide and the API Reference." -msgstr "" - diff --git a/docs/pot/api-reference.pot b/docs/pot/api-reference.pot deleted file mode 100644 index 0a6a99ec4db..00000000000 --- a/docs/pot/api-reference.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "API Reference Documentation" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "You can find all the API reference documentation at the below site:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://incubator.apache.org/cloudstack/docs/api/" -msgstr "" - diff --git a/docs/pot/asynchronous-commands-example.pot b/docs/pot/asynchronous-commands-example.pot deleted file mode 100644 index 92f8d322c09..00000000000 --- a/docs/pot/asynchronous-commands-example.pot +++ /dev/null @@ -1,166 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Example" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following shows an example of using an asynchronous command. Assume the API command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=deployVirtualMachine&zoneId=1&serviceOfferingId=1&diskOfferingId=1&templateId=1\n" -" " -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "CloudStack will immediately return a job ID and any other additional data." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" <deployvirtualmachineresponse> \n" -" <jobid>1</jobid>\n" -" <id>100</id>\n" -" </deployvirtualmachineresponse>\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Using the job ID, you can periodically poll for the results by using the queryAsyncJobResult command." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=queryAsyncJobResult&jobId=1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Three possible results could come from this query." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Job is still pending:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" <queryasyncjobresult> \n" -" <jobid>1</jobid>\n" -" <jobstatus>0</jobstatus>\n" -" <jobprocstatus>1</jobprocstatus>\n" -" </queryasyncjobresult>\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Job has succeeded:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "\n" -" <queryasyncjobresultresponse cloud-stack-version=\"3.0.1.6\">\n" -" <jobid>1</jobid>\n" -" <jobstatus>1</jobstatus>\n" -" <jobprocstatus>0</jobprocstatus>\n" -" <jobresultcode>0</jobresultcode>\n" -" <jobresulttype>object</jobresulttype>\n" -" <jobresult>\n" -" <virtualmachine>\n" -" <id>450</id>\n" -" <name>i-2-450-VM</name>\n" -" <displayname>i-2-450-VM</displayname>\n" -" <account>admin</account>\n" -" <domainid>1</domainid>\n" -" <domain>ROOT</domain>\n" -" <created>2011-03-10T18:20:25-0800</created>\n" -" <state>Running</state>\n" -" <haenable>false</haenable>\n" -" <zoneid>1</zoneid>\n" -" <zonename>San Jose 1</zonename>\n" -" <hostid>2</hostid>\n" -" <hostname>905-13.sjc.lab.vmops.com</hostname>\n" -" <templateid>1</templateid>\n" -" <templatename>CentOS 5.3 64bit LAMP</templatename>\n" -" <templatedisplaytext>CentOS 5.3 64bit LAMP</templatedisplaytext>\n" -" <passwordenabled>false</passwordenabled>\n" -" <serviceofferingid>1</serviceofferingid>\n" -" <serviceofferingname>Small Instance</serviceofferingname>\n" -" <cpunumber>1</cpunumber>\n" -" <cpuspeed>500</cpuspeed>\n" -" <memory>512</memory>\n" -" <guestosid>12</guestosid>\n" -" <rootdeviceid>0</rootdeviceid>\n" -" <rootdevicetype>NetworkFilesystem</rootdevicetype>\n" -" <nic>\n" -" <id>561</id>\n" -" <networkid>205</networkid>\n" -" <netmask>255.255.255.0</netmask>\n" -" <gateway>10.1.1.1</gateway>\n" -" <ipaddress>10.1.1.225</ipaddress>\n" -" <isolationuri>vlan://295</isolationuri>\n" -" <broadcasturi>vlan://295</broadcasturi>\n" -" <traffictype>Guest</traffictype>\n" -" <type>Virtual</type>\n" -" <isdefault>true</isdefault>\n" -" </nic>\n" -" <hypervisor>XenServer</hypervisor>\n" -" </virtualmachine>\n" -" </jobresult>\n" -" </queryasyncjobresultresponse>\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Job has failed:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "\n" -" <queryasyncjobresult>\n" -" <jobid>1</jobid> \n" -" <jobstatus>2</jobstatus> \n" -" <jobprocstatus>0</jobprocstatus>\n" -" <jobresultcode>551</jobresultcode>\n" -" <jobresulttype>text</jobresulttype>\n" -" <jobresult>Unable to deploy virtual machine id = 100 due to not enough capacity</jobresult> \n" -" </queryasyncjobresult>\n" -" " -msgstr "" - diff --git a/docs/pot/asynchronous-commands.pot b/docs/pot/asynchronous-commands.pot deleted file mode 100644 index 44e2aacc8b5..00000000000 --- a/docs/pot/asynchronous-commands.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Asynchronous Commands" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asynchronous commands were introduced in &PRODUCT; 2.x. 
Commands are designated as asynchronous when they can potentially take a long period of time to complete such as creating a snapshot or disk volume. They differ from synchronous commands by the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "They are identified in the API Reference by an (A)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "They will immediately return a job ID to refer to the job that will be responsible in processing the command." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If executed as a \"create\" resource command, it will return the resource ID as well as the job ID." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can periodically check the status of the job by making a simple API call to the command, queryAsyncJobResult and passing in the job ID." -msgstr "" - diff --git a/docs/pot/attach-iso-to-vm.pot b/docs/pot/attach-iso-to-vm.pot deleted file mode 100644 index 791bd167243..00000000000 --- a/docs/pot/attach-iso-to-vm.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Attaching an ISO to a VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Instances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the virtual machine you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Attach ISO button iso.png: Depicts adding an iso image " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Attach ISO dialog box, select the desired ISO." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK" -msgstr "" - diff --git a/docs/pot/attaching-volume.pot b/docs/pot/attaching-volume.pot deleted file mode 100644 index 37336587d1f..00000000000 --- a/docs/pot/attaching-volume.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Attaching a Volume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can attach a volume to a guest VM to provide extra disk storage. Attach a volume when you first create a new volume, when you are moving an existing volume from one VM to another, or after you have migrated a volume from one storage pool to another." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose Volumes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4. Click the volume name in the Volumes list, then click the Attach Disk button AttachDiskButton.png: button to attach a volume " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Instance popup, choose the VM to which you want to attach the volume. You will only see instances to which you are allowed to attach volumes; for example, a user will see only instances created by that user, but the administrator will have more choices." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the volume has been attached, you should be able to see it by clicking Instances, the instance name, and View Volumes." 
-msgstr "" - diff --git a/docs/pot/automatic-snapshot-creation-retention.pot b/docs/pot/automatic-snapshot-creation-retention.pot deleted file mode 100644 index a0443c4ad37..00000000000 --- a/docs/pot/automatic-snapshot-creation-retention.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Automatic Snapshot Creation and Retention" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Supported for the following hypervisors: XenServer, VMware vSphere, and KVM)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Users can set up a recurring snapshot policy to automatically create multiple snapshots of a disk at regular intervals. Snapshots can be created on an hourly, daily, weekly, or monthly interval. One snapshot policy can be set up per disk volume. For example, a user can set up a daily snapshot at 02:30." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "With each snapshot schedule, users can also specify the number of scheduled snapshots to be retained. Older snapshots that exceed the retention limit are automatically deleted. This user-defined limit must be equal to or lower than the global limit set by the &PRODUCT; administrator. See . The limit applies only to those snapshots that are taken as part of an automatic recurring snapshot policy. Additional manual snapshots can be created and retained." -msgstr "" - diff --git a/docs/pot/autoscale.pot b/docs/pot/autoscale.pot deleted file mode 100644 index 34c60c45482..00000000000 --- a/docs/pot/autoscale.pot +++ /dev/null @@ -1,270 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring AutoScale" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "AutoScaling allows you to scale your back-end services or application VMs up or down seamlessly and automatically according to the conditions you define. With AutoScaling enabled, you can ensure that the number of VMs you are using seamlessly scale up when demand increases, and automatically decreases when demand subsides. Thus it helps you save compute costs by terminating underused VMs automatically and launching new VMs when you need them, without the need for manual intervention." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NetScaler AutoScaling is designed to seamlessly launch or terminate VMs based on user-defined conditions. Conditions for triggering a scaleup or scaledown action can vary from a simple use case like monitoring the CPU usage of a server to a complex use case of monitoring a combination of server's responsiveness and its CPU usage. For example, you can configure AutoScaling to launch an additional VM whenever CPU usage exceeds 80 percent for 15 minutes, or to remove a VM whenever CPU usage is less than 20 percent for 30 minutes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; uses the NetScaler load balancer to monitor all aspects of a system's health and work in unison with &PRODUCT; to initiate scale-up or scale-down actions. The supported NetScaler version is 10.0." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Prerequisites" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before you configure an AutoScale rule, consider the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that the necessary template is prepared before configuring AutoScale. When a VM is deployed by using a template and when it comes up, the application should be up and running." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If the application is not running, the NetScaler device considers the VM as ineffective and continues provisioning the VMs unconditionally until the resource limit is exhausted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Deploy the templates you prepared. Ensure that the applications come up on the first boot and is ready to take the traffic. Observe the time requires to deploy the template. Consider this time when you specify the quiet time while configuring AutoScale." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The AutoScale feature supports the SNMP counters that can be used to define conditions for taking scale up or scale down actions. To monitor the SNMP-based counter, ensure that the SNMP agent is installed in the template used for creating the AutoScale VMs, and the SNMP operations work with the configured SNMP community and port by using standard SNMP managers. For example, see to configure SNMP on a RHEL machine." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that the endpointe.url parameter present in the Global Settings is set to the Management Server API URL. For example, http://10.102.102.22:8080/client/api. In a multi-node Management Server deployment, use the virtual IP address configured in the load balancer for the management server’s cluster. Additionally, ensure that the NetScaler device has access to this IP address to provide AutoScale support." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you update the endpointe.url, disable the AutoScale functionality of the load balancer rules in the system, then enable them back to reflect the changes. For more information see " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the API Key and Secret Key are regenerated for an AutoScale user, ensure that the AutoScale functionality of the load balancers that the user participates in are disabled and then enabled to reflect the configuration changes in the NetScaler." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "In an advanced Zone, ensure that at least one VM should be present before configuring a load balancer rule with AutoScale. Having one VM in the network ensures that the network is in implemented state for configuring AutoScale." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Template: A template consists of a base OS image and application. A template is used to provision the new instance of an application on a scaleup action. When a VM is deployed from a template, the VM can start taking the traffic from the load balancer without any admin intervention. For example, if the VM is deployed for a Web service, it should have the Web server running, the database connected, and so on." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Compute offering: A predefined set of virtual hardware attributes, including CPU speed, number of CPUs, and RAM size, that the user can select when creating a new virtual machine instance. Choose one of the compute offerings to be used while provisioning a VM instance as part of scaleup action." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Min Instance: The minimum number of active VM instances that is assigned to a load balancing rule. The active VM instances are the application instances that are up and serving the traffic, and are being load balanced. This parameter ensures that a load balancing rule has at least the configured number of active VM instances are available to serve the traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If an application, such as SAP, running on a VM instance is down for some reason, the VM is then not counted as part of Min Instance parameter, and the AutoScale feature initiates a scaleup action if the number of active VM instances is below the configured value. 
Similarly, when an application instance comes up from its earlier down state, this application instance is counted as part of the active instance count and the AutoScale process initiates a scaledown action when the active instance count breaches the Max instance value." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Max Instance: Maximum number of active VM instances that should be assigned to a load balancing rule. This parameter defines the upper limit of active VM instances that can be assigned to a load balancing rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specifying a large value for the maximum instance parameter might result in provisioning large number of VM instances, which in turn leads to a single load balancing rule exhausting the VM instances limit specified at the account or domain level." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If an application, such as SAP, running on a VM instance is down for some reason, the VM is not counted as part of Max Instance parameter. So there may be scenarios where the number of VMs provisioned for a scaleup action might be more than the configured Max Instance value. Once the application instances in the VMs are up from an earlier down state, the AutoScale feature starts aligning to the configured Max Instance value." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify the following scale-up and scale-down policies:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Duration: The duration, in seconds, for which the conditions you specify must be true to trigger a scaleup action. The conditions defined should hold true for the entire duration you specify for an AutoScale action to be invoked." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Counter: The performance counters expose the state of the monitored instances. By default, &PRODUCT; offers four performance counters: Three SNMP counters and one NetScaler counter. 
The SNMP counters are Linux User CPU, Linux System CPU, and Linux CPU Idle. The NetScaler counter is ResponseTime. The root administrator can add additional counters into &PRODUCT; by using the &PRODUCT; API." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Operator: The following five relational operators are supported in AutoScale feature: Greater than, Less than, Less than or equal to, Greater than or equal to, and Equal to." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Threshold: Threshold value to be used for the counter. Once the counter defined above breaches the threshold value, the AutoScale feature initiates a scaleup or scaledown action." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add: Click Add to add the condition." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Additionally, if you want to configure the advanced settings, click Show advanced settings, and specify the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Polling interval: Frequency in which the conditions, combination of counter, operator and threshold, are to be evaluated before taking a scale up or down action. The default polling interval is 30 seconds." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Quiet Time: This is the cool down period after an AutoScale action is initiated. The time includes the time taken to complete provisioning a VM instance from its template and the time taken by an application to be ready to serve traffic. This quiet time allows the fleet to come up to a stable state before any action can take place. The default is 300 seconds." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Destroy VM Grace Period: The duration in seconds, after a scaledown action is initiated, to wait before the VM is destroyed as part of scaledown action. This is to ensure graceful close of any pending sessions or transactions being served by the VM marked for destroy. The default is 120 seconds." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Security Groups: Security groups provide a way to isolate traffic to the VM instances. A security group is a group of VMs that filter their incoming and outgoing traffic according to a set of rules, called ingress and egress rules. These rules filter network traffic according to the IP address that is attempting to communicate with the VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disk Offerings: A predefined set of disk size for primary data storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SNMP Community: The SNMP community string to be used by the NetScaler device to query the configured counter value from the provisioned VM instances. Default is public." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SNMP Port: The port number on which the SNMP agent that run on the provisioned VMs is listening. Default port is 161." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "User: This is the user that the NetScaler device use to invoke scaleup and scaledown API calls to the cloud. If no option is specified, the user who configures AutoScaling is applied. Specify another user name to override." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Apply: Click Apply to create the AutoScale configuration." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Disabling and Enabling an AutoScale Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you want to perform any maintenance operation on the AutoScale VM instances, disable the AutoScale configuration. When the AutoScale configuration is disabled, no scaleup or scaledown action is performed. You can use this downtime for the maintenance activities. To disable the AutoScale configuration, click the Disable AutoScale EnableDisable.png: button to enable or disable AutoScale. button." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The button toggles between enable and disable, depending on whether AutoScale is currently enabled or not. 
After the maintenance operations are done, you can enable the AutoScale configuration back. To enable, open the AutoScale configuration page again, then click the Enable AutoScale EnableDisable.png: button to enable or disable AutoScale. button." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Updating an AutoScale Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can update the various parameters and add or delete the conditions in a scaleup or scaledown rule. Before you update an AutoScale configuration, ensure that you disable the AutoScale load balancer rule by clicking the Disable AutoScale button." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After you modify the required AutoScale parameters, click Apply. To apply the new AutoScale policies, open the AutoScale configuration page again, then click the Enable AutoScale button." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Runtime Considerations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "An administrator should not assign a VM to a load balancing rule which is configured for AutoScale." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before a VM provisioning is completed if NetScaler is shutdown or restarted, the provisioned VM cannot be a part of the load balancing rule though the intent was to assign it to a load balancing rule. To workaround, rename the AutoScale provisioned VMs based on the rule name or ID so at any point of time the VMs can be reconciled to its load balancing rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Making API calls outside the context of AutoScale, such as destroyVM, on an autoscaled VM leaves the load balancing configuration in an inconsistent state. Though VM is destroyed from the load balancer rule, NetScaler continues to show the VM as a service assigned to a rule." 
-msgstr "" - diff --git a/docs/pot/aws-api-examples.pot b/docs/pot/aws-api-examples.pot deleted file mode 100644 index be0b0369de6..00000000000 --- a/docs/pot/aws-api-examples.pot +++ /dev/null @@ -1,166 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Examples" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are many tools available to interface with a AWS compatible API. In this section we provide a few examples that users of &PRODUCT; can build upon." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Boto Examples" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Boto is one of them. It is a Python package available at https://github.com/boto/boto. In this section we provide two examples of Python scripts that use Boto and have been tested with the &PRODUCT; AWS API Interface." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "First is an EC2 example. 
Replace the Access and Secret Keys with your own and update the endpoint." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "An EC2 Boto example" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "#!/usr/bin/env python\n" -"\n" -"import sys\n" -"import os\n" -"import boto\n" -"import boto.ec2\n" -"\n" -"region = boto.ec2.regioninfo.RegionInfo(name=\"ROOT\",endpoint=\"localhost\")\n" -"apikey='GwNnpUPrO6KgIdZu01z_ZhhZnKjtSdRwuYd4DvpzvFpyxGMvrzno2q05MB0ViBoFYtdqKd'\n" -"secretkey='t4eXLEYWw7chBhDlaKf38adCMSHx_wlds6JfSx3z9fSpSOm0AbP9Moj0oGIzy2LSC8iw'\n" -"\n" -"def main():\n" -" '''Establish connection to EC2 cloud'''\n" -" conn =boto.connect_ec2(aws_access_key_id=apikey,\n" -" aws_secret_access_key=secretkey,\n" -" is_secure=False,\n" -" region=region,\n" -" port=7080,\n" -" path=\"/awsapi\",\n" -" api_version=\"2010-11-15\")\n" -"\n" -" '''Get list of images that I own'''\n" -" images = conn.get_all_images()\n" -" print images\n" -" myimage = images[0]\n" -" '''Pick an instance type'''\n" -" vm_type='m1.small'\n" -" reservation = myimage.run(instance_type=vm_type,security_groups=['default'])\n" -"\n" -"if __name__ == '__main__':\n" -" main()\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Second is an S3 example. Replace the Access and Secret keys with your own, as well as the endpoint of the service. Be sure to also update the file paths to something that exists on your machine." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "An S3 Boto Example" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "#!/usr/bin/env python\n" -"\n" -"import sys\n" -"import os\n" -"from boto.s3.key import Key\n" -"from boto.s3.connection import S3Connection\n" -"from boto.s3.connection import OrdinaryCallingFormat\n" -"\n" -"apikey='ChOw-pwdcCFy6fpeyv6kUaR0NnhzmG3tE7HLN2z3OB_s-ogF5HjZtN4rnzKnq2UjtnHeg_yLA5gOw'\n" -"secretkey='IMY8R7CJQiSGFk4cHwfXXN3DUFXz07cCiU80eM3MCmfLs7kusgyOfm0g9qzXRXhoAPCH-IRxXc3w'\n" -"\n" -"cf=OrdinaryCallingFormat()\n" -"\n" -"def main(): \n" -" '''Establish connection to S3 service'''\n" -" conn =S3Connection(aws_access_key_id=apikey,aws_secret_access_key=secretkey, \\\n" -" is_secure=False, \\\n" -" host='localhost', \\\n" -" port=7080, \\\n" -" calling_format=cf, \\\n" -" path=\"/awsapi/rest/AmazonS3\")\n" -"\n" -" try:\n" -" bucket=conn.create_bucket('cloudstack')\n" -" k = Key(bucket)\n" -" k.key = 'test'\n" -" try:\n" -" k.set_contents_from_filename('/Users/runseb/Desktop/s3cs.py')\n" -" except:\n" -" print 'could not write file'\n" -" pass\n" -" except:\n" -" bucket = conn.get_bucket('cloudstack')\n" -" k = Key(bucket)\n" -" k.key = 'test'\n" -" try:\n" -" k.get_contents_to_filename('/Users/runseb/Desktop/foobar')\n" -" except:\n" -" print 'Could not get file'\n" -" pass\n" -"\n" -" try:\n" -" bucket1=conn.create_bucket('teststring')\n" -" k=Key(bucket1)\n" -" k.key('foobar')\n" -" k.set_contents_from_string('This is my silly test')\n" -" except:\n" -" bucket1=conn.get_bucket('teststring')\n" -" k = Key(bucket1)\n" -" k.key='foobar'\n" -" k.get_contents_as_string()\n" -" \n" -"if __name__ == '__main__':\n" -" main()\n" -"\n" -" " -msgstr "" - -#. 
Tag: title -#, no-c-format -msgid "JClouds Examples" -msgstr "" - diff --git a/docs/pot/aws-ec2-configuration.pot b/docs/pot/aws-ec2-configuration.pot deleted file mode 100644 index e49f076edaf..00000000000 --- a/docs/pot/aws-ec2-configuration.pot +++ /dev/null @@ -1,153 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Enabling the EC2 and S3 Compatible Interface" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The software that provides AWS API compatibility is installed along with &PRODUCT;. You must enable the services and perform some setup steps prior to using it." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set the global configuration parameters for each service to true. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a set of &PRODUCT; service offerings with names that match the Amazon service offerings. 
You can do this through the &PRODUCT; UI as described in the Administration Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure you have included the Amazon default service offering, m1.small. As well as any EC2 instance types that you will use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you did not already do so when you set the configuration parameter in step 1, restart the Management Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following sections provides details to perform these steps" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Enabling the Services" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To enable the EC2 and S3 compatible services you need to set the configuration variables enable.ec2.api and enable.s3.api to true. You do not have to enable both at the same time. Enable the ones you need. This can be done via the &PRODUCT; GUI by going in Global Settings or via the API." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The snapshot below shows you how to use the GUI to enable these services" -msgstr "" - -#. Tag: para -#, no-c-format -msgid " Use the GUI to set the configuration variable to true " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Using the &PRODUCT; API, the easiest is to use the so-called integration port on which you can make unauthenticated calls. In Global Settings set the port to 8096 and subsequently call the updateConfiguration method. The following urls shows you how:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" http://localhost:8096/client/api?command=updateConfiguration&name=enable.ec2.api&value=true\n" -" http://localhost:8096/client/api?command=updateConfiguration&name=enable.ec2.api&value=true\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Once you have enabled the services, restart the server." -msgstr "" - -#. 
Tag: title -#, no-c-format -msgid "Creating EC2 Compatible Service Offerings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You will also need to define compute service offerings with names compatible with the Amazon EC2 instance types API names (e.g m1.small,m1.large). This can be done via the &PRODUCT; GUI. Go under Service Offerings select Compute offering and either create a new compute offering or modify an existing one, ensuring that the name matches an EC2 instance type API name. The snapshot below shows you how:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid " Use the GUI to set the name of a compute service offering to an EC2 instance type API name. " -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Modifying the AWS API Port" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) The AWS API listens for requests on port 7080. If you prefer AWS API to listen on another port, you can change it as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the files /etc/cloud/management/server.xml, /etc/cloud/management/server-nonssl.xml, and /etc/cloud/management/server-ssl.xml." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In each file, find the tag <Service name=\"Catalina7080\">. Under this tag, locate <Connector executor=\"tomcatThreadPool-internal\" port= ....<." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Change the port to whatever port you want to use, then save the files." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you re-install &PRODUCT;, you will have to re-enable the services and if need be update the port." 
-msgstr "" - diff --git a/docs/pot/aws-ec2-introduction.pot b/docs/pot/aws-ec2-introduction.pot deleted file mode 100644 index f6c87340dea..00000000000 --- a/docs/pot/aws-ec2-introduction.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Amazon Web Services Compatible Interface" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; can translate Amazon Web Services (AWS) API calls to native &PRODUCT; API calls so that users can continue using existing AWS-compatible tools. This translation service runs as a separate web application in the same tomcat server as the management server of &PRODUCT;, listening on a different port. The Amazon Web Services (AWS) compatible interface provides the EC2 SOAP and Query APIs as well as the S3 REST API." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "This service was previously enabled by separate software called CloudBridge. It is now fully integrated with the &PRODUCT; management server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The compatible interface for the EC2 Query API and the S3 API are Work In Progress. The S3 compatible API offers a way to store data on the management server file system, it is not an implementation of the S3 backend." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Limitations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported only in zones that use basic networking." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Available in fresh installations of &PRODUCT;. Not available through upgrade of previous versions." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Features such as Elastic IP (EIP) and Elastic Load Balacing (ELB) are only available in an infrastructure with a Citrix NetScaler device. Users accessing a Zone with a NetScaler device will need to use a NetScaler-enabled network offering (DefaultSharedNetscalerEIP and ELBNetworkOffering)." -msgstr "" - diff --git a/docs/pot/aws-ec2-requirements.pot b/docs/pot/aws-ec2-requirements.pot deleted file mode 100644 index 27644e77be1..00000000000 --- a/docs/pot/aws-ec2-requirements.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Supported API Version" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The EC2 interface complies with Amazon's WDSL version dated November 15, 2010, available at http://ec2.amazonaws.com/doc/2010-11-15/." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The interface is compatible with the EC2 command-line tools EC2 tools v. 1.3.6230, which can be downloaded at http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Work is underway to support a more recent version of the EC2 API" -msgstr "" - diff --git a/docs/pot/aws-ec2-supported-commands.pot b/docs/pot/aws-ec2-supported-commands.pot deleted file mode 100644 index 9ffd9f4b6fb..00000000000 --- a/docs/pot/aws-ec2-supported-commands.pot +++ /dev/null @@ -1,665 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Supported AWS API Calls" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following Amazon EC2 commands are supported by &PRODUCT; when the AWS API compatible interface is enabled. For a few commands, there are differences between the &PRODUCT; and Amazon EC2 versions, and these differences are noted. The underlying SOAP call for each command is also given, for those who have built tools using those calls." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Elastic IP API mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "EC2 command" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "SOAP call" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "&PRODUCT; API call" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-allocate-address" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "AllocateAddress" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "associateIpAddress" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-associate-address" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "AssociateAddress" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "enableStaticNat" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-addresses" -msgstr "" - -#. 
Tag: entry -#, no-c-format -msgid "DescribeAddresses" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listPublicIpAddresses" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-diassociate-address" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DisassociateAddress" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "disableStaticNat" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-release-address" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ReleaseAddress" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "disassociateIpAddress" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Availability Zone API mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-availability-zones" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DescribeAvailabilityZones" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listZones" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Images API mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-create-image" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "CreateImage" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "createTemplate" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-deregister" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DeregisterImage" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DeleteTemplate" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-images" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DescribeImages" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listTemplates" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-register" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "RegisterImage" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "registerTemplate" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Image Attributes API mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-image-attribute" -msgstr "" - -#. 
Tag: entry -#, no-c-format -msgid "DescribeImageAttribute" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listTemplatePermissions" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-modify-image-attribute" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ModifyImageAttribute" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "updateTemplatePermissions" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-reset-image-attribute" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ResetImageAttribute" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Instances API mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-instances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DescribeInstances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listVirtualMachines" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-run-instances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "RunInstances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "deployVirtualMachine" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-reboot-instances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "RebootInstances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "rebootVirtualMachine" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-start-instances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "StartInstances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "startVirtualMachine" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-stop-instances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "StopInstances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "stopVirtualMachine" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-terminate-instances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "TerminateInstances" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "destroyVirtualMachine" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Instance Attributes Mapping" -msgstr "" - -#. 
Tag: entry -#, no-c-format -msgid "ec2-describe-instance-attribute" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DescribeInstanceAttribute" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Keys Pairs Mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-add-keypair" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "CreateKeyPair" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "createSSHKeyPair" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-delete-keypair" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DeleteKeyPair" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "deleteSSHKeyPair" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-keypairs" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DescribeKeyPairs" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listSSHKeyPairs" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-import-keypair" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ImportKeyPair" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "registerSSHKeyPair" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Passwords API Mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-get-password" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "GetPasswordData" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "getVMPassword" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Security Groups API Mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-authorize" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "AuthorizeSecurityGroupIngress" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "authorizeSecurityGroupIngress" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-add-group" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "CreateSecurityGroup" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "createSecurityGroup" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-delete-group" -msgstr "" - -#. 
Tag: entry -#, no-c-format -msgid "DeleteSecurityGroup" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "deleteSecurityGroup" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-group" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DescribeSecurityGroups" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listSecurityGroups" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-revoke" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "RevokeSecurityGroupIngress" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "revokeSecurityGroupIngress" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Snapshots API Mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-create-snapshot" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "CreateSnapshot" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "createSnapshot" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-delete-snapshot" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DeleteSnapshot" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "deleteSnapshot" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-describe-snapshots" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DescribeSnapshots" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listSnapshots" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Volumes API Mapping" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-attach-volume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "AttachVolume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "attachVolume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-create-volume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "CreateVolume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "createVolume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-delete-volume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DeleteVolume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "deleteVolume" -msgstr "" - -#. 
Tag: entry -#, no-c-format -msgid "ec2-describe-volume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DescribeVolume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "listVolumes" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "ec2-detach-volume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "DetachVolume" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "detachVolume" -msgstr "" - diff --git a/docs/pot/aws-ec2-timeouts.pot b/docs/pot/aws-ec2-timeouts.pot deleted file mode 100644 index 190ad8d82bc..00000000000 --- a/docs/pot/aws-ec2-timeouts.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using Timeouts to Ensure AWS API Command Completion" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Amazon EC2 command-line tools have a default connection timeout. When used with &PRODUCT;, a longer timeout might be needed for some commands. 
If you find that commands are not completing due to timeouts, you can specify a custom timeouts. You can add the following optional command-line parameters to any &PRODUCT;-supported EC2 command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "--connection-timeout TIMEOUT" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "Specifies a connection timeout (in seconds). Example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "--connection-timeout 30" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "--request-timeout TIMEOUT" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "Specifies a request timeout (in seconds). Example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "--request-timeout 45" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "ec2-run-instances 2 –z us-test1 –n 1-3 --connection-timeout 120 --request-timeout 120" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The timeouts optional arguments are not specific to &PRODUCT;." -msgstr "" - diff --git a/docs/pot/aws-ec2-user-setup.pot b/docs/pot/aws-ec2-user-setup.pot deleted file mode 100644 index c1412920889..00000000000 --- a/docs/pot/aws-ec2-user-setup.pot +++ /dev/null @@ -1,141 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "AWS API User Setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In general, users need not be aware that they are using a translation service provided by &PRODUCT;. They only need to send AWS API calls to &PRODUCT;'s endpoint, and it will translate the calls to the native &PRODUCT; API. Users of the Amazon EC2 compatible interface will be able to keep their existing EC2 tools and scripts and use them with their &PRODUCT; deployment, by specifying the endpoint of the management server and using the proper user credentials. In order to do this, each user must perform the following configuration steps:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Generate user credentials." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Register with the service." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For convenience, set up environment variables for the EC2 SOAP command-line tools." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "AWS API User Registration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each user must perform a one-time registration. The user follows these steps:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Obtain the following by looking in the &PRODUCT; UI, using the API, or asking the cloud administrator:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; server's publicly available DNS name or IP address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The user account's Access key and Secret key" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Generate a private key and a self-signed X.509 certificate. The user substitutes their own desired storage location for /path/to/… below." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /path/to/private_key.pem -out /path/to/cert.pem" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Register the user X.509 certificate and Access/Secret keys with the AWS compatible service. If you have the source code of &PRODUCT; go to the awsapi-setup/setup directory and use the Python script cloudstack-aws-api-register. If you do not have the source then download the script using the following command." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "wget -O cloudstack-aws-api-register \"https://git-wip-us.apache.org/repos/asf?p=incubator-cloudstack.git;a=blob_plain;f=awsapi-setup/setup/cloudstack-aws-api-register;hb=HEAD\"\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then execute it, using the parameter values that were obtained in step 1. An example is shown below." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ cloudstack-aws-api-register --apikey=User’s &PRODUCT; API key --secretkey=User’s &PRODUCT; Secret key --cert=/path/to/cert.pem --url=http://&PRODUCT;.server:7080/awsapi" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A user with an existing AWS certificate could choose to use the same certificate with &PRODUCT;, but note that the certificate would be uploaded to the &PRODUCT; management server database." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "AWS API Command-Line Tools Setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To use the EC2 command-line tools, the user must perform these steps:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure you have the right version of EC2 Tools. The supported version is available at http://s3.amazonaws.com/ec2-downloads/ec2-api-tools-1.3-62308.zip." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up the EC2 environment variables. This can be done every time you use the service or you can set them up in the proper shell profile. Replace the endpoint (i.e EC2_URL) with the proper address of your &PRODUCT; management server and port. In a bash shell do the following." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" $ export EC2_CERT=/path/to/cert.pem\n" -" $ export EC2_PRIVATE_KEY=/path/to/private_key.pem\n" -" $ export EC2_URL=http://localhost:7080/awsapi\n" -" $ export EC2_HOME=/path/to/EC2_tools_directory\n" -" " -msgstr "" - diff --git a/docs/pot/aws-interface-compatibility.pot b/docs/pot/aws-interface-compatibility.pot deleted file mode 100644 index 8ed02a8270c..00000000000 --- a/docs/pot/aws-interface-compatibility.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Amazon Web Services Compatible Interface" -msgstr "" - diff --git a/docs/pot/basic-adv-networking.pot b/docs/pot/basic-adv-networking.pot deleted file mode 100644 index 2e67f2c2d7b..00000000000 --- a/docs/pot/basic-adv-networking.pot +++ /dev/null @@ -1,195 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Basic and Advanced Networking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides two styles of networking:." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Basic" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For AWS-style networking. Provides a single network where guest isolation can be provided through layer-3 means such as security groups (IP address source filtering)." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Advanced" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks, but requires more configuration steps than basic networking." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each zone has either basic or advanced networking. Once the choice of networking model for a zone has been made and configured in &PRODUCT;, it can not be changed. A zone is either basic or advanced for its entire lifetime." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following table compares the networking features in the two networking models." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Networking Feature" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Basic Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Advanced Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Single network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Multiple networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Firewall type" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Physical" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Physical and Virtual" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Load balancer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Isolation type" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Layer 3" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Layer 2 and Layer 3" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN support" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "No" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Yes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Port forwarding" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "1:1 NAT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Source NAT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Userdata" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Network usage monitoring" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "sFlow / netFlow at physical router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor and Virtual Router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DNS and DHCP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The two types of networking may be in use in the same cloud. However, a given zone must use either Basic Networking or Advanced Networking." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Different types of network traffic can be segmented on the same physical network. Guest traffic can also be segmented by account. To isolate traffic, you can use separate VLANs. If you are using separate VLANs on a single physical network, make sure the VLAN tags are in separate numerical ranges." -msgstr "" - diff --git a/docs/pot/basic-zone-configuration.pot b/docs/pot/basic-zone-configuration.pot deleted file mode 100644 index ee76cf456f9..00000000000 --- a/docs/pot/basic-zone-configuration.pot +++ /dev/null @@ -1,360 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Basic Zone Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After you select Basic in the Add Zone wizard and click Next, you will be asked to enter the following details. Then click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. A name for the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DNS 1 and 2. These are DNS servers for use by guest VMs in the zone. These DNS servers will be accessed via the public network you will add later. The public IP addresses for the zone must have a route to the DNS server named here." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Internal DNS 1 and Internal DNS 2. These are DNS servers for use by system VMs in the zone (these are VMs used by &PRODUCT; itself, such as virtual routers, console proxies, and Secondary Storage VMs.) These DNS servers will be accessed via the management traffic network interface of the System VMs. The private IP address you provide for the pods must have a route to the internal DNS server named here." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor. (Introduced in version 3.0.1) Choose the hypervisor for the first cluster in the zone. You can add clusters with different hypervisors later, after you finish adding the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Offering. Your choice here determines what network services will be available on the network for guest VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "DefaultSharedNetworkOfferingWithSGService" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you want to enable security groups for guest traffic isolation, choose this. (See Using Security Groups to Control Traffic to VMs.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DefaultSharedNetworkOffering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you do not need security groups, choose this." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DefaultSharedNetscalerEIPandELBNetworkOffering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you have installed a Citrix NetScaler appliance as part of your zone network, and you will be using its Elastic IP and Elastic Load Balancing features, choose this. With the EIP and ELB features, a basic zone with security groups enabled can offer 1:1 static NAT and load balancing." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Domain. (Optional) If you want to assign a special domain name to the guest VM network, specify the DNS suffix." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. A public zone is available to all users. A zone that is not public will be assigned to a particular domain. Only users in that domain will be allowed to create guest VMs in this zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose which traffic types will be carried by the physical network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The traffic types are management, public, guest, and storage traffic. For more information about the types, roll over the icons to display their tool tips, or see Basic Zone Network Traffic Types. This screen starts out with some traffic types already assigned. To add more, drag and drop traffic types onto the network. You can also change the network name if desired." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Introduced in version 3.0.1) Assign a network traffic label to each traffic type on the physical network. 
These labels must match the labels you have already defined on the hypervisor host. To assign each label, click the Edit button under the traffic type icon. A popup dialog appears where you can type the label, then click OK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These traffic labels will be defined only for the hypervisor selected for the first cluster. For all other hypervisors, the labels can be configured after the zone is created." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(NetScaler only) If you chose the network offering for NetScaler, you have an additional screen to fill out. Provide the requested details to set up the NetScaler, then click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP address. The NSIP (NetScaler IP) address of the NetScaler device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Username/Password. The authentication credentials to access the device. &PRODUCT; uses these credentials to access the device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Type. NetScaler device type that is being added. It could be NetScaler VPX, NetScaler MPX, or NetScaler SDX. For a comparison of the types, see About Using a NetScaler Load Balancer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public interface. Interface of NetScaler that is configured to be part of the public network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private interface. Interface of NetScaler that is configured to be part of the private network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of retries. Number of times to attempt a command on the device before considering the operation failed. Default is 2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Capacity. Number of guest networks/accounts that will share this NetScaler device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Dedicated. 
When marked as dedicated, this device will be dedicated to a single account. When Dedicated is checked, the value in the Capacity field has no significance – implicitly, its value is 1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(NetScaler only) Configure the IP range for public traffic. The IPs in this range will be used for the static NAT capability which you enabled by selecting the network offering for NetScaler with EIP and ELB. Enter the following details, then click Add. If desired, you can repeat this step to add more IP ranges. When done, click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway. The gateway in use for these IP addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Netmask. The netmask associated with this IP range." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN. The VLAN that will be used for public traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start IP/End IP. A range of IP addresses that are assumed to be accessible from the Internet and will be allocated for access to guest VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new zone, &PRODUCT; adds the first pod for you. You can always add more pods later. For an overview of what a pod is, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure the first pod, enter the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pod Name. A name for the pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reserved system gateway. The gateway for the hosts in that pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reserved system netmask. The network prefix that defines the pod's subnet. Use CIDR notation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start/End Reserved System IP. The IP range in the management network that &PRODUCT; uses to manage various system VMs, such as Secondary Storage VMs, Console Proxy VMs, and DHCP. 
For more information, see System Reserved IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure the network for guest traffic. Provide the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest gateway. The gateway that the guests should use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest netmask. The netmask in use on the subnet the guests will use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest start IP/End IP. Enter the first and last IP addresses that define a range that &PRODUCT; can assign to guests." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We strongly recommend the use of multiple NICs. If multiple NICs are used, they may be in a different subnet." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If one NIC is used, these IPs should be in the same CIDR as the pod CIDR." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new pod, &PRODUCT; adds the first cluster for you. You can always add more clusters later. For an overview of what a cluster is, see About Clusters." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure the first cluster, enter the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor. (Version 3.0.0 only; in 3.0.1, this field is read only) Choose the type of hypervisor software that all hosts in this cluster will run. If you choose VMware, additional fields appear so you can give information about a vSphere cluster. For vSphere servers, we recommend creating the cluster of hosts in vCenter and then adding the entire cluster to &PRODUCT;. See Add Cluster: vSphere." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cluster name. Enter a name for the cluster. This can be text of your choosing and is not used by &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new cluster, &PRODUCT; adds the first host for you. You can always add more hosts later. 
For an overview of what a host is, see About Hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you add a hypervisor host to &PRODUCT;, the host must not have any VMs already running." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before you can configure the host, you need to install the hypervisor software on the host. You will need to know which version of the hypervisor software version is supported by &PRODUCT; and what additional configuration is required to ensure the host will work with &PRODUCT;. To find these installation details, see:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Citrix XenServer Installation and Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware vSphere Installation and Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM vSphere Installation and Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure the first host, enter the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host Name. The DNS name or IP address of the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Username. The username is root." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password. This is the password for the user named above (from your XenServer or KVM install)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host Tags. (Optional) Any labels that you use to categorize hosts for ease of maintenance. For example, you can set this to the cloud's HA tag (set in the ha.tag global configuration parameter) if you want this host to be used only for VMs with the \"high availability\" feature enabled. For more information, see HA-Enabled Virtual Machines as well as HA for Hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new cluster, &PRODUCT; adds the first primary storage server for you. You can always add more servers later. For an overview of what primary storage is, see About Primary Storage." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To configure the first primary storage server, enter the following, then click Next:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. The name of the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol. For XenServer, choose either NFS, iSCSI, or PreSetup. For KVM, choose NFS, SharedMountPoint,CLVM, or RBD. For vSphere choose either VMFS (iSCSI or FiberChannel) or NFS. The remaining fields in the screen vary depending on what you choose here." -msgstr "" - diff --git a/docs/pot/basic-zone-guest-ip-addresses.pot b/docs/pot/basic-zone-guest-ip-addresses.pot deleted file mode 100644 index c656eb525a5..00000000000 --- a/docs/pot/basic-zone-guest-ip-addresses.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Basic Zone Guest IP Addresses" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When basic networking is used, &PRODUCT; will assign IP addresses in the CIDR of the pod to the guests in that pod. The administrator must add a Direct IP range on the pod for this purpose. These IPs are in the same VLAN as the hosts." -msgstr "" - diff --git a/docs/pot/basic-zone-network-traffic-types.pot b/docs/pot/basic-zone-network-traffic-types.pot deleted file mode 100644 index dab50e915e5..00000000000 --- a/docs/pot/basic-zone-network-traffic-types.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Basic Zone Network Traffic Types" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When basic networking is used, there can be only one physical network in the zone. That physical network carries the following traffic types:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest. 
When end users run VMs, they generate guest traffic. The guest VMs communicate with each other over a network that can be referred to as the guest network. Each pod in a basic zone is a broadcast domain, and therefore each pod has a different IP range for the guest network. The administrator must configure the IP range for each pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Management. When &PRODUCT;’s internal resources communicate with each other, they generate management traffic. This includes communication between hosts, system VMs (VMs used by &PRODUCT; to perform various tasks in the cloud), and any other component that communicates directly with the &PRODUCT; Management Server. You must configure the IP range for the system VMs to use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We strongly recommend the use of separate NICs for management traffic and guest traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. Public traffic is generated when VMs in the cloud access the Internet. Publicly accessible IPs must be allocated for this purpose. End users can use the &PRODUCT; UI to acquire these IPs to implement NAT between their guest network and the public network, as described in Acquiring a New IP Address." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage. Traffic such as VM templates and snapshots, which is sent between the secondary storage VM and secondary storage servers. &PRODUCT; uses a separate Network Interface Controller (NIC) named storage NIC for storage network traffic. Use of a storage NIC that always operates on a high bandwidth network allows fast template and snapshot copying. You must configure the IP range to use for the storage network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a basic network, configuring the physical network is fairly straightforward. In most cases, you only need to configure one guest network to carry traffic that is generated by guest VMs. 
If you use a NetScaler load balancer and enable its elastic IP and elastic load balancing (EIP and ELB) features, you must also configure a network to carry public traffic. &PRODUCT; takes care of presenting the necessary network configuration steps to you in the UI when you add a new zone." -msgstr "" - diff --git a/docs/pot/basic-zone-physical-network-configuration.pot b/docs/pot/basic-zone-physical-network-configuration.pot deleted file mode 100644 index a9d81520fa3..00000000000 --- a/docs/pot/basic-zone-physical-network-configuration.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Basic Zone Physical Network Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a basic network, configuring the physical network is fairly straightforward. You only need to configure one guest network to carry traffic that is generated by guest VMs. 
When you first add a zone to &PRODUCT;, you set up the guest network through the Add Zone screens." -msgstr "" - diff --git a/docs/pot/best-practices-for-vms.pot b/docs/pot/best-practices-for-vms.pot deleted file mode 100644 index 9e8ca118723..00000000000 --- a/docs/pot/best-practices-for-vms.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Best Practices for Virtual Machines" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; administrator should monitor the total number of VM instances in each cluster, and disable allocation to the cluster if the total is approaching the maximum that the hypervisor can handle. Be sure to leave a safety margin to allow for the possibility of one or more hosts failing, which would increase the VM load on the other hosts as the VMs are automatically redeployed. 
Consult the documentation for your chosen hypervisor to find the maximum permitted number of VMs per host, then use &PRODUCT; global configuration settings to set this as the default limit. Monitor the VM activity in each cluster at all times. Keep the total number of VMs below a safe level that allows for the occasional host failure. For example, if there are N hosts in the cluster, and you want to allow for one host in the cluster to be down at any given time, the total number of VM instances you can permit in the cluster is at most (N-1) * (per-host-limit). Once a cluster reaches this number of VMs, use the &PRODUCT; UI to disable allocation of more VMs to the cluster." -msgstr "" - diff --git a/docs/pot/best-practices-primary-storage.pot b/docs/pot/best-practices-primary-storage.pot deleted file mode 100644 index 6dd7ab2af01..00000000000 --- a/docs/pot/best-practices-primary-storage.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Best Practices for Primary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The speed of primary storage will impact guest performance. If possible, choose smaller, higher RPM drives for primary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that nothing is stored on the server. Adding the server to &PRODUCT; will destroy any existing data" -msgstr "" - diff --git a/docs/pot/best-practices-secondary-storage.pot b/docs/pot/best-practices-secondary-storage.pot deleted file mode 100644 index 43326cf0292..00000000000 --- a/docs/pot/best-practices-secondary-storage.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Best Practices for Secondary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each Zone can have one or more secondary storage servers. Multiple secondary storage servers provide increased scalability to the system." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Secondary storage has a high read:write ratio and is expected to consist of larger drives with lower IOPS than primary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that nothing is stored on the server. Adding the server to &PRODUCT; will destroy any existing data." -msgstr "" - diff --git a/docs/pot/best-practices-templates.pot b/docs/pot/best-practices-templates.pot deleted file mode 100644 index 4b822784dc1..00000000000 --- a/docs/pot/best-practices-templates.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Best Practices for Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you plan to use large templates (100 GB or larger), be sure you have a 10-gigabit network to support the large templates. A slower network can lead to timeouts and other errors when large templates are used." -msgstr "" - diff --git a/docs/pot/best-practices-virtual-router.pot b/docs/pot/best-practices-virtual-router.pot deleted file mode 100644 index d1cd63a5982..00000000000 --- a/docs/pot/best-practices-virtual-router.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Best Practices for Virtual Routers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "WARNING: Restarting a virtual router from a hypervisor console deletes all the iptables rules. To work around this issue, stop the virtual router and start it from the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "WARNING: Do not use the destroyRouter API when only one router is available in the network, because restartNetwork API with the cleanup=false parameter can't recreate it later. If you want to destroy and recreate the single router available in the network, use the restartNetwork API with the cleanup=true parameter." -msgstr "" - diff --git a/docs/pot/build-deb.pot b/docs/pot/build-deb.pot deleted file mode 100644 index 995b086af5b..00000000000 --- a/docs/pot/build-deb.pot +++ /dev/null @@ -1,175 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Building DEB packages" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition to the bootstrap dependencies, you'll also need to install several other dependencies. Note that we recommend using Maven 3, which is not currently available in 12.04.1 LTS. So, you'll also need to add a PPA repository that includes Maven 3. After running the command add-apt-repository, you will be prompted to continue and a GPG key will be added." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "\n" -"$ sudo apt-get update\n" -"$ sudo apt-get install python-software-properties\n" -"$ sudo add-apt-repository ppa:natecarlson/maven3\n" -"$ sudo apt-get update\n" -"$ sudo apt-get install ant debhelper openjdk-6-jdk tomcat6 libws-commons-util-java genisoimage python-mysqldb libcommons-codec-java libcommons-httpclient-java liblog4j1.2-java maven3\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "While we have defined, and you have presumably already installed the bootstrap prerequisites, there are a number of build time prerequisites that need to be resolved. &PRODUCT; uses maven for dependency resolution. You can resolve the buildtime depdencies for CloudStack by running:" -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "$ mvn3 -P deps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now that we have resolved the dependencies we can move on to building &PRODUCT; and packaging them into DEBs by issuing the following command." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "\n" -"$ dpkg-buildpackage -uc -us\n" -"" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "This command will build 16 Debian packages. You should have all of the following:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"cloud-agent_4.0.0-incubating_amd64.deb\n" -"cloud-agent-deps_4.0.0-incubating_amd64.deb\n" -"cloud-agent-libs_4.0.0-incubating_amd64.deb\n" -"cloud-awsapi_4.0.0-incubating_amd64.deb\n" -"cloud-cli_4.0.0-incubating_amd64.deb\n" -"cloud-client_4.0.0-incubating_amd64.deb\n" -"cloud-client-ui_4.0.0-incubating_amd64.deb\n" -"cloud-core_4.0.0-incubating_amd64.deb\n" -"cloud-deps_4.0.0-incubating_amd64.deb\n" -"cloud-python_4.0.0-incubating_amd64.deb\n" -"cloud-scripts_4.0.0-incubating_amd64.deb\n" -"cloud-server_4.0.0-incubating_amd64.deb\n" -"cloud-setup_4.0.0-incubating_amd64.deb\n" -"cloud-system-iso_4.0.0-incubating_amd64.deb\n" -"cloud-usage_4.0.0-incubating_amd64.deb\n" -"cloud-utils_4.0.0-incubating_amd64.deb\n" -"" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Setting up an APT repo" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After you've created the packages, you'll want to copy them to a system where you can serve the packages over HTTP. You'll create a directory for the packages and then use dpkg-scanpackages to create Packages.gz, which holds information about the archive structure. Finally, you'll add the repository to your system(s) so you can install the packages using APT." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The first step is to make sure that you have the dpkg-dev package installed. This should have been installed when you pulled in the debhelper application previously, but if you're generating Packages.gz on a different system, be sure that it's installed there as well." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "$ sudo apt-get install dpkg-dev" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The next step is to copy the DEBs to the directory where they can be served over HTTP. 
We'll use /var/www/cloudstack/repo in the examples, but change the directory to whatever works for you." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "\n" -"sudo mkdir -p /var/www/cloudstack/repo/binary\n" -"sudo cp *.deb /var/www/cloudstack/repo/binary\n" -"sudo cd /var/www/cloudstack/repo/binary\n" -"sudo dpkg-scanpackages . /dev/null | tee Packages | gzip -9 > Packages.gz\n" -"" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Note: Override Files" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can safely ignore the warning about a missing override file." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now you should have all of the DEB packages and Packages.gz in the binary directory and available over HTTP. (You may want to use wget or curl to test this before moving on to the next step.)" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configuring your machines to use the APT repository" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now that we have created the repository, you need to configure your machine to make use of the APT repository. You can do this by adding a repository file under /etc/apt/sources.list.d. Use your preferred editor to create /etc/apt/sources.list.d/cloudstack.list with this line:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "deb http://server.url/cloudstack/repo binary ./" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now that you have the repository info in place, you'll want to run another update so that APT knows where to find the &PRODUCT; packages." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "$ sudo apt-get update" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can now move on to the instructions under Install on Ubuntu." 
-msgstr "" - diff --git a/docs/pot/build-rpm.pot b/docs/pot/build-rpm.pot deleted file mode 100644 index a514062a672..00000000000 --- a/docs/pot/build-rpm.pot +++ /dev/null @@ -1,111 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Building RPMs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "While we have defined, and you have presumably already installed the bootstrap prerequisites, there are a number of build time prerequisites that need to be resolved. &PRODUCT; uses maven for dependency resolution. You can resolve the buildtime depdencies for CloudStack by running the following command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ mvn -P deps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now that we have resolved the dependencies we can move on to building &PRODUCT; and packaging them into RPMs by issuing the following command." -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "$ ./waf rpm" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Once this completes, you should find assembled RPMs in artifacts/rpmbuild/RPMS/x86_64" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Creating a yum repo" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "While RPMs is an ideal packaging format - it's most easily consumed from yum repositories over a network. We'll move into the directory with the newly created RPMs by issuing the following command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ cd artifacts/rpmbuild/RPMS/x86_64" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Next we'll issue a command to create the repository metadata by issuing the following command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ createrepo ./" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The files and directories within our current working directory can now be uploaded to a web server and serve as a yum repository" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configuring your systems to use your new yum repository" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now that your yum repository is populated with RPMs and metadata we need to configure our machines that need to install CloudStack. We will create a file at /etc/yum.repos.d/cloudstack.repo with the following content:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"[apache-cloudstack]\n" -"name=Apache CloudStack\n" -"baseurl=http://webserver.tld/path/to/repo\n" -"enabled=1\n" -"gpgcheck=0\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Completing this step will allow you to easily install CloudStack on a number of machines across the network." 
-msgstr "" - diff --git a/docs/pot/building-devcloud.pot b/docs/pot/building-devcloud.pot deleted file mode 100644 index ef4d81475d8..00000000000 --- a/docs/pot/building-devcloud.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Building DevCloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The DevCloud appliance can be downloaded from the wiki at . It can also be built from scratch. Code is being developed to provide this alternative build. It is based on veewee, Vagrant and Puppet." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The goal is to automate the DevCloud build and make this automation capability available to all within the source release of &PRODUCT;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This is under heavy development. The code is located in the source tree under tools/devcloud" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "A preliminary wiki page describes the build at https://cwiki.apache.org/CLOUDSTACK/building-devcloud.html" -msgstr "" - diff --git a/docs/pot/building-marvin.pot b/docs/pot/building-marvin.pot deleted file mode 100644 index 500d792704d..00000000000 --- a/docs/pot/building-marvin.pot +++ /dev/null @@ -1,91 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Building and Installing Marvin" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Marvin is built with Maven and is dependent on APIdoc. To build it do the following in the root tree of &PRODUCT;:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mvn -P developer -l :cloud-apidoc" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mvn -P developer -l :cloud-marvin" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If successful, the build will have created the cloudstackAPI Python package under tools/marvin/marvin/cloudstackAPI as well as a gzipped Marvin package under tools/marvin/dist. To install the Python Marvin module do the following in tools/marvin:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "sudo python ./setup.py install" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The dependencies will be downloaded, the Python module installed, and you should be able to use Marvin in Python. Check that you can import the module before starting to use it." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ python\n" -"Python 2.7.3 (default, Nov 17 2012, 19:54:34) \n" -"[GCC 4.2.1 Compatible Apple Clang 4.1 ((tags/Apple/clang-421.11.66))] on darwin\n" -"Type \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n" -">>> import marvin\n" -">>> from marvin.cloudstackAPI import *\n" -">>> " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You could also install it using pip or easy_install using the local distribution package in tools/marvin/dist :" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "pip install tools/marvin/dist/Marvin-0.1.0.tar.gz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Or:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "easy_install tools/marvin/dist/Marvin-0.1.0.tar.gz" -msgstr "" - diff --git a/docs/pot/building-prerequisites.pot b/docs/pot/building-prerequisites.pot deleted file mode 100644 index 8758528a926..00000000000 --- a/docs/pot/building-prerequisites.pot +++ /dev/null @@ -1,116 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Build Procedure Prerequisites" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In this section we will assume that you are using the Ubuntu Linux distribution with the Advanced Packaging Tool (APT). If you are using a different distribution or OS and a different packaging tool, adapt the following instructions to your environment. To build &PRODUCT; you will need:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "git, http://git-scm.com" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "sudo apt-get install git-core" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "maven, http://maven.apache.org" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "sudo apt-get install maven" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure that you installed maven 3" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "$ mvn --version\n" -"Apache Maven 3.0.4\n" -"Maven home: /usr/share/maven\n" -"Java version: 1.6.0_24, vendor: Sun Microsystems Inc.\n" -"Java home: /usr/lib/jvm/java-6-openjdk-amd64/jre\n" -"Default locale: en_US, platform encoding: UTF-8\n" -"OS name: \"linux\", version: \"3.2.0-33-generic\", arch: \"amd64\", family: \"unix\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "java" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "set the JAVA_HOME environment variable" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ export JAVA_HOME=/usr/lib/jvm/java-6-openjdk" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition, to deploy and run &PRODUCT; in a development environment you will need:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Mysql" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "sudo apt-get install mysql-server-5.5" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start the mysqld service and create a cloud user with cloud as a password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tomcat 6" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "sudo apt-get install tomcat6" -msgstr "" - diff --git a/docs/pot/building-with-maven-deploy.pot b/docs/pot/building-with-maven-deploy.pot deleted file mode 100644 index 5efce88366d..00000000000 --- a/docs/pot/building-with-maven-deploy.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Deployment and Testing Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Deploying the &PRODUCT; code that you compiled is a two step process:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you have not configured the database or modified its properties do:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mvn -P developer -pl developer -Ddeploydb" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then you need to run the &PRODUCT; management server. To attach a debugger to it, do:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "export MAVEN_OPTS=\"-Xmx1024 -Xdebug -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=n\"" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mvn -pl :cloud-client-ui jetty:run" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When dealing with the database, remember that you may wipe it entirely and lose any data center configuration that you may have set previously." 
-msgstr "" - diff --git a/docs/pot/building-with-maven-steps.pot b/docs/pot/building-with-maven-steps.pot deleted file mode 100644 index 3e8bdb955f7..00000000000 --- a/docs/pot/building-with-maven-steps.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Building Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; uses git for source version control, first make sure you have the source code by pulling it:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "git clone https://git-wip-us.apache.org/repos/asf/incubator-cloudstack.git" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Several Project Object Models (POM) are defined to deal with the various build targets of &PRODUCT;. Certain features require some packages that are not compatible with the Apache license and therefore need to be downloaded on your own. 
Check the wiki for additional information https://cwiki.apache.org/CLOUDSTACK/building-with-maven.html. In order to build all the open source targets of &PRODUCT; do:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mvn clean install" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The resulting jar files will be in the target directory of the subdirectory of the compiled module." -msgstr "" - diff --git a/docs/pot/building-with-maven.pot b/docs/pot/building-with-maven.pot deleted file mode 100644 index aaa0a27b0a5..00000000000 --- a/docs/pot/building-with-maven.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Using Maven to Build &PRODUCT;" -msgstr "" - diff --git a/docs/pot/castor-with-cs.pot b/docs/pot/castor-with-cs.pot deleted file mode 100644 index 1637a6b3482..00000000000 --- a/docs/pot/castor-with-cs.pot +++ /dev/null @@ -1,123 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using the CAStor Back-end Storage with &PRODUCT;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section describes how to use a CAStor cluster as the back-end storage system for a &PRODUCT; S3 front-end. The CAStor back-end storage for &PRODUCT; extends the existing storage classes and allows the storage configuration attribute to point to a CAStor cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This feature makes use of the &PRODUCT; server's local disk to spool files before writing them to CAStor when handling the PUT operations. 
However, a file must be successfully written into the CAStor cluster prior to the return of a success code to the S3 client to ensure that the transaction outcome is correctly reported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The S3 multipart file upload is not supported in this release. You are prompted with proper error message if a multipart upload is attempted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure CAStor:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install &PRODUCT; 4.0.0-incubating by following the instructions given in the INSTALL.txt file." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can use the S3 storage system in &PRODUCT; without setting up and installing the compute components." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Enable the S3 API by setting \"enable.s3.api = true\" in the Global parameter section in the UI and register a user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information, see S3 API in &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the cloud-bridge.properties file and modify the \"storage.root\" parameter." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set \"storage.root\" to the key word \"castor\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify a CAStor tenant domain to which content is written. If the domain is not specified, the CAStor default domain, specified by the \"cluster\" parameter in CAStor's node.cfg file, will be used." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify a list of node IP addresses, or set \"zeroconf\" and the cluster name. When using a static IP list with a large cluster, it is not necessary to include every node, only a few is required to initialize the client software." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "storage.root=castor domain=cloudstack 10.1.1.51 10.1.1.52 10.1.1.53" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In this example, the configuration file directs &PRODUCT; to write the S3 files to CAStor instead of to a file system, where the CAStor domain name is cloudstack, and the CAStor node IP addresses are those listed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) The last value is a port number on which to communicate with the CAStor cluster. If not specified, the default is 80." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "#Static IP list with optional port\n" -"storage.root=castor domain=cloudstack 10.1.1.51 10.1.1.52 10.1.1.53 80\n" -"#Zeroconf locator for cluster named \"castor.example.com\"\n" -"storage.root=castor domain=cloudstack zeroconf=castor.example.com" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create the tenant domain within the CAStor storage cluster. If you omit this step before attempting to store content, you will get HTTP 412 errors in the awsapi.log." -msgstr "" - diff --git a/docs/pot/change-console-proxy-ssl-certificate-domain.pot b/docs/pot/change-console-proxy-ssl-certificate-domain.pot deleted file mode 100644 index cd1d121d03f..00000000000 --- a/docs/pot/change-console-proxy-ssl-certificate-domain.pot +++ /dev/null @@ -1,120 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing the Console Proxy SSL Certificate and Domain" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the administrator prefers, it is possible for the URL of the customer's console session to show a domain other than realhostip.com. The administrator can customize the displayed domain by selecting a different domain and uploading a new SSL certificate and private key. The domain must run a DNS service that is capable of resolving queries for addresses of the form aaa-bbb-ccc-ddd.your.domain to an IPv4 IP address in the form aaa.bbb.ccc.ddd, for example, 202.8.44.1. To change the console proxy domain, SSL certificate, and private key:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up dynamic name resolution or populate all possible DNS names in your public IP range into your existing DNS server with the format aaa-bbb-ccc-ddd.company.com -> aaa.bbb.ccc.ddd." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Generate the private key and certificate signing request (CSR). When you are using openssl to generate private/public key pairs and CSRs, for the private key that you are going to paste into the &PRODUCT; UI, be sure to convert it into PKCS#8 format." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Generate a new 2048-bit private key" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "openssl genrsa -des3 -out yourprivate.key 2048" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Generate a new certificate CSR" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "openssl req -new -key yourprivate.key -out yourcertificate.csr" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Head to the website of your favorite trusted Certificate Authority, purchase an SSL certificate, and submit the CSR. You should receive a valid certificate in return" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Convert your private key format into PKCS#8 encrypted format." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "openssl pkcs8 -topk8 -in yourprivate.key -out yourprivate.pkcs8.encrypted.key" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Convert your PKCS#8 encrypted private key into the PKCS#8 format that is compliant with &PRODUCT;" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "openssl pkcs8 -in yourprivate.pkcs8.encrypted.key -out yourprivate.pkcs8.key" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Update SSL Certificate screen of the &PRODUCT; UI, paste the following" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Certificate from step 1(c)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private key from step 1(e)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The desired new domain name; for example, company.com" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This stops all currently running console proxy VMs, then restarts them with the new certificate and key. Users might notice a brief interruption in console availability" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server will generate URLs of the form \"aaa-bbb-ccc-ddd.company.com\" after this change is made. 
New console requests will be served with the new DNS domain name, certificate, and key" -msgstr "" - diff --git a/docs/pot/change-database-config.pot b/docs/pot/change-database-config.pot deleted file mode 100644 index f4f6eb056da..00000000000 --- a/docs/pot/change-database-config.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing the Database Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; Management Server stores database configuration information (e.g., hostname, port, credentials) in the file /etc/cloud/management/db.properties. To effect a change, edit this file on each Management Server, then restart the Management Server." 
-msgstr "" - diff --git a/docs/pot/change-database-password.pot b/docs/pot/change-database-password.pot deleted file mode 100644 index 19445d56675..00000000000 --- a/docs/pot/change-database-password.pot +++ /dev/null @@ -1,126 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing the Database Password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You may need to change the password for the MySQL account used by CloudStack. If so, you'll need to change the password in MySQL, and then add the encrypted password to /etc/cloud/management/db.properties." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before changing the password, you'll need to stop CloudStack's management server and the usage engine if you've deployed that component." -msgstr "" - -#. 
Tag: screen -#, no-c-format -msgid "\n" -"# service cloud-management stop\n" -"# service cloud-usage stop\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Next, you'll update the password for the CloudStack user on the MySQL server." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "\n" -"# mysql -u root -p\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "At the MySQL shell, you'll change the password and flush privileges:" -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "\n" -"update mysql.user set password=PASSWORD(\"newpassword123\") where User='cloud';\n" -"flush privileges;\n" -"quit;\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The next step is to encrypt the password and copy the encrypted password to CloudStack's database configuration (/etc/cloud/management/db.properties)." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "\n" -"# java -classpath /usr/share/java/cloud-jasypt-1.8.jar \\\n" -"org.jasypt.intf.cli.JasyptPBEStringEncryptionCLI encrypt.sh \\ \n" -"input=\"newpassword123\" password=\"`cat /etc/cloud/management/key`\" \\\n" -"verbose=false \n" -"\n" -"" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "File encryption type" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Note that this is for the file encryption type. If you're using the web encryption type then you'll use password=\"management_server_secret_key\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now, you'll update /etc/cloud/management/db.properties with the new ciphertext. Open /etc/cloud/management/db.properties in a text editor, and update these parameters:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"db.cloud.password=ENC(encrypted_password_from_above) \n" -"db.usage.password=ENC(encrypted_password_from_above) \n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After copying the new password over, you can now start CloudStack (and the usage engine, if necessary)." -msgstr "" - -#. 
Tag: screen -#, no-c-format -msgid "\n" -" # service cloud-management start\n" -" # service cloud-usage start\n" -" " -msgstr "" - diff --git a/docs/pot/change-host-password.pot b/docs/pot/change-host-password.pot deleted file mode 100644 index 8623e8579b1..00000000000 --- a/docs/pot/change-host-password.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing Host Password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The password for a XenServer Node, KVM Node, or vSphere Node may be changed in the database. Note that all Nodes in a Cluster must have the same password." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To change a Node's password:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Identify all hosts in the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Change the password on all hosts in the cluster. 
Now the password for the host and the password known to &PRODUCT; will not match. Operations on the cluster will fail until the two passwords match." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Get the list of host IDs for the host in the cluster where you are changing the password. You will need to access the database to determine these host IDs. For each hostname \"h\" (or vSphere cluster) that you are changing the password for, execute:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mysql> select id from cloud.host where name like '%h%';" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This should return a single ID. Record the set of such IDs for these hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Update the passwords for the host in the database. In this example, we change the passwords for hosts with IDs 5, 10, and 12 to \"password\"." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mysql> update cloud.host set password='password' where id=5 or id=10 or id=12;" -msgstr "" - diff --git a/docs/pot/change-network-offering-on-guest-network.pot b/docs/pot/change-network-offering-on-guest-network.pot deleted file mode 100644 index 1c2778bd8b2..00000000000 --- a/docs/pot/change-network-offering-on-guest-network.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing the Network Offering on a Guest Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A user or administrator can change the network offering that is associated with an existing guest network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are changing from a network offering that uses the &PRODUCT; virtual router to one that uses external devices as network service providers, you must first stop all the VMs on the network. See Stopping and Starting VMs. Then return here and continue to the next step" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the network you want to modify AttachDiskButton.png: button to attach a volume ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Network Offering, choose the new network offering, then click Apply." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A prompt appears asking whether you want to keep the existing CIDR. This is to let you know that if you change the network offering, the CIDR will be affected. Choose No to proceed with the change." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait for the update to complete. Don’t try to restart VMs until after the network change is complete." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you stopped any VMs in step 2, restart them." 
-msgstr "" - diff --git a/docs/pot/change-to-behavior-of-list-commands.pot b/docs/pot/change-to-behavior-of-list-commands.pot deleted file mode 100644 index 4ecb6440471..00000000000 --- a/docs/pot/change-to-behavior-of-list-commands.pot +++ /dev/null @@ -1,170 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Change to Behavior of List Commands" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There was a major change in how our List* API commands work in CloudStack 3.0 compared to 2.2.x. The rules below apply only for managed resources – those that belong to an account, domain, or project. They are irrelevant for the List* commands displaying unmanaged (system) resources, such as hosts, clusters, and external network resources." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When no parameters are passed in to the call, the caller sees only resources owned by the caller (even when the caller is the administrator). Previously, the administrator saw everyone else's resources by default." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When accountName and domainId are passed in:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The caller sees the resources dedicated to the account specified." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the call is executed by a regular user, the user is authorized to specify only the user's own account and domainId." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the caller is a domain administrator, CloudStack performs an authorization check to see whether the caller is permitted to view resources for the given account and domainId." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When projectId is passed in, only resources belonging to that project are listed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When domainId is passed in, the call returns only resources belonging to the domain specified. To see the resources of subdomains, use the parameter isRecursive=true. Again, the regular user can see only resources owned by that user, the root administrator can list anything, and a domain administrator is authorized to see only resources of the administrator's own domain and subdomains." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To see all resources the caller is authorized to see, except for Project resources, use the parameter listAll=true." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To see all Project resources the caller is authorized to see, use the parameter projectId=-1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There is one API command that doesn't fall under the rules above completely: the listTemplates command. This command has its own flags defining the list rules:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listTemplates Flag" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "featured" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Returns templates that have been marked as featured and public." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "self" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Returns templates that have been registered or created by the calling user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "selfexecutable" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Same as self, but only returns templates that are ready to be deployed with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "sharedexecutable" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ready templates that have been granted to the calling user by another user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "executable" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Templates that are owned by the calling user, or public templates, that can be used to deploy a new VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "community" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Returns templates that have been marked as public but not featured." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "all" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Returns all templates (only usable by admins)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; UI on a general view will display all resources that the logged-in user is authorized to see, except for project resources. To see the project resources, select the project view." 
-msgstr "" - diff --git a/docs/pot/changed-apicommands-4-0.pot b/docs/pot/changed-apicommands-4-0.pot deleted file mode 100644 index 7f0024f7824..00000000000 --- a/docs/pot/changed-apicommands-4-0.pot +++ /dev/null @@ -1,575 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changed API Commands in 4.0.0-incubating" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "API Commands" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "copyTemplate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "prepareTemplate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "registerTemplate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateTemplate" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createProject" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "activateProject" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "suspendProject" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateProject" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listProjectAccounts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "migrateVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "attachVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "detachVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "uploadVolume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createSecurityGroup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "registerIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "copyIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createIpForwardingRule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listIpForwardingRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createLoadBalancerRule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateLoadBalancerRule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createSnapshot" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The commands in this list have a single new response parameter, and no other changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameter: tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Many other commands also have the new tags(*) parameter in addition to other changes; those commands are listed separately." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rebootVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "attachIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "detachIso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listLoadBalancerRuleInstances" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "resetPasswordForVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "changeServiceForVirtualMachine" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "recoverVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "migrateVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deployVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "assignVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "restoreVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "stopVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "destroyVirtualMachine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The commands in this list have two new response parameters, and no other changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: keypair, tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSecurityGroups" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listFirewallRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPortForwardingRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSnapshots" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listIsos" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listProjects" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listTemplates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listLoadBalancerRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The commands in this list have the following new parameters, and no other changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameter: tags (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listF5LoadBalancerNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetscalerLoadBalancerNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSrxFirewallNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateNetwork" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The commands in this list have three new response parameters, and no other changes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: canusefordeploy, vpcid, tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createZone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateZone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameter: localstorageenabled (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameter: localstorageenabled" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listZones" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rebootRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "changeServiceForRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "destroyRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "stopRouter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: vpcid, nic(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateAccount" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "disableAccount" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listAccounts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "markDefaultZoneForAccount" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "enableAccount" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: vpcavailable, vpclimit, vpctotal" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listRouters" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: forvpc (optional), vpcid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworkOfferings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: forvpc (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: forvpc" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listVolumes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: details (optional), tags (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addTrafficMonitor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: excludezones (optional), includezones (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createNetwork" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: vpcid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPublicIpAddresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: tags (optional), vpcid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: vpcid, tags(*)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: canusefordeploy (optional), forvpc (optional), tags (optional), vpcid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "restartNetwork" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "enableStaticNat" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameter: networkid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createDiskOffering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameter: storagetype (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameter: storagetype" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listDiskOfferings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateDiskOffering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createFirewallRule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Changed request parameters: ipaddressid (old version - optional, new version - required)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listVirtualMachines" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New request parameters: isoid (optional), tags (optional), templateid (optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateStorageNetworkIpRange" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "New response parameters: id, endip, gateway, netmask, networkid, podid, startip, vlan, zoneid" -msgstr "" - diff --git a/docs/pot/changing-root-password.pot b/docs/pot/changing-root-password.pot deleted file mode 100644 index 996cbc2fe08..00000000000 --- a/docs/pot/changing-root-password.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing the Root Password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "During installation and ongoing cloud administration, you will need to log in to the UI as the root administrator. 
The root administrator account manages the &PRODUCT; deployment, including physical infrastructure. The root administrator can modify configuration settings to change basic functionality, create or delete user accounts, and take many actions that should be performed only by an authorized person. When first installing &PRODUCT;, be sure to change the default password to a new, unique value." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Open your favorite Web browser and go to this URL. Substitute the IP address of your own Management Server:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://<management-server-ip-address>:8080/client" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the UI using the current root user ID and password. The default is admin, password." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Accounts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the admin account name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View Users." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the admin user name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Change Password button. change-password.png: button to change a user's password " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Type the new password, and click OK." -msgstr "" - diff --git a/docs/pot/changing-secondary-storage-ip.pot b/docs/pot/changing-secondary-storage-ip.pot deleted file mode 100644 index c02be1a977d..00000000000 --- a/docs/pot/changing-secondary-storage-ip.pot +++ /dev/null @@ -1,62 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Making API Requests" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can change the secondary storage IP address after it has been provisioned. After changing the IP address on the host, log in to your management server and execute the following commands. Replace HOSTID below with your own value, and change the URL to use the appropriate IP address and path for your server:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" # mysql -p\n" -" mysql> use cloud;\n" -" mysql> select id from host where type = 'SecondaryStorage';\n" -" mysql> update host_details set value = 'nfs://192.168.160.20/export/mike-ss1'\n" -" where host_id = HOSTID and name = 'orig.url';\n" -" mysql> update host set name = 'nfs://192.168.160.20/export/mike-ss1' where type\n" -" = 'SecondaryStorage' and id = #;\n" -" mysql> update host set url = 'nfs://192.168.160.20/export/mike-ss1' where type\n" -" = 'SecondaryStorage' and id = #;\n" -" mysql> update host set guid = 'nfs://192.168.160.20/export/mike-ss1' where type\n" -" = 'SecondaryStorage' and id = #;\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When copying and pasting a command, be sure the command has pasted as a single line before executing. 
Some document viewers may introduce unwanted line breaks in copied text." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then log in to the cloud console UI and stop and start (not reboot) the Secondary Storage VM for that Zone." -msgstr "" - diff --git a/docs/pot/changing-secondary-storage-servers.pot b/docs/pot/changing-secondary-storage-servers.pot deleted file mode 100644 index 2a17400909a..00000000000 --- a/docs/pot/changing-secondary-storage-servers.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing Secondary Storage Servers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can change the secondary storage NFS mount. Perform the following steps to do so:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop all running Management Servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait 30 minutes. 
This allows any writes to secondary storage to complete." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy all files from the old secondary storage mount to the new." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use the procedure above to change the IP address for secondary storage if required." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start the Management Server." -msgstr "" - diff --git a/docs/pot/changing-service-offering-for-vm.pot b/docs/pot/changing-service-offering-for-vm.pot deleted file mode 100644 index cc0232f39cb..00000000000 --- a/docs/pot/changing-service-offering-for-vm.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing the Service Offering for a VM" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To upgrade or downgrade the level of compute resources available to a virtual machine, you can change the VM's compute offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Instances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the VM that you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Stop button to stop the VM StopButton.png: button to stop a VM " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Change Service button ChangeServiceButton.png: button to change the service of a VM . The Change service dialog box is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the offering you want." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - diff --git a/docs/pot/changing-vm-name-os-group.pot b/docs/pot/changing-vm-name-os-group.pot deleted file mode 100644 index cbf2c279e52..00000000000 --- a/docs/pot/changing-vm-name-os-group.pot +++ /dev/null @@ -1,90 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing the VM Name, OS, or Group" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After a VM is created, you can modify the display name, operating system, and the group it belongs to." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To access a VM through the &PRODUCT; UI:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Instances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the VM that you want to modify." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Stop button to stop the VM StopButton.png: button to stop a VM " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Edit StopButton.png: button to edit the properties of a VM ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make the desired changes to the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Display name: Enter a new display name if you want to change the name of the VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OS Type: Select the desired operating system." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Group: Enter the group name for the VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Apply." 
-msgstr "" - diff --git a/docs/pot/choosing-a-deployment-architecture.pot b/docs/pot/choosing-a-deployment-architecture.pot deleted file mode 100644 index a42b765b542..00000000000 --- a/docs/pot/choosing-a-deployment-architecture.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Choosing a Deployment Architecture" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The architecture used in a deployment will vary depending on the size and purpose of the deployment. This section contains examples of deployment architecture, including a small-scale deployment useful for test and trial deployments and a fully-redundant large-scale setup for production deployments." 
-msgstr "" - diff --git a/docs/pot/cisco3750-hardware.pot b/docs/pot/cisco3750-hardware.pot deleted file mode 100644 index f819da49919..00000000000 --- a/docs/pot/cisco3750-hardware.pot +++ /dev/null @@ -1,76 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Cisco 3750" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following steps show how a Cisco 3750 is configured for zone-level layer-3 switching. These steps assume VLAN 201 is used to route untagged private IPs for pod 1, and pod 1’s layer-2 switch is connected to GigabitEthernet1/0/1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Setting VTP mode to transparent allows us to utilize VLAN IDs above 1000. Since we only use VLANs up to 999, vtp transparent mode is not strictly required." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vtp mode transparent\n" -"vlan 200-999\n" -"exit" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Configure GigabitEthernet1/0/1." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "interface GigabitEthernet1/0/1\n" -"switchport trunk encapsulation dot1q\n" -"switchport mode trunk\n" -"switchport trunk native vlan 201\n" -"exit" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The statements configure GigabitEthernet1/0/1 as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN 201 is the native untagged VLAN for port GigabitEthernet1/0/1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cisco passes all VLANs by default. As a result, all VLANs (300-999) are passed to all the pod-level layer-2 switches." -msgstr "" - diff --git a/docs/pot/cisco3750-layer2.pot b/docs/pot/cisco3750-layer2.pot deleted file mode 100644 index ad13cc66ea8..00000000000 --- a/docs/pot/cisco3750-layer2.pot +++ /dev/null @@ -1,66 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Cisco 3750" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following steps show how a Cisco 3750 is configured for pod-level layer-2 switching." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Setting VTP mode to transparent allows us to utilize VLAN IDs above 1000. Since we only use VLANs up to 999, vtp transparent mode is not strictly required." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vtp mode transparent\n" -"vlan 300-999\n" -"exit" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure all ports to dot1q and set 201 as the native VLAN." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "interface range GigabitEthernet 1/0/1-24\n" -"switchport trunk encapsulation dot1q\n" -"switchport mode trunk\n" -"switchport trunk native vlan 201\n" -"exit" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By default, Cisco passes all VLANs. Cisco switches complain of the native VLAN IDs are different when 2 ports are connected together. That’s why you must specify VLAN 201 as the native VLAN on the layer-2 switch." -msgstr "" - diff --git a/docs/pot/citrix-xenserver-installation.pot b/docs/pot/citrix-xenserver-installation.pot deleted file mode 100644 index 1a54c152f73..00000000000 --- a/docs/pot/citrix-xenserver-installation.pot +++ /dev/null @@ -1,1195 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Citrix XenServer Installation for &PRODUCT;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you want to use the Citrix XenServer hypervisor to run guest virtual machines, install XenServer 6.0 or XenServer 6.0.2 on the host(s) in your cloud. For an initial installation, follow the steps below. If you have previously installed XenServer and want to upgrade to another version, see ." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "System Requirements for XenServer Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The host must be certified as compatible with one of the following. See the Citrix Hardware Compatibility Guide: http://hcl.xensource.com" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 5.6 SP2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 6.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 6.0.2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You must re-install Citrix XenServer if you are going to re-use a host from a previous install." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Must support HVM (Intel-VT or AMD-V enabled)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor’s support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All hosts within a cluster must be homogenous. The CPUs must be of the same type, count, and feature flags." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Must support HVM (Intel-VT or AMD-V enabled in BIOS)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "64-bit x86 CPU (more cores results in better performance)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hardware virtualization support required" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4 GB of memory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "36 GB of local disk" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "At least 1 NIC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Statically allocated IP Address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you deploy &PRODUCT;, the hypervisor host must not have any VMs already running" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The lack of up-do-date hotfixes can lead to data corruption and lost VMs." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "XenServer Installation Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "From https://www.citrix.com/English/ss/downloads/, download the appropriate version of XenServer for your &PRODUCT; version (see ). Install it using the Citrix XenServer Installation Guide." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "After installation, perform the following configuration steps, which are described in the next few sections:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Required" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Optional" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up SR if not using NFS, iSCSI, or local disk; see " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configure XenServer dom0 Memory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure the XenServer dom0 settings to allocate more memory to dom0. This can enable XenServer to handle larger numbers of virtual machines. We recommend 2940 MB of RAM for XenServer dom0. For instructions on how to do this, see http://support.citrix.com/article/CTX126531. The article refers to XenServer 5.6, but the same information applies to XenServer 6.0." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Username and Password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All XenServers in a cluster must have the same username and password as configured in &PRODUCT;." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Time Synchronization" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The host must be set to use NTP. All hosts in a pod must have the same time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install NTP." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# yum install ntp" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the NTP configuration file to point to your NTP server." -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "# vi /etc/ntp.conf" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add one or more server lines in this file with the names of the NTP servers you want to use. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "server 0.xenserver.pool.ntp.org\n" -"server 1.xenserver.pool.ntp.org\n" -"server 2.xenserver.pool.ntp.org\n" -"server 3.xenserver.pool.ntp.org\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the NTP client." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service ntpd restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure NTP will start again upon reboot." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# chkconfig ntpd on" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Licensing" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Citrix XenServer Free version provides 30 days usage without a license. Following the 30 day trial, XenServer requires a free activation and license. You can choose to install a license now or skip this step. If you skip this step, you will need to install a license when you activate and license the XenServer." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Getting and Deploying a License" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you choose to install a license now you will need to use the XenCenter to activate and get a license." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In XenCenter, click Tools > License manager." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select your XenServer and select Activate Free XenServer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Request a license." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can install the license with XenCenter or using the xe command line tool." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Install &PRODUCT; XenServer Support Package (CSP)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "(Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To enable security groups, elastic load balancing, and elastic IP on XenServer, download and install the &PRODUCT; XenServer Support Package (CSP). After installing XenServer, perform the following additional steps on each XenServer host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Download the CSP software onto the XenServer host from one of the following links:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For XenServer 6.0.2:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://download.cloud.com/releases/3.0.1/XS-6.0.2/xenserver-cloud-supp.tgz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For XenServer 5.6 SP2:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://download.cloud.com/releases/2.2.0/xenserver-cloud-supp.tgz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For XenServer 6.0:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://download.cloud.com/releases/3.0/xenserver-cloud-supp.tgz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Extract the file:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# tar xf xenserver-cloud-supp.tgz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following script:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe-install-supplemental-pack xenserver-cloud-supp.iso" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the XenServer host is part of a zone that uses basic networking, disable Open vSwitch (OVS):" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe-switch-network-backend bridge" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the host machine when prompted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The XenServer host is now ready to be added to &PRODUCT;." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Primary Storage Setup for XenServer" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "&PRODUCT; natively supports NFS, iSCSI and local storage. If you are using one of these storage types, there is no need to create the XenServer Storage Repository (\"SR\")." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If, however, you would like to use storage connected via some other technology, such as FiberChannel, you must set up the SR yourself. To do so, perform the following steps. If you have your hosts in a XenServer pool, perform the steps on the master node. If you are working with a single XenServer which is not part of a cluster, perform the steps on that XenServer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Connect FiberChannel cable to all hosts in the cluster and to the FiberChannel storage host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Rescan the SCSI bus. Either use the following command or use XenCenter to perform an HBA rescan." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# scsi-rescan" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat step 2 on every host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check to be sure you see the new SCSI disk." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# ls /dev/disk/by-id/scsi-360a98000503365344e6f6177615a516b -l" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The output should look like this, although the specific file name will be different (scsi-<scsiID>):" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "lrwxrwxrwx 1 root root 9 Mar 16 13:47\n" -"/dev/disk/by-id/scsi-360a98000503365344e6f6177615a516b -> ../../sdc\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat step 4 on every host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the storage server, run this command to get a unique ID for the new SR." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# uuidgen" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The output should look like this, although the specific ID will be different:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "e6849e96-86c3-4f2c-8fcc-350cc711be3d" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create the FiberChannel SR. In name-label, use the unique ID you just generated." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# xe sr-create type=lvmohba shared=true\n" -"device-config:SCSIid=360a98000503365344e6f6177615a516b\n" -"name-label=\"e6849e96-86c3-4f2c-8fcc-350cc711be3d\"\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This command returns a unique ID for the SR, like the following example (your ID will be different):" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "7a143820-e893-6c6a-236e-472da6ee66bf" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a human-readable description for the SR, use the following command. In uuid, use the SR ID returned by the previous command. In name-description, set whatever friendly text you prefer." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe sr-param-set uuid=7a143820-e893-6c6a-236e-472da6ee66bf name-description=\"Fiber Channel storage repository\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make note of the values you will need when you add this storage to &PRODUCT; later (see ). In the Add Primary Storage dialog, in Protocol, you will choose PreSetup. In SR Name-Label, you will enter the name-label you set earlier (in this example, e6849e96-86c3-4f2c-8fcc-350cc711be3d)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) If you want to enable multipath I/O on a FiberChannel SAN, refer to the documentation provided by the SAN vendor." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "iSCSI Multipath Setup for XenServer (Optional)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When setting up the storage repository on a Citrix XenServer, you can enable multipath I/O, which uses redundant physical components to provide greater reliability in the connection between the server and the SAN. To enable multipathing, use a SAN solution that is supported for Citrix servers and follow the procedures in Citrix documentation. The following links provide a starting point:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://support.citrix.com/article/CTX118791" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://support.citrix.com/article/CTX125403" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can also ask your SAN vendor for advice about setting up your Citrix repository for multipathing." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make note of the values you will need when you add this storage to the &PRODUCT; later (see ). In the Add Primary Storage dialog, in Protocol, you will choose PreSetup. In SR Name-Label, you will enter the same name used to create the SR." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you encounter difficulty, address the support team for the SAN provided by your vendor. If they are not able to solve your issue, see Contacting Support." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Physical Networking Setup for XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Once XenServer has been installed, you may need to do some additional network configuration. At this point in the installation, you should have a plan for what NICs the host will have and what traffic each NIC will carry. The NICs should be cabled as necessary to implement your plan." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you plan on using NIC bonding, the NICs on all hosts in the cluster must be cabled exactly the same. For example, if eth0 is in the private bond on one host in a cluster, then eth0 must be in the private bond on all hosts in the cluster." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP address assigned for the management network interface must be static. It can be set on the host itself or obtained via static DHCP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; configures network traffic of various types to use different NICs or bonds on the XenServer host. You can control this process and provide input to the Management Server through the use of XenServer network name labels. The name labels are placed on physical interfaces or bonds and configured in &PRODUCT;. In some simple cases the name labels are not required." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configuring Public Network with a Dedicated NIC for XenServer (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; supports the use of a second NIC (or bonded pair of NICs, described in ) for the public network. If bonding is not used, the public network can be on any NIC and can be on different NICs on the hosts in a cluster. For example, the public network can be on eth0 on node A and eth1 on node B. However, the XenServer name-label for the public network must be identical across all hosts. The following examples set the network label to \"cloud-public\". After the management server is installed and running you must configure it with the name of the chosen network label (e.g. \"cloud-public\"); this is discussed in ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are using two NICs bonded together to create a public network, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are using a single dedicated NIC to provide public network access, follow this procedure on each new host that is added to &PRODUCT; before adding the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run xe network-list and find the public network. This is usually attached to the NIC that is public. Once you find the network make note of its UUID. Call this <UUID-Public>." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following command." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe network-param-set name-label=cloud-public uuid=<UUID-Public>" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configuring Multiple Guest Networks for XenServer (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; supports the use of multiple guest networks with the XenServer hypervisor. Each network is assigned a name-label in XenServer. For example, you might have two networks with the labels \"cloud-guest\" and \"cloud-guest2\". After the management server is installed and running, you must add the networks and use these labels so that &PRODUCT; is aware of the networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Follow this procedure on each new host before adding the host to &PRODUCT;:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run xe network-list and find one of the guest networks. Once you find the network make note of its UUID. Call this <UUID-Guest>." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following command, substituting your own name-label and uuid values." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe network-param-set name-label=<cloud-guestN> uuid=<UUID-Guest>" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat these steps for each additional guest network, using a different name-label and uuid each time." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Separate Storage Network for XenServer (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can optionally set up a separate storage network. This should be done first on the host, before implementing the bonding steps below. This can be done using one or two available NICs. With two NICs bonding may be done as above. It is the administrator's responsibility to set up a separate storage network." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Give the storage network a different name-label than what will be given for other networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the separate storage network to work correctly, it must be the only interface that can ping the primary storage device's IP address. For example, if eth0 is the management network NIC, ping -I eth0 <primary storage device IP> must fail. In all deployments, secondary storage devices must be pingable from the management network NIC or bond. If a secondary storage device has been placed on the storage network, it must also be pingable via the storage network NIC or bond on the hosts as well." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can set up two separate storage networks as well. For example, if you intend to implement iSCSI multipath, dedicate two non-bonded NICs to multipath. Each of the two networks needs a unique name-label." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If no bonding is done, the administrator must set up and name-label the separate storage network on all hosts (masters and slaves)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Here is an example to set up eth5 to access a storage network on 172.16.0.0/24." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# xe pif-list host-name-label='hostname' device=eth5\n" -"uuid(RO): ab0d3dd4-5744-8fae-9693-a022c7a3471d\n" -"device ( RO): eth5\n" -"#xe pif-reconfigure-ip DNS=172.16.3.3 gateway=172.16.0.1 IP=172.16.0.55 mode=static netmask=255.255.255.0 uuid=ab0d3dd4-5744-8fae-9693-a022c7a3471d" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "NIC Bonding for XenServer (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer supports Source Level Balancing (SLB) NIC bonding. Two NICs can be bonded together to carry public, private, and guest traffic, or some combination of these. Separate storage networks are also possible. 
Here are some example supported configurations:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "2 NICs on private, 2 NICs on public, 2 NICs on storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "2 NICs on private, 1 NIC on public, storage uses management network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "2 NICs on private, 2 NICs on public, storage uses management network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "1 NIC for private, public, and storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All NIC bonding is optional." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer expects all nodes in a cluster will have the same network cabling and same bonds implemented. In an installation the master will be the first host that was added to the cluster and the slave hosts will be all subsequent hosts added to the cluster. The bonds present on the master set the expectation for hosts added to the cluster later. The procedure to set up bonds on the master and slaves are different, and are described below. There are several important implications of this:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You must set bonds on the first host added to a cluster. Then you must use xe commands as below to establish the same bonds in the second and subsequent hosts added to a cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Slave hosts in a cluster must be cabled exactly the same as the master. For example, if eth0 is in the private bond on the master, it must be in the management network for added slave hosts." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Management Network Bonding" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The administrator must bond the management network NICs prior to adding the host to &PRODUCT;." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Creating a Private Bond on the First Host in the Cluster" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Use the following steps to create a bond in XenServer. These steps should be run on only the first host in a cluster. This example creates the cloud-private network with two physical NICs (eth0 and eth1) bonded into it." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Find the physical NICs that you want to bond together." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe pif-list host-name-label='hostname' device=eth0\n" -"# xe pif-list host-name-label='hostname' device=eth1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These command shows the eth0 and eth1 NICs and their UUIDs. Substitute the ethX devices of your choice. Call the UUID's returned by the above command slave1-UUID and slave2-UUID." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a new network for the bond. For example, a new network with name \"cloud-private\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This label is important. &PRODUCT; looks for a network by a name you configure. You must use the same name-label for all hosts in the cloud for the management network." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe network-create name-label=cloud-private\n" -"# xe bond-create network-uuid=[uuid of cloud-private created above]\n" -"pif-uuids=[slave1-uuid],[slave2-uuid]" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now you have a bonded pair that can be recognized by &PRODUCT; as the management network." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Public Network Bonding" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Bonding can be implemented on a separate, public network. The administrator is responsible for creating a bond for the public network if that network will be bonded and will be separate from the management network." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Creating a Public Bond on the First Host in the Cluster" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "These steps should be run on only the first host in a cluster. This example creates the cloud-public network with two physical NICs (eth2 and eth3) bonded into it." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "#xe pif-list host-name-label='hostname' device=eth2\n" -"# xe pif-list host-name-label='hostname' device=eth3" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These command shows the eth2 and eth3 NICs and their UUIDs. Substitute the ethX devices of your choice. Call the UUID's returned by the above command slave1-UUID and slave2-UUID." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a new network for the bond. For example, a new network with name \"cloud-public\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This label is important. &PRODUCT; looks for a network by a name you configure. You must use the same name-label for all hosts in the cloud for the public network." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe network-create name-label=cloud-public\n" -"# xe bond-create network-uuid=[uuid of cloud-public created above]\n" -"pif-uuids=[slave1-uuid],[slave2-uuid]" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now you have a bonded pair that can be recognized by &PRODUCT; as the public network." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Adding More Hosts to the Cluster" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "With the bonds (if any) established on the master, you should add additional, slave hosts. Run the following command for all additional hosts to be added to the cluster. This will cause the host to join the master in a single XenServer pool." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe pool-join master-address=[master IP] master-username=root\n" -"master-password=[your password]" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Complete the Bonding Setup Across the Cluster" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "With all hosts added to the pool, run the cloud-setup-bond script. This script will complete the configuration and set up of the bonds across all hosts in the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy the script from the Management Server in /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/cloud-setup-bonding.sh to the master host and ensure it is executable." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the script:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# ./cloud-setup-bonding.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now the bonds are set up and configured properly across the cluster." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Upgrading XenServer Versions" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section tells how to upgrade XenServer software on &PRODUCT; hosts. The actual upgrade is described in XenServer documentation, but there are some additional steps you must perform before and after the upgrade." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure the hardware is certified compatible with the new version of XenServer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To upgrade XenServer:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Upgrade the database. On the Management Server node:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Back up the database:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mysqldump --user=root --databases cloud > cloud.backup.sql\n" -"# mysqldump --user=root --databases cloud_usage > cloud_usage.backup.sql" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You might need to change the OS type settings for VMs running on the upgraded hosts." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you upgraded from XenServer 5.6 GA to XenServer 5.6 SP2, change any VMs that have the OS type CentOS 5.5 (32-bit), Oracle Enterprise Linux 5.5 (32-bit), or Red Hat Enterprise Linux 5.5 (32-bit) to Other Linux (32-bit). Change any VMs that have the 64-bit versions of these same OS types to Other Linux (64-bit)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you upgraded from XenServer 5.6 SP2 to XenServer 6.0.2, change any VMs that have the OS type CentOS 5.6 (32-bit), CentOS 5.7 (32-bit), Oracle Enterprise Linux 5.6 (32-bit), Oracle Enterprise Linux 5.7 (32-bit), Red Hat Enterprise Linux 5.6 (32-bit) , or Red Hat Enterprise Linux 5.7 (32-bit) to Other Linux (32-bit). Change any VMs that have the 64-bit versions of these same OS types to Other Linux (64-bit)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you upgraded from XenServer 5.6 to XenServer 6.0.2, do all of the above." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server and Usage Server. You only need to do this once for all clusters." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management start\n" -"# service cloud-usage start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disconnect the XenServer cluster from &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as root." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Navigate to the XenServer cluster, and click Actions – Unmanage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Watch the cluster status until it shows Unmanaged." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to one of the hosts in the cluster, and run this command to clean up the VLAN:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# . /opt/xensource/bin/cloud-clean-vlan.sh" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Still logged in to the host, run the upgrade preparation script:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /opt/xensource/bin/cloud-prepare-upgrade.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Troubleshooting: If you see the error \"can't eject CD,\" log in to the VM and umount the CD, then run the script again." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Upgrade the XenServer software on all hosts in the cluster. Upgrade the master first." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Live migrate all VMs on this host to other hosts. See the instructions for live migration in the Administrator's Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Troubleshooting: You might see the following error when you migrate a VM:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "[root@xenserver-qa-2-49-4 ~]# xe vm-migrate live=true host=xenserver-qa-2-49-5 vm=i-2-8-VM\n" -"You attempted an operation on a VM which requires PV drivers to be installed but the drivers were not detected.\n" -"vm: b6cf79c8-02ee-050b-922f-49583d9f1a14 (i-2-8-VM)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To solve this issue, run the following:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /opt/xensource/bin/make_migratable.sh b6cf79c8-02ee-050b-922f-49583d9f1a14" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reboot the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Upgrade to the newer version of XenServer. Use the steps in XenServer documentation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After the upgrade is complete, copy the following files from the management server to this host, in the directory locations shown below:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy this Management Server file..." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "...to this location on the XenServer host" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/xenserver60/NFSSR.py" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/sm/NFSSR.py" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/setupxenserver.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/setupxenserver.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/make_migratable.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/make_migratable.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/cloud-clean-vlan.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "/opt/xensource/bin/cloud-clean-vlan.sh" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /opt/xensource/bin/setupxenserver.sh" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Troubleshooting: If you see the following error message, you can safely ignore it." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mv: cannot stat `/etc/cron.daily/logrotate': No such file or directory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Plug in the storage repositories (physical block devices) to the XenServer host:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# for pbd in `xe pbd-list currently-attached=false| grep ^uuid | awk '{print $NF}'`; do xe pbd-plug uuid=$pbd ; done" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Note: If you add a host to this XenServer pool, you need to migrate all VMs on this host to other hosts, and eject this host from XenServer pool." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat these steps to upgrade every host in the cluster to the same version of XenServer." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Run the following command on one host in the XenServer cluster to clean up the host tags:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# for host in $(xe host-list | grep ^uuid | awk '{print $NF}') ; do xe host-param-clear uuid=$host param-name=tags; done;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reconnect the XenServer cluster to &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Navigate to the XenServer cluster, and click Actions – Manage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Watch the status to see that all the hosts come up." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After all hosts are up, run the following on one host in the cluster:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /opt/xensource/bin/cloud-clean-vlan.sh" -msgstr "" - diff --git a/docs/pot/cloud-infrastructure-concepts.pot b/docs/pot/cloud-infrastructure-concepts.pot deleted file mode 100644 index 8fdb0f850de..00000000000 --- a/docs/pot/cloud-infrastructure-concepts.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Cloud Infrastructure Concepts" -msgstr "" - diff --git a/docs/pot/cloud-infrastructure-overview.pot b/docs/pot/cloud-infrastructure-overview.pot deleted file mode 100644 index 61f33864915..00000000000 --- a/docs/pot/cloud-infrastructure-overview.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Cloud Infrastructure Overview" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The Management Server manages one or more zones (typically, datacenters) containing host computers where guest virtual machines will run. The cloud infrastructure is organized as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone: Typically, a zone is equivalent to a single datacenter. A zone consists of one or more pods and secondary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pod: A pod is usually one rack of hardware that includes a layer-2 switch and one or more clusters." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cluster: A cluster consists of one or more hosts and primary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host: A single compute node within a cluster. The hosts are where the actual cloud services run in the form of guest virtual machines." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Primary storage is associated with a cluster, and it stores the disk volumes for all the VMs running on hosts in that cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Secondary storage is associated with a zone, and it stores templates, ISO images, and disk volume snapshots." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "More Information" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information, see documentation on cloud infrastructure concepts." -msgstr "" - diff --git a/docs/pot/cloudstack-api.pot b/docs/pot/cloudstack-api.pot deleted file mode 100644 index 762ea4a4e0b..00000000000 --- a/docs/pot/cloudstack-api.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "&PRODUCT; API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; API is a low level API that has been used to implement the &PRODUCT; web UIs. It is also a good basis for implementing other popular APIs such as EC2/S3 and emerging DMTF standards." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Many &PRODUCT; API calls are asynchronous. These will return a Job ID immediately when called. This Job ID can be used to query the status of the job later. Also, status calls on impacted resources will provide some indication of their state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The API has a REST-like query basis and returns results in XML or JSON." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "See the Developer’s Guide and the API Reference." -msgstr "" - diff --git a/docs/pot/cloudstack.pot b/docs/pot/cloudstack.pot deleted file mode 100644 index 2ebfd4eaa4b..00000000000 --- a/docs/pot/cloudstack.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "&PRODUCT; Complete Documentation" -msgstr "" - -#. Tag: productname -#, no-c-format -msgid "Apache CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Complete documentation for &PRODUCT;." -msgstr "" - diff --git a/docs/pot/cluster-add.pot b/docs/pot/cluster-add.pot deleted file mode 100644 index f7601417591..00000000000 --- a/docs/pot/cluster-add.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Cluster" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You need to tell &PRODUCT; about the hosts that it will manage. Hosts exist inside clusters, so before you begin adding hosts to the cloud, you must add at least one cluster." -msgstr "" - diff --git a/docs/pot/compatibility-matrix.pot b/docs/pot/compatibility-matrix.pot deleted file mode 100644 index 0b03bdddde0..00000000000 --- a/docs/pot/compatibility-matrix.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Compatibility Matrix" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack 2.1.x" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack 2.2.x" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack 3.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack 3.0.1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack 3.0.2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack 3.0.3" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 5.6" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Yes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "No" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 5.6 FP1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 5.6 SP2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 6.0.0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 6.0.2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer 6.1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM (RHEL 6.0 or 6.1)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware (vSphere and vCenter, both version 4.1)" -msgstr "" - diff --git a/docs/pot/compute-disk-service-offerings.pot b/docs/pot/compute-disk-service-offerings.pot deleted file mode 100644 index 93a78d389d2..00000000000 --- a/docs/pot/compute-disk-service-offerings.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Compute and Disk Service Offerings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A service offering is a set of virtual hardware features such as CPU core count and speed, memory, and disk size. The &PRODUCT; administrator can set up various offerings, and then end users choose from the available offerings when they create a new VM. A service offering includes the following elements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CPU, memory, and network resource guarantees" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How resources are metered" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How the resource usage is charged" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How often the charges are generated" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "For example, one service offering might allow users to create a virtual machine instance that is equivalent to a 1 GHz Intel® Coreâ„¢ 2 CPU, with 1 GB memory at $0.20/hour, with network traffic metered at $0.10/GB. Based on the user’s selected offering, &PRODUCT; emits usage records that can be integrated with billing systems. &PRODUCT; separates service offerings into compute offerings and disk offerings. The computing service offering specifies:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest CPU" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest RAM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest Networking type (virtual or direct)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tags on the root disk" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The disk offering specifies:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disk size (optional). An offering without a disk size will allow users to pick their own" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tags on the data disk" -msgstr "" - diff --git a/docs/pot/concepts.pot b/docs/pot/concepts.pot deleted file mode 100644 index ed6f2ab0c78..00000000000 --- a/docs/pot/concepts.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Concepts" -msgstr "" - diff --git a/docs/pot/configure-acl.pot b/docs/pot/configure-acl.pot deleted file mode 100644 index 64e5c96acf0..00000000000 --- a/docs/pot/configure-acl.pot +++ /dev/null @@ -1,165 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring Access Control List" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Define Network Access Control List (ACL) on the VPC virtual router to control incoming (ingress) and outgoing (egress) traffic between the VPC tiers, and the tiers and Internet. By default, all incoming and outgoing traffic to the guest networks is blocked. To open the ports, you must create a new network ACL. The network ACLs can be created for the tiers only if the NetworkACL service is supported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Network ACLs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Network ACLs page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Network ACLs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To add an ACL rule, fill in the following fields to specify what kind of network traffic is allowed in this tier." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CIDR: The CIDR acts as the Source CIDR for the Ingress rules, and Destination CIDR for the Egress rules. To accept traffic only from or to the IP addresses within a particular address block, enter a CIDR or a comma-separated list of CIDRs. 
The CIDR is the base IP address of the incoming traffic. For example, 192.168.0.0/22. To allow all CIDRs, set to 0.0.0.0/0." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol: The networking protocol that sources use to send traffic to the tier. The TCP and UDP protocols are typically used for data exchange and end-user communications. The ICMP protocol is typically used to send error messages or network monitoring data." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start Port, End Port (TCP, UDP only): A range of listening ports that are the destination for the incoming traffic. If you are opening a single port, use the same number in both fields." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Tier: Select the tier for which you want to add this ACL rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ICMP Type, ICMP Code (ICMP only): The type of message and error code that will be sent." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Traffic Type: Select the traffic type you want to apply." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Egress: To add an egress rule, select Egress from the Traffic type drop-down box and click Add. This specifies what type of traffic is allowed to be sent out of VM instances in this tier. If no egress rules are specified, all traffic from the tier is allowed out at the VPC virtual router. Once egress rules are specified, only the traffic specified in egress rules and the responses to any traffic that has been allowed in through an ingress rule are allowed out. No egress rule is required for the VMs in a tier to communicate with each other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ingress: To add an ingress rule, select Ingress from the Traffic type drop-down box and click Add. This specifies what network traffic is allowed into the VM instances in this tier. 
If no ingress rules are specified, then no traffic will be allowed in, except for responses to any traffic that has been allowed out through an egress rule." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By default, all incoming and outgoing traffic to the guest networks is blocked. To open the ports, create a new network ACL." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add. The ACL rule is added." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To view the list of ACL rules you have added, click the desired tier from the Network ACLs page, then select the Network ACL tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can edit the tags assigned to the ACL rules and delete the ACL rules you have created. Click the appropriate button in the Actions column." -msgstr "" - diff --git a/docs/pot/configure-guest-traffic-in-advanced-zone.pot b/docs/pot/configure-guest-traffic-in-advanced-zone.pot deleted file mode 100644 index eedfdcb1a92..00000000000 --- a/docs/pot/configure-guest-traffic-in-advanced-zone.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configure Guest Traffic in an Advanced Zone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These steps assume you have already logged in to the &PRODUCT; UI. To configure the base guest network:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. On Zones, click View More, then click the zone to which you want to add a network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Network tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add guest network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Add guest network window is displayed:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following information:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. The name of the network. This will be user-visible" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Display Text: The description of the network. This will be user-visible" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone: The zone in which you are configuring the guest network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network offering: If the administrator has configured multiple network offerings, select the one you want to use for this network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest Gateway: The gateway that the guests should use" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest Netmask: The netmask in use on the subnet the guests will use" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." 
-msgstr "" - diff --git a/docs/pot/configure-package-repository.pot b/docs/pot/configure-package-repository.pot deleted file mode 100644 index c0ee374254a..00000000000 --- a/docs/pot/configure-package-repository.pot +++ /dev/null @@ -1,131 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configure package repository" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; is only distributed from source from the official mirrors. However, members of the CloudStack community may build convenience binaries so that users can install Apache CloudStack without needing to build from source." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you didn't follow the steps to build your own packages from source in the sections for or you may find pre-built DEB and RPM packages for your convience linked from the downloads page." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "These repositories contain both the Management Server and KVM Hypervisor packages." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "DEB package repository" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can add a DEB package repository to your apt sources with the following commands. Please note that only packages for Ubuntu 12.04 LTS (precise) are being built at this time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use your preferred editor and open (or create) /etc/apt/sources.list.d/cloudstack.list. Add the community provided repository to the file:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "deb http://cloudstack.apt-get.eu/ubuntu precise 4.1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We now have to add the public key to the trusted keys." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ wget -O - http://cloudstack.apt-get.eu/release.asc|apt-key add -" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now update your local apt cache." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ apt-get update" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Your DEB package repository should now be configured and ready for use." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "RPM package repository" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There is a RPM package repository for &PRODUCT; so you can easily install on RHEL based platforms." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you're using an RPM-based system, you'll want to add the Yum repository so that you can install &PRODUCT; with Yum." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Yum repository information is found under /etc/yum.repos.d. You'll see several .repo files in this directory, each one denoting a specific repository." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To add the &PRODUCT; repository, create /etc/yum.repos.d/cloudstack.repo and insert the following information." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"[cloudstack]\n" -"name=cloudstack\n" -"baseurl=http://cloudstack.apt-get.eu/rhel/4.1/\n" -"enabled=1\n" -"gpgcheck=0\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now you should be able to install CloudStack using Yum." -msgstr "" - diff --git a/docs/pot/configure-public-traffic-in-an-advanced-zone.pot b/docs/pot/configure-public-traffic-in-an-advanced-zone.pot deleted file mode 100644 index e69b5f4be9e..00000000000 --- a/docs/pot/configure-public-traffic-in-an-advanced-zone.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configure Public Traffic in an Advanced Zone" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In a zone that uses advanced networking, you need to configure at least one range of IP addresses for Internet traffic." -msgstr "" - diff --git a/docs/pot/configure-snmp-rhel.pot b/docs/pot/configure-snmp-rhel.pot deleted file mode 100644 index d614a53aad7..00000000000 --- a/docs/pot/configure-snmp-rhel.pot +++ /dev/null @@ -1,143 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:57\n" -"PO-Revision-Date: 2013-02-02T20:11:57\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring SNMP Community String on a RHEL Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The SNMP Community string is similar to a user id or password that provides access to a network device, such as router. This string is sent along with all SNMP requests. If the community string is correct, the device responds with the requested information. If the community string is incorrect, the device discards the request and does not respond." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The NetScaler device uses SNMP to communicate with the VMs. You must install SNMP and configure SNMP Community string for a secure communication between the NetScaler device and the RHEL machine." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that you installed SNMP on RedHat. If not, run the following command:" -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "yum install net-snmp-utils" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the /etc/snmp/snmpd.conf file to allow the SNMP polling from the NetScaler device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Map the community name into a security name (local and mynetwork, depending on where the request is coming from):" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use a strong password instead of public when you edit the following table." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "# sec.name source community\n" -"com2sec local localhost public\n" -"com2sec mynetwork 0.0.0.0 public" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Setting to 0.0.0.0 allows all IPs to poll the NetScaler server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Map the security names into group names:" -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "# group.name sec.model sec.name\n" -"group MyRWGroup v1 local\n" -"group MyRWGroup v2c local\n" -"group MyROGroup v1 mynetwork\n" -"group MyROGroup v2c mynetwork" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a view to allow the groups to have the permission to:" -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "incl/excl subtree mask view all included .1 " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Grant access with different write permissions to the two groups to the view you created." -msgstr "" - -#. 
Tag: screen -#, no-c-format -msgid "# context sec.model sec.level prefix read write notif\n" -" access MyROGroup \"\" any noauth exact all none none\n" -" access MyRWGroup \"\" any noauth exact all all all " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unblock SNMP in iptables." -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "iptables -A INPUT -p udp --dport 161 -j ACCEPT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start the SNMP service:" -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "service snmpd start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that the SNMP service is started automatically during the system startup:" -msgstr "" - -#. Tag: screen -#, no-c-format -msgid "chkconfig snmpd on" -msgstr "" - diff --git a/docs/pot/configure-usage-server.pot b/docs/pot/configure-usage-server.pot deleted file mode 100644 index deadcad6a3a..00000000000 --- a/docs/pot/configure-usage-server.pot +++ /dev/null @@ -1,230 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring the Usage Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure the usage server:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure the Usage Server has been installed. This requires extra steps beyond just installing the &PRODUCT; software. See Installing the Usage Server (Optional) in the Advanced Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Global Settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Search, type usage. Find the configuration parameter that controls the behavior you want to set. See the table below for a description of the available parameters." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Actions, click the Edit icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Type the desired value and click the Save icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server (as usual with any global configuration change) and also the Usage Server:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management restart\n" -"# service cloud-usage restart \n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following table shows the global configuration settings that control the behavior of the Usage Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Parameter Name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "enable.usage.server" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Whether the Usage Server is active." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage.aggregation.timezone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Time zone of usage records. Set this if the usage records and daily job execution are in different time zones. For example, with the following settings, the usage job will run at PST 00:15 and generate usage records for the 24 hours from 00:00:00 GMT to 23:59:59 GMT:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "usage.stats.job.exec.time = 00:15 \n" -"usage.execution.timezone = PST\n" -"usage.aggregation.timezone = GMT\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Valid values for the time zone are specified in " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Default: GMT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage.execution.timezone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The time zone of usage.stats.job.exec.time. Valid values for the time zone are specified in " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Default: The time zone of the management server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage.sanity.check.interval" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The number of days between sanity checks. Set this in order to periodically search for records with erroneous data before issuing customer invoices. For example, this checks for VM usage records created after the VM was destroyed, and similar checks for templates, volumes, and so on. It also checks for usage times longer than the aggregation range. If any issue is found, the alert ALERT_TYPE_USAGE_SANITY_RESULT = 21 is sent." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage.stats.job.aggregation.range" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The time period in minutes between Usage Server processing jobs. For example, if you set it to 1440, the Usage Server will run once per day. 
If you set it to 600, it will run every ten hours. In general, when a Usage Server job runs, it processes all events generated since usage was last run." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There is special handling for the case of 1440 (once per day). In this case the Usage Server does not necessarily process all records since Usage was last run. &PRODUCT; assumes that you require processing once per day for the previous, complete day’s records. For example, if the current day is October 7, then it is assumed you would like to process records for October 6, from midnight to midnight. &PRODUCT; assumes this \"midnight to midnight\" is relative to the usage.execution.timezone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Default: 1440" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage.stats.job.exec.time" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The time when the Usage Server processing will start. It is specified in 24-hour format (HH:MM) in the time zone of the server, which should be GMT. For example, to start the Usage job at 10:30 GMT, enter \"10:30\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If usage.stats.job.aggregation.range is also set, and its value is not 1440, then its value will be added to usage.stats.job.exec.time to get the time to run the Usage Server job again. This is repeated until 24 hours have elapsed, and the next day's processing begins again at usage.stats.job.exec.time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Default: 00:15." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example, suppose that your server is in GMT, your user population is predominantly in the East Coast of the United States, and you would like to process usage records every night at 2 AM local (EST) time. Choose these settings:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "enable.usage.server = true" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "usage.execution.timezone = America/New_York" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage.stats.job.exec.time = 07:00. This will run the Usage job at 2:00 AM EST. Note that this will shift by an hour as the East Coast of the U.S. enters and exits Daylight Savings Time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage.stats.job.aggregation.range = 1440" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "With this configuration, the Usage job will run every night at 2 AM EST and will process records for the previous day’s midnight-midnight as defined by the EST (America/New_York) time zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Because the special value 1440 has been used for usage.stats.job.aggregation.range, the Usage Server will ignore the data between midnight and 2 AM. That data will be included in the next day's run" -msgstr "" - diff --git a/docs/pot/configure-virtual-router.pot b/docs/pot/configure-virtual-router.pot deleted file mode 100644 index 616d8e0669c..00000000000 --- a/docs/pot/configure-virtual-router.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring the Virtual Router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can set the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP range" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported network services" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Default domain name for the network serviced by the virtual router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway IP address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How often &PRODUCT; fetches network usage statistics from &PRODUCT; virtual routers. If you want to collect traffic metering data from the virtual router, set the global configuration parameter router.stats.interval. If you are not using the virtual router to gather network usage statistics, set it to 0." -msgstr "" - diff --git a/docs/pot/configure-vpc.pot b/docs/pot/configure-vpc.pot deleted file mode 100644 index 13d1bd7242e..00000000000 --- a/docs/pot/configure-vpc.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring a Virtual Private Cloud" -msgstr "" - diff --git a/docs/pot/configure-vpn.pot b/docs/pot/configure-vpn.pot deleted file mode 100644 index 9d4e798860f..00000000000 --- a/docs/pot/configure-vpn.pot +++ /dev/null @@ -1,105 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To set up VPN for the cloud:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Global Settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set the following global configuration parameters." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "remote.access.vpn.client.ip.range – The range of IP addressess to be allocated to remote access VPN clients. The first IP in the range is used by the VPN server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "remote.access.vpn.psk.length – Length of the IPSec key." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "remote.access.vpn.user.limit – Maximum number of VPN users per account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To enable VPN for a particular network:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in as a user or administrator to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the network you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click one of the displayed IP address names." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Enable VPN button AttachDiskButton.png: button to attach a volume ." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The IPsec key is displayed in a popup window." -msgstr "" - diff --git a/docs/pot/configure-xenserver-dom0-memory.pot b/docs/pot/configure-xenserver-dom0-memory.pot deleted file mode 100644 index 33e6b06166a..00000000000 --- a/docs/pot/configure-xenserver-dom0-memory.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configure XenServer dom0 Memory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure the XenServer dom0 settings to allocate more memory to dom0. This can enable XenServer to handle larger numbers of virtual machines. We recommend 2940 MB of RAM for XenServer dom0. 
For instructions on how to do this, see Citrix Knowledgebase Article.The article refers to XenServer 5.6, but the same information applies to XenServer 6" -msgstr "" - diff --git a/docs/pot/configuring-projects.pot b/docs/pot/configuring-projects.pot deleted file mode 100644 index 46b2193e05d..00000000000 --- a/docs/pot/configuring-projects.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring Projects" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before &PRODUCT; users start using projects, the &PRODUCT; administrator must set up various systems to support them, including membership invitations, limits on project resources, and controls on who can create projects." 
-msgstr "" - diff --git a/docs/pot/console-proxy.pot b/docs/pot/console-proxy.pot deleted file mode 100644 index 6117a8680db..00000000000 --- a/docs/pot/console-proxy.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Console Proxy" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Console Proxy is a type of System Virtual Machine that has a role in presenting a console view via the web UI. It connects the user’s browser to the VNC port made available via the hypervisor for the console of the guest. Both the administrator and end user web UIs offer a console connection." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Clicking on a console icon brings up a new window. The AJAX code downloaded into that window refers to the public IP address of a console proxy VM. There is exactly one public IP address allocated per console proxy VM. 
The AJAX application connects to this IP. The console proxy then proxies the connection to the VNC port for the requested VM on the Host hosting the guest. ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The hypervisors will have many ports assigned to VNC usage so that multiple VNC sessions can occur simultaneously." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There is never any traffic to the guest virtual IP, and there is no need to enable VNC within the guest." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The console proxy VM will periodically report its active session count to the Management Server. The default reporting interval is five seconds. This can be changed through standard Management Server configuration with the parameter consoleproxy.loadscan.interval." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Assignment of guest VM to console proxy is determined by first determining if the guest VM has a previous session associated with a console proxy. If it does, the Management Server will assign the guest VM to the target Console Proxy VM regardless of the load on the proxy VM. Failing that, the first available running Console Proxy VM that has the capacity to handle new sessions is used." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Console proxies can be restarted by administrators but this will interrupt existing console sessions for users." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The console viewing functionality uses a dynamic DNS service under the domain name realhostip.com to assist in providing SSL security to console sessions. The console proxy is assigned a public IP address. In order to avoid browser warnings for mismatched SSL certificates, the URL for the new console window is set to the form of https://aaa-bbb-ccc-ddd.realhostip.com. Customers will see this URL during console session creation. &PRODUCT; includes the realhostip.com SSL certificate in the console proxy VM. 
Of course, &PRODUCT; cannot know about DNS A records for our customers' public IPs prior to shipping the software. &PRODUCT; therefore runs a dynamic DNS server that is authoritative for the realhostip.com domain. It maps the aaa-bbb-ccc-ddd part of the DNS name to the IP address aaa.bbb.ccc.ddd on lookups. This allows the browser to correctly connect to the console proxy's public IP, where it then expects and receives a SSL certificate for realhostip.com, and SSL is set up without browser warnings." -msgstr "" - diff --git a/docs/pot/convert-hyperv-vm-to-template.pot b/docs/pot/convert-hyperv-vm-to-template.pot deleted file mode 100644 index c4f6f380160..00000000000 --- a/docs/pot/convert-hyperv-vm-to-template.pot +++ /dev/null @@ -1,170 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Converting a Hyper-V VM to a Template" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To convert a Hyper-V VM to a XenServer-compatible &PRODUCT; template, you will need a standalone XenServer host with an attached NFS VHD SR. Use whatever XenServer version you are using with &PRODUCT;, but use XenCenter 5.6 FP1 or SP2 (it is backwards compatible to 5.6). Additionally, it may help to have an attached NFS ISO SR." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For Linux VMs, you may need to do some preparation in Hyper-V before trying to get the VM to work in XenServer. Clone the VM and work on the clone if you still want to use the VM in Hyper-V. Uninstall Hyper-V Integration Components and check for any references to device names in /etc/fstab:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "From the linux_ic/drivers/dist directory, run make uninstall (where \"linux_ic\" is the path to the copied Hyper-V Integration Components files)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restore the original initrd from backup in /boot/ (the backup is named *.backup0)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Remove the \"hdX=noprobe\" entries from /boot/grub/menu.lst." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check /etc/fstab for any partitions mounted by device name. Change those entries (if any) to mount by LABEL or UUID (get that information with the \"blkid\" command).." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The next step is make sure the VM is not running in Hyper-V, then get the VHD into XenServer. There are two options for doing this." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Option one:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Import the VHD using XenCenter. In XenCenter, go to Tools>Virtual Appliance Tools>Disk Image Import." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the VHD, then click Next." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Name the VM, choose the NFS VHD SR under Storage, enable \"Run Operating System Fixups\" and choose the NFS ISO SR." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Next, then Finish. A VM should be created." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Option two" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run XenConvert, under From choose VHD, under To choose XenServer. Click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Input the XenServer host info, then click Next." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name the VM, then click Next, then Convert. A VM should be created" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Once you have a VM created from the Hyper-V VHD, prepare it using the following steps:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Boot the VM, uninstall Hyper-V Integration Services, and reboot." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install XenServer Tools, then reboot." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare the VM as desired. For example, run sysprep on Windows VMs. See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Either option above will create a VM in HVM mode. This is fine for Windows VMs, but Linux VMs may not perform optimally. Converting a Linux VM to PV mode will require additional steps and will vary by distribution." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Shut down the VM and copy the VHD from the NFS storage to a web server; for example, mount the NFS share on the web server and copy it, or from the XenServer host use sftp or scp to upload it to the web server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In &PRODUCT;, create a new template using the following values:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL. Give the URL for the VHD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OS Type. Use the appropriate OS. 
For PV mode on CentOS, choose Other PV (32-bit) or Other PV (64-bit). This choice is available only for XenServer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor. XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format. VHD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The template will be created, and you can create instances from it." -msgstr "" - diff --git a/docs/pot/create-bare-metal-template.pot b/docs/pot/create-bare-metal-template.pot deleted file mode 100644 index 09ee1bcbecd..00000000000 --- a/docs/pot/create-bare-metal-template.pot +++ /dev/null @@ -1,110 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a Bare Metal Template" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Beta feature. Untested in &PRODUCT; 3.0.3. Provided without guarantee of performance." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Before you can create a bare metal template, you must have performed several other installation and setup steps to create a bare metal cluster and environment. See Bare Metal Installation in the Installation Guide. It is assumed you already have a directory named \"win7_64bit\" on your CIFS server, containing the image for the bare metal instance. This directory and image are set up as part of the Bare Metal Installation procedure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Create Template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the dialog box, enter the following values." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. Short name for the template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Display Text. Description of the template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL. The directory name which contains image file on your CIFS server. For example, win7_64bit." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone. All Zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OS Type. Select the OS type of the ISO image. Choose other if the OS Type of the ISO is not listed or if the ISO is not bootable." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor. BareMetal." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format. BareMetal." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password Enabled. No." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. No." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Featured. Choose Yes if you would like this template to be more prominent for users to select. Only administrators may make templates featured." 
-msgstr "" - diff --git a/docs/pot/create-new-projects.pot b/docs/pot/create-new-projects.pot deleted file mode 100644 index de1b6300a5e..00000000000 --- a/docs/pot/create-new-projects.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a New Project" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; administrators and domain administrators can create projects. If the global configuration parameter allow.user.create.projects is set to true, end users can also create projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in as administrator to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select view, click Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click New Project." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Give the project a name and description for display to users, then click Create Project." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A screen appears where you can immediately add more members to the project. This is optional. Click Next when you are ready to move on." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Save." -msgstr "" - diff --git a/docs/pot/create-template-from-existing-vm.pot b/docs/pot/create-template-from-existing-vm.pot deleted file mode 100644 index fbb0012970a..00000000000 --- a/docs/pot/create-template-from-existing-vm.pot +++ /dev/null @@ -1,105 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a Template from an Existing Virtual Machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Once you have at least one VM set up in the way you want, you can use it as the prototype for other VMs." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create and start a virtual machine using any of the techniques given in ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make any desired configuration changes on the running VM, then click Stop." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait for the VM to stop. When the status shows Stopped, go to the next step." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Create Template and provide the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name and Display Text. These will be shown in the UI, so choose something descriptive." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OS Type. This helps &PRODUCT; and the hypervisor perform certain operations and make assumptions that improve the performance of the guest. Select one of the following." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the operating system of the stopped VM is listed, choose it." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the OS type of the stopped VM is not listed, choose Other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you want to boot from this template in PV mode, choose Other PV (32-bit) or Other PV (64-bit). This choice is available only for XenServere:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Note: Generally you should not choose an older version of the OS than the version in the image. For example, choosing CentOS 5.4 to support a CentOS 6.2 image will in general not work. In those cases you should choose Other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. Choose Yes to make this template accessible to all users of this &PRODUCT; installation. The template will appear in the Community Templates list. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password Enabled. Choose Yes if your template has the &PRODUCT; password change script installed. See Adding Password Management to Your Templates." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Click Add." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The new template will be visible in the Templates section when the template creation process has been completed. The template is then available when creating a new VM" -msgstr "" - diff --git a/docs/pot/create-template-from-snapshot.pot b/docs/pot/create-template-from-snapshot.pot deleted file mode 100644 index 1e53985d5ba..00000000000 --- a/docs/pot/create-template-from-snapshot.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a Template from a Snapshot" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you do not want to stop the VM in order to use the Create Template menu item (as described in ), you can create a template directly from any snapshot through the &PRODUCT; UI." 
-msgstr "" - diff --git a/docs/pot/create-templates-overview.pot b/docs/pot/create-templates-overview.pot deleted file mode 100644 index fe715ee402c..00000000000 --- a/docs/pot/create-templates-overview.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating Templates: Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; ships with a default template for the CentOS operating system. There are a variety of ways to add more templates. Administrators and end users can add templates. The typical sequence of events is:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Launch a VM instance that has the operating system you want. Make any other desired configuration changes to the VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop the VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Convert the volume into a template." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are other ways to add templates to &PRODUCT;. For example, you can take a snapshot of the VM's volume and create a template from the snapshot, or import a VHD from another system into &PRODUCT;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The various techniques for creating templates are described in the next few sections." -msgstr "" - diff --git a/docs/pot/create-vpn-connection-vpc.pot b/docs/pot/create-vpn-connection-vpc.pot deleted file mode 100644 index 7f9e3a98a7d..00000000000 --- a/docs/pot/create-vpn-connection-vpc.pot +++ /dev/null @@ -1,160 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a VPN Connection" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you create for the account are listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to deploy the VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ASLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Site-to-Site VPN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Site-to-Site VPN page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "From the Select View drop-down, ensure that VPN Connection is selected." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Create VPN Connection." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Create VPN Connection dialog is displayed:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the desired customer gateway, then click OK to confirm." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Within a few moments, the VPN Connection is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following information on the VPN connection is displayed:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "State" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IPSec Preshared Key" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "IKE Policy" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ESP Policy" -msgstr "" - diff --git a/docs/pot/create-vpn-customer-gateway.pot b/docs/pot/create-vpn-customer-gateway.pot deleted file mode 100644 index 69990a2a48d..00000000000 --- a/docs/pot/create-vpn-customer-gateway.pot +++ /dev/null @@ -1,180 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating and Updating a VPN Customer Gateway" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A VPN customer gateway can be connected to only one VPN gateway at a time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To add a VPN Customer Gateway:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In the Select view, select VPN Customer Gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add site-to-site VPN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following information:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: A unique name for the VPN customer gateway you create." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway: The IP address for the remote gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CIDR list: The guest CIDR list of the remote subnets. Enter a CIDR or a comma-separated list of CIDRs. Ensure that a guest CIDR list is not overlapped with the VPC’s CIDR, or another guest CIDR. The CIDR must be RFC1918-compliant." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IPsec Preshared Key: Preshared keying is a method where the endpoints of the VPN share a secret key. This key value is used to authenticate the customer gateway and the VPC VPN gateway to each other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IKE peers (VPN end points) authenticate each other by computing and sending a keyed hash of data that includes the Preshared key. If the receiving peer is able to create the same hash independently by using its Preshared key, it knows that both peers must share the same secret, thus authenticating the customer gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IKE Encryption: The Internet Key Exchange (IKE) policy for phase-1. The supported encryption algorithms are AES128, AES192, AES256, and 3DES. Authentication is accomplished through the Preshared Keys." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The phase-1 is the first phase in the IKE process. In this initial negotiation phase, the two VPN endpoints agree on the methods to be used to provide security for the underlying IP traffic. 
The phase-1 authenticates the two VPN gateways to each other, by confirming that the remote gateway has a matching Preshared Key." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IKE Hash: The IKE hash for phase-1. The supported hash algorithms are SHA1 and MD5." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IKE DH: A public-key cryptography protocol which allows two parties to establish a shared secret over an insecure communications channel. The 1536-bit Diffie-Hellman group is used within IKE to establish session keys. The supported options are None, Group-5 (1536-bit) and Group-2 (1024-bit)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ESP Encryption: Encapsulating Security Payload (ESP) algorithm within phase-2. The supported encryption algorithms are AES128, AES192, AES256, and 3DES." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The phase-2 is the second phase in the IKE process. The purpose of IKE phase-2 is to negotiate IPSec security associations (SA) to set up the IPSec tunnel. In phase-2, new keying material is extracted from the Diffie-Hellman key exchange in phase-1, to provide session keys to use in protecting the VPN data flow." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ESP Hash: Encapsulating Security Payload (ESP) hash for phase-2. Supported hash algorithms are SHA1 and MD5." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Perfect Forward Secrecy: Perfect Forward Secrecy (or PFS) is the property that ensures that a session key derived from a set of long-term public and private keys will not be compromised. This property enforces a new Diffie-Hellman key exchange. It provides the keying material that has greater key material life and thereby greater resistance to cryptographic attacks. The available options are None, Group-5 (1536-bit) and Group-2 (1024-bit). The security of the key exchanges increase as the DH groups grow larger, as does the time of the exchanges." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When PFS is turned on, for every negotiation of a new phase-2 SA the two gateways must generate a new set of phase-1 keys. This adds an extra layer of protection that PFS adds, which ensures if the phase-2 SA’s have expired, the keys used for new phase-2 SA’s have not been generated from the current phase-1 keying material." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IKE Lifetime (seconds): The phase-1 lifetime of the security association in seconds. Default is 86400 seconds (1 day). Whenever the time expires, a new phase-1 exchange is performed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ESP Lifetime (seconds): The phase-2 lifetime of the security association in seconds. Default is 3600 seconds (1 hour). Whenever the value is exceeded, a re-key is initiated to provide a new IPsec encryption and authentication session keys." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Dead Peer Detection: A method to detect an unavailable Internet Key Exchange (IKE) peer. Select this option if you want the virtual router to query the liveliness of its IKE peer at regular intervals. It’s recommended to have the same configuration of DPD on both side of VPN connection." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Updating and Removing a VPN Customer Gateway" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can update a customer gateway either with no VPN connection, or related VPN connection is in error state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the VPN customer gateway you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To modify the required parameters, click the Edit VPN Customer Gateway button edit.png: button to edit a VPN customer gateway " -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To remove the VPN customer gateway, click the Delete VPN Customer Gateway button delete.png: button to remove a VPN customer gateway " -msgstr "" - diff --git a/docs/pot/create-vpn-gateway-for-vpc.pot b/docs/pot/create-vpn-gateway-for-vpc.pot deleted file mode 100644 index de02f8d4b2f..00000000000 --- a/docs/pot/create-vpn-gateway-for-vpc.pot +++ /dev/null @@ -1,130 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a VPN gateway for the VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to deploy the VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Site-to-Site VPN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are creating the VPN gateway for the first time, selecting Site-to-Site VPN prompts you to create a VPN gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the confirmation dialog, click Yes to confirm." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Within a few moments, the VPN gateway is created. You will be prompted to view the details of the VPN gateway you have created. Click Yes to confirm." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following details are displayed in the VPN Gateway page:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Domain" -msgstr "" - diff --git a/docs/pot/create-vr-network-offering.pot b/docs/pot/create-vr-network-offering.pot deleted file mode 100644 index 0c3a0e1ac77..00000000000 --- a/docs/pot/create-vr-network-offering.pot +++ /dev/null @@ -1,140 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating and Changing a Virtual Router Network Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create the network offering in association with a virtual router system service offering:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "First, create a system service offering, for example: VRsystemofferingHA." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information on creating a system service offering, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "From the Select Offering drop-down, choose Network Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Network Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the dialog, make the following choices:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. Any desired name for the network offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description. A short description of the offering that can be displayed to users." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Rate. Allowed data transfer rate in MB per second." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Traffic Type. The type of network traffic that will be carried on the network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest Type. Choose whether the guest network is isolated or shared. For a description of these terms, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify VLAN. (Isolated guest networks only) Indicate whether a VLAN should be specified when this offering is used." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported Services. Select one or more of the possible network services. For some services, you must also choose the service provider; for example, if you select Load Balancer, you can choose the &PRODUCT; virtual router or any other load balancers that have been configured in the cloud. Depending on which services you choose, additional fields may appear in the rest of the dialog box. For more information, see " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "System Offering. Choose the system service offering that you want virtual routers to use in this network. In this case, the default “System Offering For Software Router†and the custom “VRsystemofferingHA†are available and displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK and the network offering is created." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To change the network offering of a guest network to the virtual router service offering:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Network from the left navigation pane." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the guest network that you want to offer this network service to." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Edit button." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "From the Network Offering drop-down, select the virtual router network offering you have just created." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - diff --git a/docs/pot/create-windows-template.pot b/docs/pot/create-windows-template.pot deleted file mode 100644 index 64b3ae89f14..00000000000 --- a/docs/pot/create-windows-template.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a Windows Template" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Windows templates must be prepared with Sysprep before they can be provisioned on multiple machines. Sysprep allows you to create a generic Windows template and avoid any possible SID conflicts." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "(XenServer) Windows VMs running on XenServer require PV drivers, which may be provided in the template or added after the VM is created. The PV drivers are necessary for essential management functions such as mounting additional volumes and ISO images, live migration, and graceful shutdown." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "An overview of the procedure is as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Upload your Windows ISO." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information, see " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a VM Instance with this ISO." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information, see " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Follow the steps in Sysprep for Windows Server 2008 R2 (below) or Sysprep for Windows Server 2003 R2, depending on your version of Windows Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The preparation steps are complete. Now you can actually create the template as described in Creating the Windows Template." -msgstr "" - diff --git a/docs/pot/creating-compute-offerings.pot b/docs/pot/creating-compute-offerings.pot deleted file mode 100644 index 92051fd6379..00000000000 --- a/docs/pot/creating-compute-offerings.pot +++ /dev/null @@ -1,125 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a New Compute Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a new compute offering:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in with admin privileges to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Service Offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select Offering, choose Compute Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Compute Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the dialog, make the following choices:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name: Any desired name for the service offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description: A short description of the offering that can be displayed to users" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage type: The type of disk that should be allocated. Local allocates from storage attached directly to the host where the system VM is running. Shared allocates from storage accessible via NFS." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "# of CPU cores: The number of cores which should be allocated to a system VM with this offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CPU (in MHz): The CPU speed of the cores that the system VM is allocated. For example, “2000†would provide for a 2 GHz clock." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Memory (in MB): The amount of memory in megabytes that the system VM should be allocated. For example, “2048†would provide for a 2 GB RAM allocation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Rate: Allowed data transfer rate in MB per second." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Offer HA: If yes, the administrator can choose to have the system VM be monitored and as highly available as possible." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage Tags: The tags that should be associated with the primary storage used by the system VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host Tags: (Optional) Any tags that you use to organize your hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CPU cap: Whether to limit the level of CPU usage even if spare capacity is available." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public: Indicate whether the service offering should be available all domains or only some domains. Choose Yes to make it available to all domains. Choose No to limit the scope to a subdomain; &PRODUCT; will then prompt for the subdomain's name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add." -msgstr "" - diff --git a/docs/pot/creating-disk-offerings.pot b/docs/pot/creating-disk-offerings.pot deleted file mode 100644 index c4aa06933dc..00000000000 --- a/docs/pot/creating-disk-offerings.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a New Disk Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a system service offering:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in with admin privileges to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Service Offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select Offering, choose Disk Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Disk Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the dialog, make the following choices:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. Any desired name for the system offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description. A short description of the offering that can be displayed to users" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Custom Disk Size. If checked, the user can set their own disk size. 
If not checked, the root administrator must define a value in Disk Size." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disk Size. Appears only if Custom Disk Size is not selected. Define the volume size in GB." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional)Storage Tags. The tags that should be associated with the primary storage for this disk. Tags are a comma separated list of attributes of the storage. For example \"ssd,blue\". Tags are also added on Primary Storage. &PRODUCT; matches tags on a disk offering to tags on the storage. If a tag is present on a disk offering that tag (or tags) must also be present on Primary Storage for the volume to be provisioned. If no such primary storage exists, allocation from the disk offering will fail.." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. Indicate whether the service offering should be available all domains or only some domains. Choose Yes to make it available to all domains. Choose No to limit the scope to a subdomain; &PRODUCT; will then prompt for the subdomain's name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add." -msgstr "" - diff --git a/docs/pot/creating-network-offerings.pot b/docs/pot/creating-network-offerings.pot deleted file mode 100644 index c1183db966a..00000000000 --- a/docs/pot/creating-network-offerings.pot +++ /dev/null @@ -1,225 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a New Network Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a network offering:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in with admin privileges to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Service Offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select Offering, choose Network Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Network Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the dialog, make the following choices:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. Any desired name for the network offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description. A short description of the offering that can be displayed to users" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Rate. Allowed data transfer rate in MB per second" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Traffic Type. The type of network traffic that will be carried on the network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest Type. Choose whether the guest network is isolated or shared. 
For a description of these terms, see " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify VLAN. (Isolated guest networks only) Indicate whether a VLAN should be specified when this offering is used" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported Services. Select one or more of the possible network services. For some services, you must also choose the service provider; for example, if you select Load Balancer, you can choose the &PRODUCT; virtual router or any other load balancers that have been configured in the cloud. Depending on which services you choose, additional fields may appear in the rest of the dialog box." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Based on the guest network type selected, you can see the following supported services:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported Services" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Isolated" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Shared" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DHCP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DNS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Load Balancer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you select Load Balancer, you can choose the &PRODUCT; virtual router or any other load balancers that have been configured in the cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Source NAT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you select Source NAT, you can choose the &PRODUCT; virtual router or any other Source NAT providers that have been configured in the cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Static NAT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you select Static NAT, you can choose the &PRODUCT; virtual router or any other Static NAT providers that have been configured in the cloud." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Port Forwarding" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you select Port Forwarding, you can choose the &PRODUCT; virtual router or any other Port Forwarding providers that have been configured in the cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Not Supported" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "User Data" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Security Groups" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "System Offering. If the service provider for any of the services selected in Supported Services is a virtual router, the System Offering field appears. Choose the system service offering that you want virtual routers to use in this network. For example, if you selected Load Balancer in Supported Services and selected a virtual router to provide load balancing, the System Offering field appears so you can choose between the &PRODUCT; default system service offering and any custom system service offerings that have been defined by the &PRODUCT; root administrator. For more information, see System Service Offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Redundant router capability. (v3.0.3 and greater) Available only when Virtual Router is selected as the Source NAT provider. Select this option if you want to use two virtual routers in the network for uninterrupted connection: one operating as the master virtual router and the other as the backup. The master virtual router receives requests from and sends responses to the user’s VM. The backup virtual router is activated only when the master is down. After the failover, the backup becomes the master virtual router. &PRODUCT; deploys the routers on different hosts to ensure reliability if one host is down." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Conserve mode. 
Indicate whether to use conserve mode. In this mode, network resources are allocated only when the first virtual machine starts in the network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tags. Network tag to specify which physical network to use" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add." -msgstr "" - diff --git a/docs/pot/creating-new-volumes.pot b/docs/pot/creating-new-volumes.pot deleted file mode 100644 index bf5d94dab4f..00000000000 --- a/docs/pot/creating-new-volumes.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a New Volume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can add more data disk volumes to a guest VM at any time, up to the limits of your storage capacity. Both &PRODUCT; administrators and users can add volumes to VM instances. 
When you create a new volume, it is stored as an entity in &PRODUCT;, but the actual storage resources are not allocated on the physical storage device until you attach the volume. This optimization allows the &PRODUCT; to provision the volume nearest to the guest that will use it when the first attachment is made." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose Volumes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a new volume, click Add Volume, provide the following details, and click OK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. Give the volume a unique name so you can find it later." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Availability Zone. Where do you want the storage to reside? This should be close to the VM that will use the volume." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disk Offering. Choose the characteristics of the storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The new volume appears in the list of volumes with the state “Allocated.†The volume data is stored in &PRODUCT;, but the volume is not yet ready for use" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To start using the volume, continue to Attaching a Volume" -msgstr "" - diff --git a/docs/pot/creating-system-service-offerings.pot b/docs/pot/creating-system-service-offerings.pot deleted file mode 100644 index 914376f0f5f..00000000000 --- a/docs/pot/creating-system-service-offerings.pot +++ /dev/null @@ -1,130 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating a New System Service Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a system service offering:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in with admin privileges to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Service Offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select Offering, choose System Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add System Service Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the dialog, make the following choices:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. Any desired name for the system offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description. A short description of the offering that can be displayed to users" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "System VM Type. Select the type of system virtual machine that this offering is intended to support." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage type. 
The type of disk that should be allocated. Local allocates from storage attached directly to the host where the system VM is running. Shared allocates from storage accessible via NFS." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "# of CPU cores. The number of cores which should be allocated to a system VM with this offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CPU (in MHz). The CPU speed of the cores that the system VM is allocated. For example, \"2000\" would provide for a 2 GHz clock." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Memory (in MB). The amount of memory in megabytes that the system VM should be allocated. For example, \"2048\" would provide for a 2 GB RAM allocation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Rate. Allowed data transfer rate in MB per second." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Offer HA. If yes, the administrator can choose to have the system VM be monitored and as highly available as possible." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage Tags. The tags that should be associated with the primary storage used by the system VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host Tags. (Optional) Any tags that you use to organize your hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CPU cap. Whether to limit the level of CPU usage even if spare capacity is available." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. Indicate whether the service offering should be available all domains or only some domains. Choose Yes to make it available to all domains. Choose No to limit the scope to a subdomain; &PRODUCT; will then prompt for the subdomain's name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add." 
-msgstr "" - diff --git a/docs/pot/creating-vms.pot b/docs/pot/creating-vms.pot deleted file mode 100644 index c6b7f456d65..00000000000 --- a/docs/pot/creating-vms.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Creating VMs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Virtual machines are usually created from a template. Users can also create blank virtual machines. A blank virtual machine is a virtual machine without an OS template. Users can attach an ISO file and install the OS from the CD/DVD-ROM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a VM from a template:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Instances." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Click Add Instance." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select a template, then follow the steps in the wizard. (For more information about how the templates came to be in this list, see Working with Templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure that the hardware you have allows starting the selected service offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Submit and your VM will be created and started." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For security reason, the internal name of the VM is visible only to the root admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Starting with v3.0.3, you can create a VM without starting it. You can determine whether the VM needs to be started as part of the VM deployment. A new request parameter, startVM, is introduced in the deployVm API to support this feature. For more information, see the Developer's Guide" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a VM from an ISO:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(XenServer) Windows VMs running on XenServer require PV drivers, which may be provided in the template or added after the VM is created. The PV drivers are necessary for essential management functions such as mounting additional volumes and ISO images, live migration, and graceful shutdown." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select ISO Boot, and follow the steps in the wizard." -msgstr "" - diff --git a/docs/pot/customizing-dns.pot b/docs/pot/customizing-dns.pot deleted file mode 100644 index 75d56931a9c..00000000000 --- a/docs/pot/customizing-dns.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Customizing the Network Domain Name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The root administrator can optionally assign a custom DNS suffix at the level of a network, account, domain, zone, or entire &PRODUCT; installation, and a domain administrator can do so within their own domain. To specify a custom domain name and put it into effect, follow these steps." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set the DNS suffix at the desired scope" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "At the network level, the DNS suffix can be assigned through the UI when creating a new network, as described in or with the updateNetwork command in the &PRODUCT; API." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "At the account, domain, or zone level, the DNS suffix can be assigned with the appropriate &PRODUCT; API commands: createAccount, editAccount, createDomain, editDomain, createZone, or editZone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "At the global level, use the configuration parameter guest.domain.suffix. 
You can also use the &PRODUCT; API command updateConfiguration. After modifying this global configuration, restart the Management Server to put the new setting into effect." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To make the new DNS suffix take effect for an existing network, call the &PRODUCT; API command updateNetwork. This step is not necessary when the DNS suffix was specified while creating a new network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The source of the network domain that is used depends on the following rules." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For all networks, if a network domain is specified as part of a network's own configuration, that value is used." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For an account-specific network, the network domain specified for the account is used. If none is specified, the system looks for a value in the domain, zone, and global configuration, in that order." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For a domain-specific network, the network domain specified for the domain is used. If none is specified, the system looks for a value in the zone and global configuration, in that order." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For a zone-specific network, the network domain specified for the zone is used. If none is specified, the system looks for a value in the global configuration." -msgstr "" - diff --git a/docs/pot/database-replication.pot b/docs/pot/database-replication.pot deleted file mode 100644 index 6efcacf0b5b..00000000000 --- a/docs/pot/database-replication.pot +++ /dev/null @@ -1,254 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Database Replication (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; supports database replication from one MySQL node to another. This is achieved using standard MySQL replication. You may want to do this as insurance against MySQL server or storage loss. MySQL replication is implemented using a master/slave model. The master is the node that the Management Servers are configured to use. The slave is a standby node that receives all write operations from the master and applies them to a local, redundant copy of the database. The following steps are a guide to implementing MySQL replication." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Creating a replica is not a backup solution. You should develop a backup procedure for the MySQL data that is distinct from replication." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that this is a fresh install with no data in the master." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit my.cnf on the master and add the following in the [mysqld] section below datadir." 
-msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"log_bin=mysql-bin\n" -"server_id=1\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The server_id must be unique with respect to other servers. The recommended way to achieve this is to give the master an ID of 1 and each slave a sequential number greater than 1, so that the servers are numbered 1, 2, 3, etc." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the MySQL service:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# service mysqld restart\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a replication account on the master and give it privileges. We will use the \"cloud-repl\" user with the password \"password\". This assumes that master and slave run on the 172.16.1.0/24 network." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# mysql -u root\n" -"mysql> create user 'cloud-repl'@'172.16.1.%' identified by 'password';\n" -"mysql> grant replication slave on *.* TO 'cloud-repl'@'172.16.1.%';\n" -"mysql> flush privileges;\n" -"mysql> flush tables with read lock;\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Leave the current MySQL session running." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a new shell start a second MySQL session." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Retrieve the current position of the database." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# mysql -u root\n" -"mysql> show master status;\n" -"+------------------+----------+--------------+------------------+\n" -"| File | Position | Binlog_Do_DB | Binlog_Ignore_DB |\n" -"+------------------+----------+--------------+------------------+\n" -"| mysql-bin.000001 | 412 | | |\n" -"+------------------+----------+--------------+------------------+\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Note the file and the position that are returned by your instance." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Exit from this session." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Complete the master setup. Returning to your first session on the master, release the locks and exit MySQL." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"mysql> unlock tables;\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install and configure the slave. On the slave server, run the following commands." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# yum install mysql-server\n" -"# chkconfig mysqld on\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit my.cnf and add the following lines in the [mysqld] section below datadir." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"server_id=2\n" -"innodb_rollback_on_timeout=1\n" -"innodb_lock_wait_timeout=600\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart MySQL." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Instruct the slave to connect to and replicate from the master. Replace the IP address, password, log file, and position with the values you have used in the previous steps." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"mysql> change master to\n" -" -> master_host='172.16.1.217',\n" -" -> master_user='cloud-repl',\n" -" -> master_password='password',\n" -" -> master_log_file='mysql-bin.000001',\n" -" -> master_log_pos=412;\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then start replication on the slave." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"mysql> start slave;\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Optionally, open port 3306 on the slave as was done on the master earlier." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This is not required for replication to work. But if you choose not to do this, you will need to do it when failover to the replica occurs." -msgstr "" - -#. 
Tag: title -#, no-c-format -msgid "Failover" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This will provide for a replicated database that can be used to implement manual failover for the Management Servers. &PRODUCT; failover from one MySQL instance to another is performed by the administrator. In the event of a database failure you should:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop the Management Servers (via service cloud-management stop)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Change the replica's configuration to be a master and restart it." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that the replica's port 3306 is open to the Management Servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make a change so that the Management Server uses the new database. The simplest process here is to put the IP address of the new database server into each Management Server's /etc/cloud/management/db.properties." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Servers:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# service cloud-management start\n" -" " -msgstr "" - diff --git a/docs/pot/dates-in-usage-record.pot b/docs/pot/dates-in-usage-record.pot deleted file mode 100644 index 5ad955581c2..00000000000 --- a/docs/pot/dates-in-usage-record.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Dates in the Usage Record" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Usage records include a start date and an end date. These dates define the period of time for which the raw usage number was calculated. If daily aggregation is used, the start date is midnight on the day in question and the end date is 23:59:59 on the day in question (with one exception; see below). A virtual machine could have been deployed at noon on that day, stopped at 6pm on that day, then started up again at 11pm. When usage is calculated on that day, there will be 7 hours of running VM usage (usage type 1) and 12 hours of allocated VM usage (usage type 2). If the same virtual machine runs for the entire next day, there will 24 hours of both running VM usage (type 1) and allocated VM usage (type 2)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Note: The start date is not the time a virtual machine was started, and the end date is not the time when a virtual machine was stopped. The start and end dates give the time range within which usage was calculated." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "For network usage, the start date and end date again define the range in which the number of bytes transferred was calculated. If a user downloads 10 MB and uploads 1 MB in one day, there will be two records, one showing the 10 megabytes received and one showing the 1 megabyte sent." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There is one case where the start date and end date do not correspond to midnight and 11:59:59pm when daily aggregation is used. This occurs only for network usage records. When the usage server has more than one day's worth of unprocessed data, the old data will be included in the aggregation period. The start date in the usage record will show the date and time of the earliest event. For other types of usage, such as IP addresses and VMs, the old unprocessed data is not included in daily aggregation." -msgstr "" - diff --git a/docs/pot/dedicated-ha-hosts.pot b/docs/pot/dedicated-ha-hosts.pot deleted file mode 100644 index 131d48a7627..00000000000 --- a/docs/pot/dedicated-ha-hosts.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Dedicated HA Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(v3.0.3 and greater) One or more hosts can be designated for use only by HA-enabled VMs that are restarting due to a host failure. Setting up a pool of such dedicated HA hosts as the recovery destination for all HA-enabled VMs is useful to:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make it easier to determine which VMs have been restarted as part of the &PRODUCT; high-availability function. If a VM is running on a dedicated HA host, then it must be an HA-enabled VM whose original host failed. (With one exception: It is possible for an administrator to manually migrate any VM to a dedicated HA host.)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Keep HA-enabled VMs from restarting on hosts which may be reserved for other purposes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The dedicated HA option is set through a special host tag when the host is created. To allow the administrator to dedicate hosts to only HA-enabled VMs, set the global configuration variable ha.tag to the desired tag (for example, \"ha_host\"), and restart the Management Server. Enter the value in the Host Tags field when adding the host(s) that you want to dedicate to HA-enabled VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you set ha.tag, be sure to actually use that tag on at least one host in your cloud. If the tag specified in ha.tag is not set for any host in the cloud, the HA-enabled VMs will fail to restart after a crash." 
-msgstr "" - diff --git a/docs/pot/default-account-resource-limit.pot b/docs/pot/default-account-resource-limit.pot deleted file mode 100644 index a74111d206c..00000000000 --- a/docs/pot/default-account-resource-limit.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Default Account Resource Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can limit resource use by accounts. The default limits are set using global configuration parameters, and they affect all accounts within a cloud. The relevant parameters are those beginning with max.account (max.account.snapshots, etc.).." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To override a default limit for a particular account, set a per-account resource limit." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In the left navigation tree, click Accounts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the account you want to modify. The current limits are displayed. A value of -1 shows that there is no limit in place" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Edit button editbutton.png: edits the settings. " -msgstr "" - diff --git a/docs/pot/default-template.pot b/docs/pot/default-template.pot deleted file mode 100644 index 63b08f4c818..00000000000 --- a/docs/pot/default-template.pot +++ /dev/null @@ -1,79 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "The Default Template" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; includes a CentOS template. This template is downloaded by the Secondary Storage VM after the primary and secondary storage are configured. 
You can use this template in your production deployment or you can delete it and use custom templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The root password for the default template is \"password\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A default template is provided for each of XenServer, KVM, and vSphere. The templates that are downloaded depend on the hypervisor type that is available in your cloud. Each template is approximately 2.5 GB physical size." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The default template includes the standard iptables rules, which will block most access to the template excluding ssh." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# iptables --list\n" -"Chain INPUT (policy ACCEPT)\n" -"target prot opt source destination\n" -"RH-Firewall-1-INPUT all -- anywhere anywhere\n" -"\n" -"Chain FORWARD (policy ACCEPT)\n" -"target prot opt source destination\n" -"RH-Firewall-1-INPUT all -- anywhere anywhere\n" -"\n" -"Chain OUTPUT (policy ACCEPT)\n" -"target prot opt source destination\n" -"\n" -"Chain RH-Firewall-1-INPUT (2 references)\n" -"target prot opt source destination\n" -"ACCEPT all -- anywhere anywhere\n" -"ACCEPT icmp -- anywhere anywhere icmp any\n" -"ACCEPT esp -- anywhere anywhere\n" -"ACCEPT ah -- anywhere anywhere\n" -"ACCEPT udp -- anywhere 224.0.0.251 udp dpt:mdns\n" -"ACCEPT udp -- anywhere anywhere udp dpt:ipp\n" -"ACCEPT tcp -- anywhere anywhere tcp dpt:ipp\n" -"ACCEPT all -- anywhere anywhere state RELATED,ESTABLISHED\n" -"ACCEPT tcp -- anywhere anywhere state NEW tcp dpt:ssh\n" -"REJECT all -- anywhere anywhere reject-with icmp-host-\n" -"" -msgstr "" - diff --git a/docs/pot/delete-reset-vpn.pot b/docs/pot/delete-reset-vpn.pot deleted file mode 100644 index 54b2143fe13..00000000000 --- a/docs/pot/delete-reset-vpn.pot +++ /dev/null @@ -1,130 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Restarting and Removing a VPN Connection" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to deploy the VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ASLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Site-to-Site VPN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Site-to-Site VPN page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "From the Select View drop-down, ensure that VPN Connection is selected." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPN connections you created are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the VPN connection you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Details tab is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To remove a VPN connection, click the Delete VPN connection button remove-vpn.png: button to remove a VPN connection " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To restart a VPN connection, click the Reset VPN connection button present in the Details tab. reset-vpn.png: button to reset a VPN connection " -msgstr "" - diff --git a/docs/pot/delete-templates.pot b/docs/pot/delete-templates.pot deleted file mode 100644 index d1cf15f1e92..00000000000 --- a/docs/pot/delete-templates.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Deleting Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Templates may be deleted. In general, when a template spans multiple Zones, only the copy that is selected for deletion will be deleted; the same template in other Zones will not be deleted. The provided CentOS template is an exception to this. If the provided CentOS template is deleted, it will be deleted from all Zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When templates are deleted, the VMs instantiated from them will continue to run. However, new VMs cannot be created based on the deleted template." -msgstr "" - diff --git a/docs/pot/deleting-vms.pot b/docs/pot/deleting-vms.pot deleted file mode 100644 index 236b460ad19..00000000000 --- a/docs/pot/deleting-vms.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Deleting VMs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Users can delete their own virtual machines. A running virtual machine will be abruptly stopped before it is deleted. Administrators can delete any virtual machines." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To delete a virtual machine:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Instances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the VM that you want to delete." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Destroy Instance button Destroyinstance.png: button to destroy an instance " -msgstr "" - diff --git a/docs/pot/dell62xx-hardware.pot b/docs/pot/dell62xx-hardware.pot deleted file mode 100644 index b55f4d1271e..00000000000 --- a/docs/pot/dell62xx-hardware.pot +++ /dev/null @@ -1,82 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Dell 62xx" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following steps show how a Dell 62xx is configured for zone-level layer-3 switching. These steps assume VLAN 201 is used to route untagged private IPs for pod 1, and pod 1’s layer-2 switch is connected to Ethernet port 1/g1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Dell 62xx Series switch supports up to 1024 VLANs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure all the VLANs in the database." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vlan database\n" -"vlan 200-999\n" -"exit" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure Ethernet port 1/g1." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "interface ethernet 1/g1\n" -"switchport mode general\n" -"switchport general pvid 201\n" -"switchport general allowed vlan add 201 untagged\n" -"switchport general allowed vlan add 300-999 tagged\n" -"exit" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The statements configure Ethernet port 1/g1 as follows:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "VLAN 201 is the native untagged VLAN for port 1/g1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All VLANs (300-999) are passed to all the pod-level layer-2 switches." -msgstr "" - diff --git a/docs/pot/dell62xx-layer2.pot b/docs/pot/dell62xx-layer2.pot deleted file mode 100644 index a0b588c003f..00000000000 --- a/docs/pot/dell62xx-layer2.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Dell 62xx" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following steps show how a Dell 62xx is configured for pod-level layer-2 switching." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure all the VLANs in the database." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vlan database\n" -"vlan 300-999\n" -"exit" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "VLAN 201 is used to route untagged private IP addresses for pod 1, and pod 1 is connected to this layer-2 switch." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "interface range ethernet all\n" -"switchport mode general\n" -"switchport general allowed vlan add 300-999 tagged\n" -"exit" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The statements configure all Ethernet ports to function as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All ports are configured the same way." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All VLANs (300-999) are passed through all the ports of the layer-2 switch." -msgstr "" - diff --git a/docs/pot/deployment-architecture-overview.pot b/docs/pot/deployment-architecture-overview.pot deleted file mode 100644 index 3ce4056de44..00000000000 --- a/docs/pot/deployment-architecture-overview.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Deployment Architecture Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A &PRODUCT; installation consists of two parts: the Management Server and the cloud infrastructure that it manages. When you set up and manage a &PRODUCT; cloud, you provision resources such as hosts, storage devices, and IP addresses into the Management Server, and the Management Server manages those resources." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The minimum production installation consists of one machine running the &PRODUCT; Management Server and another machine to act as the cloud infrastructure (in this case, a very simple infrastructure consisting of one host running hypervisor software). In its smallest deployment, a single machine can act as both the Management Server and the hypervisor host (using the KVM hypervisor)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A more full-featured installation consists of a highly-available multi-node Management Server installation and up to tens of thousands of hosts using any of several advanced networking setups. For information about deployment options, see Choosing a Deployment Architecture." -msgstr "" - diff --git a/docs/pot/detach-move-volumes.pot b/docs/pot/detach-move-volumes.pot deleted file mode 100644 index 405ce9bfc74..00000000000 --- a/docs/pot/detach-move-volumes.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Attaching a Volume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This procedure is different from moving disk volumes from one storage pool to another. See VM Storage Migration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A volume can be detached from a guest VM and attached to another guest. Both &PRODUCT; administrators and users can detach volumes from VMs and move them to other VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the two VMs are in different clusters, and the volume is large, it may take several minutes for the volume to be moved to the new VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Storage, and choose Volumes in Select View. Alternatively, if you know which VM the volume is attached to, you can click Instances, click the VM name, and click View Volumes." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Click the name of the volume you want to detach, then click the Detach Disk button DetachDiskButton.png: button to detach a volume " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To move the volume to another VM, follow the steps in Attaching a Volume ." -msgstr "" - diff --git a/docs/pot/devcloud-usage-mode.pot b/docs/pot/devcloud-usage-mode.pot deleted file mode 100644 index 50a687b6174..00000000000 --- a/docs/pot/devcloud-usage-mode.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "DevCloud Usage Mode" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DevCloud can be used in several different ways:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Full sandbox. Where &PRODUCT; is run within the DevCloud instance started in Virtual Box." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In this mode, the &PRODUCT; management server runs within the instance and nested virtualization allows instantiation of tiny VMs within DevCloud itself. &PRODUCT; code modifications are done within DevCloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following diagram shows the architecture of the SandBox mode." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A deployment environment. Where &PRODUCT; code is developed in the localhost of the developer and the resulting build is deployed within DevCloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This mode was used in the testing procedure of &PRODUCT; 4.0.0 incubating release. See the following screencast to see how: http://vimeo.com/54621457" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A host-only mode. Where DevCloud is used only as a host. &PRODUCT; management server is run in the localhost of the developer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This mode makes use of a host-only interface defined in the Virtual Box preferences. Check the following screencast to see how: http://vimeo.com/54610161" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following schematic shows the architecture of the Host-Only mode." -msgstr "" - diff --git a/docs/pot/devcloud.pot b/docs/pot/devcloud.pot deleted file mode 100644 index 7d9f6f94a12..00000000000 --- a/docs/pot/devcloud.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "DevCloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DevCloud is the &PRODUCT; sandbox. It is provided as a Virtual Box appliance. It is meant to be used as a development environment to easily test new &PRODUCT; development. It has also been used for training and &PRODUCT; demos since it provides a Cloud in a box." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DevCloud is provided as a convenience by community members. It is not an official &PRODUCT; release artifact." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; source code however, contains tools to build your own DevCloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DevCloud is under development and should be considered a Work In Progress (WIP), the wiki is the most up to date documentation:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - diff --git a/docs/pot/developer-getting-started.pot b/docs/pot/developer-getting-started.pot deleted file mode 100644 index b0c3703a124..00000000000 --- a/docs/pot/developer-getting-started.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Getting Started" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To get started using the &PRODUCT; API, you should have the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL of the &PRODUCT; server you wish to integrate with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Both the API Key and Secret Key for an account. This should have been generated by the administrator of the cloud instance and given to you." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Familiarity with HTTP GET/POST and query strings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Knowledge of either XML or JSON." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Knowledge of a programming language that can generate HTTP requests; for example, Java or PHP." 
-msgstr "" - diff --git a/docs/pot/developer-introduction.pot b/docs/pot/developer-introduction.pot deleted file mode 100644 index 5ce4fe3872c..00000000000 --- a/docs/pot/developer-introduction.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Introduction to the &PRODUCT; API" -msgstr "" - diff --git a/docs/pot/disable-enable-zones-pods-clusters.pot b/docs/pot/disable-enable-zones-pods-clusters.pot deleted file mode 100644 index 064346f09fe..00000000000 --- a/docs/pot/disable-enable-zones-pods-clusters.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Disabling and Enabling Zones, Pods, and Clusters" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can enable or disable a zone, pod, or cluster without permanently removing it from the cloud. This is useful for maintenance or when there are problems that make a portion of the cloud infrastructure unreliable. No new allocations will be made to a disabled zone, pod, or cluster until its state is returned to Enabled. When a zone, pod, or cluster is first added to the cloud, it is Disabled by default." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To disable and enable a zone, pod, or cluster:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as administrator" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Infrastructure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Zones, click View More." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are disabling or enabling a zone, find the name of the zone in the list, and click the Enable/Disable button. 
enable-disable.png: button to enable or disable zone, pod, or cluster. " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are disabling or enabling a pod or cluster, click the name of the zone that contains the pod or cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Compute tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Pods or Clusters node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the pod or cluster name in the list." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Enable/Disable button. " -msgstr "" - diff --git a/docs/pot/disk-volume-usage-record-format.pot b/docs/pot/disk-volume-usage-record-format.pot deleted file mode 100644 index 67e4dff727b..00000000000 --- a/docs/pot/disk-volume-usage-record-format.pot +++ /dev/null @@ -1,105 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Disk Volume Usage Record Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For disk volumes, the following fields exist in a usage record." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "account – name of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "accountid – ID of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "domainid – ID of the domain in which this account resides" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "zoneid – Zone where the usage occurred" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "description – A string describing what the usage record is tracking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usagetype – A number representing the usage type (see Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rawusage – A number representing the actual usage in hours" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usageid – The volume ID" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "offeringid – The ID of the disk offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "type – Hypervisor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "templateid – ROOT template ID" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "size – The amount of storage allocated" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record" -msgstr "" - diff --git a/docs/pot/dns-dhcp.pot b/docs/pot/dns-dhcp.pot deleted file mode 100644 index ef86c0ed7d8..00000000000 --- a/docs/pot/dns-dhcp.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "DNS and DHCP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Virtual Router provides DNS and DHCP services to the guests. It proxies DNS requests to the DNS server configured on the Availability Zone." -msgstr "" - diff --git a/docs/pot/domains.pot b/docs/pot/domains.pot deleted file mode 100644 index 9278a41978c..00000000000 --- a/docs/pot/domains.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Domains" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the LDAP server requires SSL, you need to enable it in the ldapConfig command by setting the parameters ssl, truststore, and truststorepass. Before enabling SSL for ldapConfig, you need to get the certificate which the LDAP server is using and add it to a trusted keystore. You will need to know the path to the keystore and the password." -msgstr "" - diff --git a/docs/pot/enable-disable-static-nat-vpc.pot b/docs/pot/enable-disable-static-nat-vpc.pot deleted file mode 100644 index a45a1354d0a..00000000000 --- a/docs/pot/enable-disable-static-nat-vpc.pot +++ /dev/null @@ -1,135 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Enabling or Disabling Static NAT on a VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A static NAT rule maps a public IP address to the private IP address of a VM in a VPC to allow Internet traffic to it. This section tells how to enable or disable static NAT for a particular IP address in a VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If port forwarding rules are already in effect for an IP address, you cannot enable static NAT to that IP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If a guest VM is part of more than one network, static NAT rules will function only if they are defined on the default network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC to which you want to deploy the VMs." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP Addresses page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Details tab,click the Static NAT button. enable-disable.png: button to enable Statid NAT. The button toggles between Enable and Disable, depending on whether static NAT is currently enabled for the IP address." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are enabling static NAT, a dialog appears as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the tier and the destination VM, then click Apply." -msgstr "" - diff --git a/docs/pot/enable-disable-static-nat.pot b/docs/pot/enable-disable-static-nat.pot deleted file mode 100644 index 1b4bab178e3..00000000000 --- a/docs/pot/enable-disable-static-nat.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Enabling or Disabling Static NAT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If port forwarding rules are already in effect for an IP address, you cannot enable static NAT to that IP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If a guest VM is part of more than one network, static NAT rules will function only if they are defined on the default network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the network where you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP address you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Static NAT button. ReleaseIPButton.png: button to release an IP The button toggles between Enable and Disable, depending on whether static NAT is currently enabled for the IP address." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you are enabling static NAT, a dialog appears where you can choose the destination VM and click Apply" -msgstr "" - diff --git a/docs/pot/enable-security-groups.pot b/docs/pot/enable-security-groups.pot deleted file mode 100644 index 4af60756ebd..00000000000 --- a/docs/pot/enable-security-groups.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Enabling Security Groups" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In order for security groups to function in a zone, the security groups feature must first be enabled for the zone. The administrator can do this when creating a new zone, by selecting a network offering that includes security groups. The procedure is described in Basic Zone Configuration in the Advanced Installation Guide." 
-msgstr "" - diff --git a/docs/pot/enabling-api-call-expiration.pot b/docs/pot/enabling-api-call-expiration.pot deleted file mode 100644 index d21abe2d156..00000000000 --- a/docs/pot/enabling-api-call-expiration.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Enabling API Call Expiration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can set an expiry timestamp on API calls to prevent replay attacks over non-secure channels, such as HTTP. The server tracks the expiry timestamp you have specified and rejects all the subsequent API requests that come in after this validity period." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To enable this feature, add the following parameters to the API request:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "signatureVersion=3: If the signatureVersion parameter is missing or is not equal to 3, the expires parameter is ignored in the API request." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "expires=YYYY-MM-DDThh:mm:ssZ: Specifies the date and time at which the signature included in the request is expired. The timestamp is expressed in the YYYY-MM-DDThh:mm:ssZ format, as specified in the ISO 8601 standard." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "expires=2011-10-10T12:00:00+0530" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A sample API request with expiration is given below:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://<IPAddress>:8080/client/api?command=listZones&signatureVersion=3&expires=2011-10-10T12:00:00+0530&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D" -msgstr "" - diff --git a/docs/pot/enabling-port-8096.pot b/docs/pot/enabling-port-8096.pot deleted file mode 100644 index 21836fe010d..00000000000 --- a/docs/pot/enabling-port-8096.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Enabling Port 8096" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Port 8096, which allows API calls without authentication, is closed and disabled by default on any fresh 3.0.1 installations. You can enable 8096 (or another port) for this purpose as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that the first Management Server is installed and running." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set the global configuration parameter integration.api.port to the desired port." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the Management Server host machine, create an iptables rule allowing access to that port." -msgstr "" - diff --git a/docs/pot/end-user-ui-overview.pot b/docs/pot/end-user-ui-overview.pot deleted file mode 100644 index 9e0e5684fed..00000000000 --- a/docs/pot/end-user-ui-overview.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "End User's UI Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; UI helps users of cloud infrastructure to view and use their cloud resources, including virtual machines, templates and ISOs, data volumes and snapshots, guest networks, and IP addresses. If the user is a member or administrator of one or more &PRODUCT; projects, the UI can provide a project-oriented view." -msgstr "" - diff --git a/docs/pot/error-handling.pot b/docs/pot/error-handling.pot deleted file mode 100644 index d96cbdaa4ae..00000000000 --- a/docs/pot/error-handling.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Error Handling" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If an error occurs while processing an API request, the appropriate response in the format specified is returned. Each error response consists of an error code and an error text describing what possibly can go wrong. For an example error response, see page 12." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "An HTTP error code of 401 is always returned if API request was rejected due to bad signatures, missing API Keys, or the user simply did not have the permissions to execute the command." -msgstr "" - diff --git a/docs/pot/event-log-queries.pot b/docs/pot/event-log-queries.pot deleted file mode 100644 index b6ecf6cc703..00000000000 --- a/docs/pot/event-log-queries.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Event Log Queries" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Database logs can be queried from the user interface. The list of events captured by the system includes:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Virtual machine creation, deletion, and on-going management operations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Virtual router creation, deletion, and on-going management operations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Template creation and deletion" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network/load balancer rules creation and deletion" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage volume creation and deletion" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "User login and logout" -msgstr "" - diff --git a/docs/pot/event-types.pot b/docs/pot/event-types.pot deleted file mode 100644 index fc06fb73878..00000000000 --- a/docs/pot/event-types.pot +++ /dev/null @@ -1,575 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Event Types" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VM.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.EXTRACT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SG.REVOKE.INGRESS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VM.DESTROY" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.UPLOAD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "HOST.RECONNECT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VM.START" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.CLEANUP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "MAINT.CANCEL" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VM.STOP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VOLUME.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "MAINT.CANCEL.PS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VM.REBOOT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VOLUME.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "MAINT.PREPARE" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "VM.UPGRADE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VOLUME.ATTACH" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "MAINT.PREPARE.PS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VM.RESETPASSWORD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VOLUME.DETACH" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN.REMOTE.ACCESS.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ROUTER.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VOLUME.UPLOAD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN.USER.ADD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ROUTER.DESTROY" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SERVICEOFFERING.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN.USER.REMOVE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ROUTER.START" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SERVICEOFFERING.UPDATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK.RESTART" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ROUTER.STOP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SERVICEOFFERING.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "UPLOAD.CUSTOM.CERTIFICATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ROUTER.REBOOT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DOMAIN.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ROUTER.HA" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DOMAIN.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "STATICNAT.DISABLE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "PROXY.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DOMAIN.UPDATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SSVM.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "PROXY.DESTROY" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SNAPSHOT.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SSVM.DESTROY" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "PROXY.START" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SNAPSHOT.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SSVM.START" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "PROXY.STOP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SNAPSHOTPOLICY.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SSVM.STOP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "PROXY.REBOOT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SNAPSHOTPOLICY.UPDATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SSVM.REBOOT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "PROXY.HA" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SNAPSHOTPOLICY.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SSVM.H" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VNC.CONNECT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VNC.DISCONNECT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NET.IPASSIGN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NET.IPRELEASE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NET.RULEADD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NET.RULEDELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NET.RULEMODIFY" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "LB.ASSIGN.TO.RULE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "LB.REMOVE.FROM.RULE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "LB.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "LB.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "LB.UPDATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "USER.LOGIN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "USER.LOGOUT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "USER.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "USER.DELETE" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "USER.UPDATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "USER.DISABLE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.UPDATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.COPY" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.DOWNLOAD.START" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.DOWNLOAD.SUCCESS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE.DOWNLOAD.FAILED" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO.COPY" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO.ATTACH" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO.DETACH" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO.EXTRACT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO.UPLOAD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SERVICE.OFFERING.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SERVICE.OFFERING.EDIT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SERVICE.OFFERING.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DISK.OFFERING.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DISK.OFFERING.EDIT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DISK.OFFERING.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK.OFFERING.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK.OFFERING.EDIT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK.OFFERING.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "POD.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "POD.EDIT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "POD.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ZONE.CREATE" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "ZONE.EDIT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ZONE.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN.IP.RANGE.CREATE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN.IP.RANGE.DELETE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CONFIGURATION.VALUE.EDIT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SG.AUTH.INGRESS" -msgstr "" - diff --git a/docs/pot/events-log.pot b/docs/pot/events-log.pot deleted file mode 100644 index bdfb9c23ea9..00000000000 --- a/docs/pot/events-log.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Event Logs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are two types of events logged in the &PRODUCT; Event Log. Standard events log the success or failure of an event and can be used to identify jobs or processes that have failed. 
There are also long running job events. Events for asynchronous jobs log when a job is scheduled, when it starts, and when it completes. Other long running synchronous jobs log when a job starts, and when it completes. Long running synchronous and asynchronous event logs can be used to gain more information on the status of a pending job or can be used to identify a job that is hanging or has not started. The following sections provide more information on these events.." -msgstr "" - diff --git a/docs/pot/events.pot b/docs/pot/events.pot deleted file mode 100644 index 24a646eec0f..00000000000 --- a/docs/pot/events.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Events" -msgstr "" - diff --git a/docs/pot/example-LDAP-configuration-commands.pot b/docs/pot/example-LDAP-configuration-commands.pot deleted file mode 100644 index 693bf19fabd..00000000000 --- a/docs/pot/example-LDAP-configuration-commands.pot +++ /dev/null @@ -1,82 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Example LDAP Configuration Commands" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To understand the examples in this section, you need to know the basic concepts behind calling the &PRODUCT; API, which are explained in the Developer’s Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following shows an example invocation of ldapConfig with an ApacheDS LDAP server" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "http://127.0.0.1:8080/client/api?command=ldapConfig&hostname=127.0.0.1&searchbase=ou%3Dtesting%2Co%3Dproject&queryfilter=%28%26%28uid%3D%25u%29%29&binddn=cn%3DJohn+Singh%2Cou%3Dtesting%2Co%project&bindpass=secret&port=10389&ssl=true&truststore=C%3A%2Fcompany%2Finfo%2Ftrusted.ks&truststorepass=secret&response=json&apiKey=YourAPIKey&signature=YourSignatureHash" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The command must be URL-encoded. Here is the same example without the URL encoding:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://127.0.0.1:8080/client/api?command=ldapConfig\n" -"&hostname=127.0.0.1\n" -"&searchbase=ou=testing,o=project\n" -"&queryfilter=(&(%uid=%u))\n" -"&binddn=cn=John+Singh,ou=testing,o=project\n" -"&bindpass=secret\n" -"&port=10389\n" -"&ssl=true\n" -"&truststore=C:/company/info/trusted.ks\n" -"&truststorepass=secret\n" -"&response=json\n" -"&apiKey=YourAPIKey&signature=YourSignatureHash\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following shows a similar command for Active Directory. Here, the search base is the testing group within a company, and the users are matched up based on email address." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://10.147.29.101:8080/client/api?command=ldapConfig&hostname=10.147.28.250&searchbase=OU%3Dtesting%2CDC%3Dcompany&queryfilter=%28%26%28mail%3D%25e%29%29 &binddn=CN%3DAdministrator%2COU%3Dtesting%2CDC%3Dcompany&bindpass=1111_aaaa&port=389&response=json&apiKey=YourAPIKey&signature=YourSignatureHash" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The next few sections explain some of the concepts you will need to know when filling out the ldapConfig parameters." 
-msgstr "" - diff --git a/docs/pot/example-response-from-listUsageRecords.pot b/docs/pot/example-response-from-listUsageRecords.pot deleted file mode 100644 index 50b39f40a6e..00000000000 --- a/docs/pot/example-response-from-listUsageRecords.pot +++ /dev/null @@ -1,64 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Example response from listUsageRecords" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All &PRODUCT; API requests are submitted in the form of a HTTP GET/POST with an associated command and any parameters. A request is composed of the following whether in HTTP or HTTPS:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "\n" -" <listusagerecordsresponse>\n" -" <count>1816</count>\n" -" <usagerecord>\n" -" <account>user5</account>\n" -" <accountid>10004</accountid>\n" -" <domainid>1</domainid>\n" -" <zoneid>1</zoneid>\n" -" <description>i-3-4-WC running time (ServiceOffering: 1) (Template: 3)</description>\n" -" <usage>2.95288 Hrs</usage>\n" -" <usagetype>1</usagetype>\n" -" <rawusage>2.95288</rawusage>\n" -" <virtualmachineid>4</virtualmachineid>\n" -" <name>i-3-4-WC</name>\n" -" <offeringid>1</offeringid>\n" -" <templateid>3</templateid>\n" -" <usageid>245554</usageid>\n" -" <type>XenServer</type>\n" -" <startdate>2009-09-15T00:00:00-0700</startdate>\n" -" <enddate>2009-09-18T16:14:26-0700</enddate>\n" -" </usagerecord>\n" -"\n" -" … (1,815 more usage records)\n" -" </listusagerecordsresponse>\n" -" " -msgstr "" - diff --git a/docs/pot/export-template.pot b/docs/pot/export-template.pot deleted file mode 100644 index 8316f4bb966..00000000000 --- a/docs/pot/export-template.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Exporting Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "End users and Administrators may export templates from the &PRODUCT;. Navigate to the template in the UI and choose the Download function from the Actions menu." -msgstr "" - diff --git a/docs/pot/external-firewalls-and-load-balancers.pot b/docs/pot/external-firewalls-and-load-balancers.pot deleted file mode 100644 index 8c628710e11..00000000000 --- a/docs/pot/external-firewalls-and-load-balancers.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "External Firewalls and Load Balancers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; is capable of replacing its Virtual Router with an external Juniper SRX device and an optional external NetScaler or F5 load balancer for gateway and load balancing services. In this case, the VMs use the SRX as their gateway." -msgstr "" - diff --git a/docs/pot/external-fw-topology-req.pot b/docs/pot/external-fw-topology-req.pot deleted file mode 100644 index f218b10e870..00000000000 --- a/docs/pot/external-fw-topology-req.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "External Firewall Topology Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When external firewall integration is in place, the public IP VLAN must still be trunked to the Hosts. 
This is required to support the Secondary Storage VM and Console Proxy VM." -msgstr "" - diff --git a/docs/pot/external-guest-firewall-integration.pot b/docs/pot/external-guest-firewall-integration.pot deleted file mode 100644 index 5d7c7d19eaa..00000000000 --- a/docs/pot/external-guest-firewall-integration.pot +++ /dev/null @@ -1,251 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "External Guest Firewall Integration for Juniper SRX (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Available only for guests using advanced networking." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides for direct management of the Juniper SRX series of firewalls. This enables &PRODUCT; to establish static NAT mappings from public IPs to guest VMs, and to use the Juniper device in place of the virtual router for firewall services. 
You can have one or more Juniper SRX per zone. This feature is optional. If Juniper integration is not provisioned, &PRODUCT; will use the virtual router for these services." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Juniper SRX can optionally be used in conjunction with an external load balancer. External Network elements can be deployed in a side-by-side or inline configuration." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; requires the Juniper to be configured as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported SRX software version is 10.3 or higher." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install your SRX appliance according to the vendor's instructions." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Connect one interface to the management network and one interface to the public network. Alternatively, you can connect the same interface to both networks and a use a VLAN for the public network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure \"vlan-tagging\" is enabled on the private interface." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Record the public and private interface names. If you used a VLAN for the public interface, add a \".[VLAN TAG]\" after the interface name. For example, if you are using ge-0/0/3 for your public interface and VLAN tag 301, your public interface name would be \"ge-0/0/3.301\". Your private interface name should always be untagged because the &PRODUCT; software automatically creates tagged logical interfaces." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a public security zone and a private security zone. By default, these will already exist and will be called \"untrust\" and \"trust\". Add the public interface to the public zone and the private interface to the private zone. Note down the security zone names." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Make sure there is a security policy from the private zone to the public zone that allows all traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Note the username and password of the account you want the &PRODUCT; software to log in to when it is programming rules." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure the \"ssh\" and \"xnm-clear-text\" system services are enabled." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If traffic metering is desired:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "a. Create an incoming firewall filter and an outgoing firewall filter. These filters should be the same names as your public security zone name and private security zone name respectively. The filters should be set to be \"interface-specific\". For example, here is the configuration where the public zone is \"untrust\" and the private zone is \"trust\":" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "root@cloud-srx# show firewall\n" -"filter trust {\n" -" interface-specific;\n" -"}\n" -"filter untrust {\n" -" interface-specific;\n" -"}" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add the firewall filters to your public interface. For example, a sample configuration output (for public interface ge-0/0/3.0, public security zone untrust, and private security zone trust) is:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "ge-0/0/3 {\n" -" unit 0 {\n" -" family inet {\n" -" filter {\n" -" input untrust;\n" -" output trust;\n" -" }\n" -" address 172.25.0.252/16;\n" -" }\n" -" }\n" -"}" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure all VLANs are brought to the private interface of the SRX." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After the &PRODUCT; Management Server is installed, log in to the &PRODUCT; UI as administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Infrastructure." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In Zones, click View More." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the zone you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Network tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Network Service Providers node of the diagram, click Configure. (You might have to scroll down to see this.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click SRX." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Add New SRX button (+) and provide the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Address: The IP address of the SRX." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Username: The user name of the account on the SRX that &PRODUCT; should use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password: The password of the account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Interface. The name of the public interface on the SRX. For example, ge-0/0/2. A \".x\" at the end of the interface indicates the VLAN that is in use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private Interface: The name of the private interface on the SRX. For example, ge-0/0/1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Usage Interface: (Optional) Typically, the public interface is used to meter traffic. If you want to use a different interface, specify its name here" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of Retries: The number of times to attempt a command on the SRX before failing. The default value is 2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Timeout (seconds): The time to wait for a command on the SRX before considering it failed. Default is 300 seconds." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Network: The name of the public network on the SRX. For example, trust." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Private Network: The name of the private network on the SRX. For example, untrust." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Capacity: The number of networks the device can handle" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Dedicated: When marked as dedicated, this device will be dedicated to a single account. When Dedicated is checked, the value in the Capacity field has no significance implicitly, its value is 1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Global Settings. Set the parameter external.network.stats.interval to indicate how often you want &PRODUCT; to fetch network usage statistics from the Juniper SRX. If you are not using the SRX to gather network usage statistics, set to 0." -msgstr "" - diff --git a/docs/pot/external-guest-lb-integration.pot b/docs/pot/external-guest-lb-integration.pot deleted file mode 100644 index e1939dbf142..00000000000 --- a/docs/pot/external-guest-lb-integration.pot +++ /dev/null @@ -1,155 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "External Guest Load Balancer Integration (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; can optionally use a Citrix NetScaler or BigIP F5 load balancer to provide load balancing services to guests. If this is not enabled, &PRODUCT; will use the software load balancer in the virtual router." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To install and enable an external load balancer for &PRODUCT; management:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up the appliance according to the vendor's directions." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Connect it to the networks carrying public traffic and management traffic (these could be the same network)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Record the IP address, username, password, public interface name, and private interface name. The interface names will be something like \"1.1\" or \"1.2\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure that the VLANs are trunked to the management network interface." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After the &PRODUCT; Management Server is installed, log in as administrator to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Infrastructure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Zones, click View More." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the zone you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Network tab." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In the Network Service Providers node of the diagram, click Configure. (You might have to scroll down to see this.)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click NetScaler or F5." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Add button (+) and provide the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For NetScaler:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Address: The IP address of the SRX." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Username/Password: The authentication credentials to access the device. &PRODUCT; uses these credentials to access the device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Type: The type of device that is being added. It could be F5 Big Ip Load Balancer, NetScaler VPX, NetScaler MPX, or NetScaler SDX. For a comparison of the NetScaler types, see the &PRODUCT; Administration Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public interface: Interface of device that is configured to be part of the public network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private interface: Interface of device that is configured to be part of the private network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of retries. Number of times to attempt a command on the device before considering the operation failed. Default is 2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Capacity: The number of networks the device can handle." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Dedicated: When marked as dedicated, this device will be dedicated to a single account. When Dedicated is checked, the value in the Capacity field has no significance implicitly, its value is 1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The installation and provisioning of the external load balancer is finished. 
You can proceed to add VMs and NAT or load balancing rules." -msgstr "" - diff --git a/docs/pot/extracting-source.pot b/docs/pot/extracting-source.pot deleted file mode 100644 index eb739a6dd0a..00000000000 --- a/docs/pot/extracting-source.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Extracting source" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Extracting the &PRODUCT; release is relatively simple and can be done with a single command as follows:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ tar -jxvf apache-cloudstack-4.0.0-incubating-src.tar.bz2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can now move into the directory:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "$ cd ./apache-cloudstack-4.0.0-incubating-src" -msgstr "" - diff --git a/docs/pot/feature-overview.pot b/docs/pot/feature-overview.pot deleted file mode 100644 index 3f50f9f969b..00000000000 --- a/docs/pot/feature-overview.pot +++ /dev/null @@ -1,100 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "What Can &PRODUCT; Do?" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Multiple Hypervisor Support" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; works with a variety of hypervisors, and a single cloud deployment can contain multiple hypervisor implementations. The current release of &PRODUCT; supports pre-packaged enterprise solutions like Citrix XenServer and VMware vSphere, as well as KVM or Xen running on Ubuntu or CentOS." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Massively Scalable Infrastructure Management" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; can manage tens of thousands of servers installed in multiple geographically distributed datacenters. The centralized management server scales linearly, eliminating the need for intermediate cluster-level management servers. No single component failure can cause cloud-wide outage. Periodic maintenance of the management server can be performed without affecting the functioning of virtual machines running in the cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Automatic Configuration Management" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; automatically configures each guest virtual machine’s networking and storage settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; internally manages a pool of virtual appliances to support the cloud itself. These appliances offer services such as firewalling, routing, DHCP, VPN access, console proxy, storage access, and storage replication. The extensive use of virtual appliances simplifies the installation, configuration, and ongoing management of a cloud deployment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Graphical User Interface" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; offers an administrator's Web interface, used for provisioning and managing the cloud, as well as an end-user's Web interface, used for running VMs and managing VM templates. The UI can be customized to reflect the desired service provider or enterprise look and feel." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "API and Extensibility" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides an API that gives programmatic access to all the management features available in the UI. The API is maintained and documented. This API enables the creation of command line tools and new user interfaces to suit particular needs. 
See the Developer’s Guide and API Reference, both available at Apache CloudStack Guides and Apache CloudStack API Reference respectively." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; pluggable allocation architecture allows the creation of new types of allocators for the selection of storage and Hosts. See the Allocator Implementation Guide (http://docs.cloudstack.org/CloudStack_Documentation/Allocator_Implementation_Guide)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "High Availability" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; has a number of features to increase the availability of the system. The Management Server itself may be deployed in a multi-node installation where the servers are load balanced. MySQL may be configured to use replication to provide for a manual failover in the event of database loss. For the hosts, &PRODUCT; supports NIC bonding and the use of separate networks for storage as well as iSCSI Multipath." -msgstr "" - diff --git a/docs/pot/firewall-rules.pot b/docs/pot/firewall-rules.pot deleted file mode 100644 index 6c661014436..00000000000 --- a/docs/pot/firewall-rules.pot +++ /dev/null @@ -1,105 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Firewall Rules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By default, all incoming traffic to the public IP address is rejected by the firewall. To allow external traffic, you can open firewall ports by specifying firewall rules. You can optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to allow only incoming requests from certain IP addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You cannot use firewall rules to open ports for an elastic IP address. When elastic IP is used, outside access is instead controlled through the use of security groups. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Firewall rules can be created using the Firewall tab in the Management Server UI. This tab is not displayed by default when &PRODUCT; is installed. To display the Firewall tab, the &PRODUCT; administrator must set the global configuration parameter firewall.rule.ui.enabled to \"true.\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To create a firewall rule:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the network where you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP address you want to work with." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Click the Configuration tab and fill in the following values." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Source CIDR. (Optional) To accept only traffic from IP addresses within a particular address block, enter a CIDR or a comma-separated list of CIDRs. Example: 192.168.0.0/22. Leave empty to allow all CIDRs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol. The communication protocol in use on the opened port(s)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start Port and End Port. The port(s) you want to open on the firewall. If you are opening a single port, use the same number in both fields" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ICMP Type and ICMP Code. Used only if Protocol is set to ICMP. Provide the type and code required by the ICMP protocol to fill out the ICMP header. Refer to ICMP documentation for more details if you are not sure what to enter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add" -msgstr "" - diff --git a/docs/pot/first_ms_node_install.pot b/docs/pot/first_ms_node_install.pot deleted file mode 100644 index 6ecb5b7f0cb..00000000000 --- a/docs/pot/first_ms_node_install.pot +++ /dev/null @@ -1,64 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Install the First Management Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure you have configured your machine according to or as appropriate for your platform." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install the &PRODUCT; management server packages by issuing one of the following commands as appropriate:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# yum install cloud-client" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# apt-get install cloud-client" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(RPM-based distributions) When the installation is finished, run the following commands to start essential services:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service rpcbind start\n" -"# service nfs start\n" -"# chkconfig nfs on\n" -"# chkconfig rpcbind on\n" -" " -msgstr "" - diff --git a/docs/pot/generic-firewall-provisions.pot b/docs/pot/generic-firewall-provisions.pot deleted file mode 100644 index 0f1d0a3d7d4..00000000000 --- a/docs/pot/generic-firewall-provisions.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Generic Firewall Provisions" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The hardware firewall is required to serve two purposes:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protect the Management Servers. NAT and port forwarding should be configured to direct traffic from the public Internet to the Management Servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Route management network traffic between multiple zones. Site-to-site VPN should be configured between multiple zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To achieve the above purposes you must set up fixed configurations for the firewall. Firewall rules and policies need not change as users are provisioned into the cloud. Any brand of hardware firewall that supports NAT and site-to-site VPN can be used." -msgstr "" - diff --git a/docs/pot/getting-release.pot b/docs/pot/getting-release.pot deleted file mode 100644 index c2505e554b5..00000000000 --- a/docs/pot/getting-release.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Getting the release" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can download the latest &PRODUCT; release from the Apache CloudStack project download page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You'll notice several links under the 'Latest release' section." -msgstr "" - -#. Tag: para -#, no-c-format -msgid " apache-cloudstack-4.0.0-incubating-src.tar.bz2 - This is the link to the release itself." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "PGP - This is a detached cryptographic signature that can be used to help verify the authenticity of the release." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "MD5 - An MD5 hash of the release to aid in verifying the validity of the release download." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SHA512 - A SHA512 hash of the release to aid in verifying the validity of the release download." 
-msgstr "" - diff --git a/docs/pot/global-config.pot b/docs/pot/global-config.pot deleted file mode 100644 index 2b6b3015df7..00000000000 --- a/docs/pot/global-config.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Global Configuration Parameters" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides parameters that you can set to control many aspects of the cloud. When &PRODUCT; is first installed, and periodically thereafter, you might need to modify these settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the UI as administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Global Settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose one of the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Global Settings. 
This displays a list of the parameters with brief descriptions and current values." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor Capabilities. This displays a list of hypervisor versions with the maximum number of guests supported for each." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use the search box to narrow down the list to those you are interested in." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Edit icon to modify a value. If you are viewing Hypervisor Capabilities, you must click the name of the hypervisor first to display the editing screen." -msgstr "" - diff --git a/docs/pot/globally-configured-limits.pot b/docs/pot/globally-configured-limits.pot deleted file mode 100644 index 390e8ab4926..00000000000 --- a/docs/pot/globally-configured-limits.pot +++ /dev/null @@ -1,175 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Globally Configured Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a zone, the guest virtual network has a 24 bit CIDR by default. This limits the guest virtual network to 254 running instances. It can be adjusted as needed, but this must be done before any instances are created in the zone. For example, 10.1.1.0/22 would provide for ~1000 addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following table lists limits set in the Global Configuration:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Parameter Name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Definition" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.account.public.ips" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of public IP addresses that can be owned by an account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.account.snapshots" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of snapshots that can exist for an account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.account.templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of templates that can exist for an account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.account.user.vms" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of virtual machine instances that can exist for an account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.account.volumes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Number of disk volumes that can exist for an account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.template.iso.size" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum size for a downloaded template or ISO in GB" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.volume.size.gb" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum size for a volume in GB" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "network.throttling.rate" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Default data transfer rate in megabits per second allowed per user (supported on XenServer)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "snapshot.max.hourly" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum recurring hourly snapshots to be retained for a volume. If the limit is reached, early snapshots from the start of the hour are deleted so that newer ones can be saved. This limit does not apply to manual snapshots. If set to 0, recurring hourly snapshots can not be scheduled" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "snapshot.max.daily" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum recurring daily snapshots to be retained for a volume. If the limit is reached, snapshots from the start of the day are deleted so that newer ones can be saved. This limit does not apply to manual snapshots. If set to 0, recurring daily snapshots can not be scheduled" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "snapshot.max.weekly" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum recurring weekly snapshots to be retained for a volume. If the limit is reached, snapshots from the beginning of the week are deleted so that newer ones can be saved. This limit does not apply to manual snapshots. If set to 0, recurring weekly snapshots can not be scheduled" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "snapshot.max.monthly" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum recurring monthly snapshots to be retained for a volume. If the limit is reached, snapshots from the beginning of the month are deleted so that newer ones can be saved. This limit does not apply to manual snapshots. If set to 0, recurring monthly snapshots can not be scheduled." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To modify global configuration parameters, use the global configuration screen in the &PRODUCT; UI. 
See Setting Global Configuration Parameters" -msgstr "" - diff --git a/docs/pot/guest-ip-ranges.pot b/docs/pot/guest-ip-ranges.pot deleted file mode 100644 index f193ca7df63..00000000000 --- a/docs/pot/guest-ip-ranges.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Guest IP Ranges" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP ranges for guest network traffic are set on a per-account basis by the user. This allows the users to configure their network in a fashion that will enable VPN linking between their guest network and their clients." -msgstr "" - diff --git a/docs/pot/guest-network.pot b/docs/pot/guest-network.pot deleted file mode 100644 index 9f79450d7ca..00000000000 --- a/docs/pot/guest-network.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Guest Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a &PRODUCT; cloud, guest VMs can communicate with each other using shared infrastructure with the security and user perception that the guests have a private LAN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; virtual router is the main component providing networking features for guest traffic." -msgstr "" - diff --git a/docs/pot/guest-nw-usage-with-traffic-sentinel.pot b/docs/pot/guest-nw-usage-with-traffic-sentinel.pot deleted file mode 100644 index baafd5d447e..00000000000 --- a/docs/pot/guest-nw-usage-with-traffic-sentinel.pot +++ /dev/null @@ -1,90 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Guest Network Usage Integration for Traffic Sentinel" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To collect usage data for a guest network, &PRODUCT; needs to pull the data from an external network statistics collector installed on the network. Metering statistics for guest networks are available through &PRODUCT;’s integration with inMon Traffic Sentinel." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Traffic Sentinel is a network traffic usage data collection package. &PRODUCT; can feed statistics from Traffic Sentinel into its own usage records, providing a basis for billing users of cloud infrastructure. Traffic Sentinel uses the traffic monitoring protocol sFlow. Routers and switches generate sFlow records and provide them for collection by Traffic Sentinel, then &PRODUCT; queries the Traffic Sentinel database to obtain this information" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To construct the query, &PRODUCT; determines what guest IPs were in use during the current query interval. 
This includes both newly assigned IPs and IPs that were assigned in a previous time period and continued to be in use. &PRODUCT; queries Traffic Sentinel for network statistics that apply to these IPs during the time period they remained allocated in &PRODUCT;. The returned data is correlated with the customer account that owned each IP and the timestamps when IPs were assigned and released in order to create billable metering records in &PRODUCT;. When the Usage Server runs, it collects this data." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To set up the integration between &PRODUCT; and Traffic Sentinel:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On your network infrastructure, install Traffic Sentinel and configure it to gather traffic data. For installation and configuration steps, see inMon documentation at Traffic Sentinel Documentation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Traffic Sentinel UI, configure Traffic Sentinel to accept script querying from guest users. &PRODUCT; will be the guest user performing the remote queries to gather network usage for one or more IP addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click File > Users > Access Control > Reports Query, then select Guest from the drop-down list." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On &PRODUCT;, add the Traffic Sentinel host by calling the &PRODUCT; API command addTrafficMonitor. Pass in the URL of the Traffic Sentinel as protocol + host + port (optional); for example, http://10.147.28.100:8080. For the addTrafficMonitor command syntax, see the API Reference at API Documentation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For information about how to call the &PRODUCT; API, see the Developer’s Guide at CloudStack API Developer's Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as administrator." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Select Configuration from the Global Settings page, and set the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "direct.network.stats.interval: How often you want &PRODUCT; to query Traffic Sentinel." -msgstr "" - diff --git a/docs/pot/guest-traffic.pot b/docs/pot/guest-traffic.pot deleted file mode 100644 index 2da57bf537d..00000000000 --- a/docs/pot/guest-traffic.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Guest Traffic" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A network can carry guest traffic only between VMs within one zone. Virtual machines in different zones cannot communicate with each other using their IP addresses; they must communicate with each other by routing through a public IP address." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The Management Server automatically creates a virtual router for each network. A virtual router is a special virtual machine that runs on the hosts. Each virtual router has three network interfaces. Its eth0 interface serves as the gateway for the guest traffic and has the IP address of 10.1.1.1. Its eth1 interface is used by the system to configure the virtual router. Its eth2 interface is assigned a public IP address for public traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The virtual router provides DHCP and will automatically assign an IP address for each guest VM within the IP range assigned for the network. The user can manually reconfigure guest VMs to assume different IP addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Source NAT is automatically configured in the virtual router to forward outbound traffic for all guest VMs" -msgstr "" - diff --git a/docs/pot/ha-enabled-vm.pot b/docs/pot/ha-enabled-vm.pot deleted file mode 100644 index b91f9d1c648..00000000000 --- a/docs/pot/ha-enabled-vm.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "HA-Enabled Virtual Machines" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The user can specify a virtual machine as HA-enabled. By default, all virtual router VMs and Elastic Load Balancing VMs are automatically configured as HA-enabled. When an HA-enabled VM crashes, &PRODUCT; detects the crash and restarts the VM automatically within the same Availability Zone. HA is never performed across different Availability Zones. &PRODUCT; has a conservative policy towards restarting VMs and ensures that there will never be two instances of the same VM running at the same time. The Management Server attempts to start the VM on another Host in the same cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "HA features work with iSCSI or NFS primary storage. HA with local storage is not supported." -msgstr "" - diff --git a/docs/pot/ha-for-hosts.pot b/docs/pot/ha-for-hosts.pot deleted file mode 100644 index c95851645d3..00000000000 --- a/docs/pot/ha-for-hosts.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "HA for Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The user can specify a virtual machine as HA-enabled. By default, all virtual router VMs and Elastic Load Balancing VMs are automatically configured as HA-enabled. When an HA-enabled VM crashes, &PRODUCT; detects the crash and restarts the VM automatically within the same Availability Zone. HA is never performed across different Availability Zones. &PRODUCT; has a conservative policy towards restarting VMs and ensures that there will never be two instances of the same VM running at the same time. The Management Server attempts to start the VM on another Host in the same cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "HA features work with iSCSI or NFS primary storage. HA with local storage is not supported." -msgstr "" - diff --git a/docs/pot/ha-management-server.pot b/docs/pot/ha-management-server.pot deleted file mode 100644 index 62404d8e665..00000000000 --- a/docs/pot/ha-management-server.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "HA for Management Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; Management Server should be deployed in a multi-node configuration such that it is not susceptible to individual server failures. The Management Server itself (as distinct from the MySQL database) is stateless and may be placed behind a load balancer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Normal operation of Hosts is not impacted by an outage of all Management Servers. All guest VMs will continue to work." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the Management Server is down, no new VMs can be created, and the end user and admin UI, API, dynamic load distribution, and HA will cease to work." -msgstr "" - diff --git a/docs/pot/hardware-config-eg.pot b/docs/pot/hardware-config-eg.pot deleted file mode 100644 index b42f0df2429..00000000000 --- a/docs/pot/hardware-config-eg.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Example Hardware Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section contains an example configuration of specific switch models for zone-level layer-3 switching. It assumes VLAN management protocols, such as VTP or GVRP, have been disabled. The example scripts must be changed appropriately if you choose to use VTP or GVRP." -msgstr "" - diff --git a/docs/pot/hardware-firewall.pot b/docs/pot/hardware-firewall.pot deleted file mode 100644 index ba3d5f1622b..00000000000 --- a/docs/pot/hardware-firewall.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Hardware Firewall" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All deployments should have a firewall protecting the management server; see Generic Firewall Provisions. Optionally, some deployments may also have a Juniper SRX firewall that will be the default gateway for the guest networks; see ." -msgstr "" - diff --git a/docs/pot/host-add-vsphere.pot b/docs/pot/host-add-vsphere.pot deleted file mode 100644 index 4798035d2c3..00000000000 --- a/docs/pot/host-add-vsphere.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Host (vSphere)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For vSphere servers, we recommend creating the cluster of hosts in vCenter and then adding the entire cluster to &PRODUCT;. See Add Cluster: vSphere." -msgstr "" - diff --git a/docs/pot/host-add-xenserver-kvm-ovm.pot b/docs/pot/host-add-xenserver-kvm-ovm.pot deleted file mode 100644 index 7b984e46043..00000000000 --- a/docs/pot/host-add-xenserver-kvm-ovm.pot +++ /dev/null @@ -1,210 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Host (XenServer or KVM)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer and KVM hosts can be added to a cluster at any time." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Requirements for XenServer and KVM Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure the hypervisor host does not have any VMs already running before you add it to &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configuration requirements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each cluster must contain only hosts with the identical hypervisor." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For XenServer, do not put more than 8 hosts in a cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For KVM, do not put more than 16 hosts in a cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For hardware requirements, see the installation section for your hypervisor in the &PRODUCT; Installation Guide." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "XenServer Host Additional Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If network bonding is in use, the administrator must cable the new host identically to other hosts in the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For all additional hosts to be added to the cluster, run the following command. This will cause the host to join the master in a XenServer pool." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# xe pool-join master-address=[master IP] master-username=root master-password=[your password]" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "With all hosts added to the XenServer pool, run the cloud-setup-bond script. This script will complete the configuration and setup of the bonds on the new hosts in the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy the script from the Management Server in /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver/cloud-setup-bonding.sh to the master host and ensure it is executable." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the script:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# ./cloud-setup-bonding.sh" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "KVM Host Additional Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If shared mountpoint storage is in use, the administrator should ensure that the new host has all the same mountpoints (with storage mounted) as the other hosts in the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure the new host has the same network configuration (guest, private, and public network) as other hosts in the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are using OpenVswitch bridges edit the file agent.properties on the KVM host and set the parameter network.bridge.type to openvswitch before adding the host to &PRODUCT;" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Adding a XenServer or KVM Host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you have not already done so, install the hypervisor software on the host. You will need to know which version of the hypervisor software version is supported by &PRODUCT; and what additional configuration is required to ensure the host will work with &PRODUCT;. 
To find these installation details, see the appropriate section for your hypervisor in the &PRODUCT; Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone in which you want to add the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Compute tab. In the Clusters node, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the cluster where you want to add the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View Hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following information." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host Name. The DNS name or IP address of the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Username. Usually root." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password. This is the password for the user from your XenServer or KVM install)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host Tags (Optional). Any labels that you use to categorize hosts for ease of maintenance. For example, you can set to the cloud's HA tag (set in the ha.tag global configuration parameter) if you want this host to be used only for VMs with the \"high availability\" feature enabled. For more information, see HA-Enabled Virtual Machines as well as HA for Hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There may be a slight delay while the host is provisioned. It should automatically display in the UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat for additional hosts." 
-msgstr "" - diff --git a/docs/pot/host-add.pot b/docs/pot/host-add.pot deleted file mode 100644 index c52ebaa7d4b..00000000000 --- a/docs/pot/host-add.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before adding a host to the &PRODUCT; configuration, you must first install your chosen hypervisor on the host. &PRODUCT; can manage hosts running VMs under a variety of hypervisors." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; Installation Guide provides instructions on how to install each supported hypervisor and configure it for use with &PRODUCT;. See the appropriate section in the Installation Guide for information about which version of your chosen hypervisor is supported, as well as crucial additional steps to configure the hypervisor hosts for use with &PRODUCT;." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure you have performed the additional &PRODUCT;-specific configuration steps described in the hypervisor installation section for your particular hypervisor." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now add the hypervisor host to &PRODUCT;. The technique to use varies depending on the hypervisor." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - diff --git a/docs/pot/host-allocation.pot b/docs/pot/host-allocation.pot deleted file mode 100644 index cb6bee69c35..00000000000 --- a/docs/pot/host-allocation.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Host Allocation" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The system automatically picks the most appropriate host to run each virtual machine. 
End users may specify the zone in which the virtual machine will be created. End users do not have control over which host will run the virtual machine instance." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; administrators can specify that certain hosts should have a preference for particular types of guest instances. For example, an administrator could state that a host should have a preference to run Windows guests. The default host allocator will attempt to place guests of that OS type on such hosts first. If no such host is available, the allocator will place the instance wherever there is sufficient physical capacity." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Both vertical and horizontal allocation is allowed. Vertical allocation consumes all the resources of a given host before allocating any guests on a second host. This reduces power consumption in the cloud. Horizontal allocation places a guest on each host in a round-robin fashion. This may yield better performance to the guests in some cases. &PRODUCT; also allows an element of CPU over-provisioning as configured by the administrator. Over-provisioning allows the administrator to commit more CPU cycles to the allocated guests than are actually available from the hardware." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; also provides a pluggable interface for adding new allocators. These custom allocators can provide any policy the administrator desires." -msgstr "" - diff --git a/docs/pot/hypervisor-host-install-agent.pot b/docs/pot/hypervisor-host-install-agent.pot deleted file mode 100644 index 2c314d761e1..00000000000 --- a/docs/pot/hypervisor-host-install-agent.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Install and configure the Agent" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To manage KVM instances on the host &PRODUCT; uses a Agent. This Agent communicates with the Management server and controls all the instances on the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "First we start by installing the agent:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In RHEL or CentOS:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ yum install cloud-agent" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Ubuntu:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ apt-get install cloud-agent" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The host is now ready to be added to a cluster. This is covered in a later section, see . It is recommended that you continue to read the documentation before adding the host!" 
-msgstr "" - diff --git a/docs/pot/hypervisor-host-install-finish.pot b/docs/pot/hypervisor-host-install-finish.pot deleted file mode 100644 index ae14351d5a8..00000000000 --- a/docs/pot/hypervisor-host-install-finish.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Add the host to CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The host is now ready to be added to a cluster. This is covered in a later section, see . It is recommended that you continue to read the documentation before adding the host!" 
-msgstr "" - diff --git a/docs/pot/hypervisor-host-install-firewall.pot b/docs/pot/hypervisor-host-install-firewall.pot deleted file mode 100644 index fb7a56cc6d7..00000000000 --- a/docs/pot/hypervisor-host-install-firewall.pot +++ /dev/null @@ -1,160 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring the firewall" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The hypervisor needs to be able to communicate with other hypervisors and the management server needs to be able to reach the hypervisor." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In order to do so we have to open the following TCP ports (if you are using a firewall):" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "22 (SSH)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "1798" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "16509 (libvirt)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "5900 - 6100 (VNC consoles)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "49152 - 49216 (libvirt live migration)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "It depends on the firewall you are using how to open these ports. Below you'll find examples how to open these ports in RHEL/CentOS and Ubuntu." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Open ports in RHEL/CentOS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "RHEL and CentOS use iptables for firewalling the system, you can open extra ports by executing the following iptables commands:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ iptables -I INPUT -p tcp -m tcp --dport 22 -j ACCEPT" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ iptables -I INPUT -p tcp -m tcp --dport 1798 -j ACCEPT" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ iptables -I INPUT -p tcp -m tcp --dport 16509 -j ACCEPT" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ iptables -I INPUT -p tcp -m tcp --dport 5900:6100 -j ACCEPT" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ iptables -I INPUT -p tcp -m tcp --dport 49152:49216 -j ACCEPT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These iptables settings are not persistent across reboots, we have to save them first." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ iptables-save > /etc/sysconfig/iptables" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Open ports in Ubuntu" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The default firewall under Ubuntu is UFW (Uncomplicated FireWall), which is a Python wrapper around iptables." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To open the required ports, execute the following commands:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ ufw allow proto tcp from any to any port 22" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "$ ufw allow proto tcp from any to any port 1798" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ ufw allow proto tcp from any to any port 16509" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ ufw allow proto tcp from any to any port 5900:6100" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ ufw allow proto tcp from any to any port 49152:49216" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By default UFW is not enabled on Ubuntu. Executing these commands with the firewall disabled does not enable the firewall." -msgstr "" - diff --git a/docs/pot/hypervisor-host-install-libvirt.pot b/docs/pot/hypervisor-host-install-libvirt.pot deleted file mode 100644 index f7eeed99085..00000000000 --- a/docs/pot/hypervisor-host-install-libvirt.pot +++ /dev/null @@ -1,140 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Install and Configure libvirt" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; uses libvirt for managing virtual machines. Therefore it is vital that libvirt is configured correctly. Libvirt is a dependency of cloud-agent and should already be installed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In order to have live migration working libvirt has to listen for unsecured TCP connections. We also need to turn off libvirt's attempt to use Multicast DNS advertising. Both of these settings are in /etc/libvirt/libvirtd.conf" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set the following parameters:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "listen_tls = 0" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "listen_tcp = 1" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "tcp_port = 16509" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "auth_tcp = \"none\"" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mdns_adv = 0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Turning on \"listen_tcp\" in libvirtd.conf is not enough, we have to change the parameters as well:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL or CentOS modify /etc/sysconfig/libvirtd:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Uncomment the following line:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "#LIBVIRTD_ARGS=\"--listen\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu: modify /etc/init/libvirt-bin.conf" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Change the following line (at the end of the file):" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "exec /usr/sbin/libvirtd -d" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "to (just add -l)" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "exec /usr/sbin/libvirtd -d -l" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Restart libvirt" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In RHEL or CentOS:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ service libvirtd restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Ubuntu:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ service libvirt-bin restart" -msgstr "" - diff --git a/docs/pot/hypervisor-host-install-network-openvswitch.pot b/docs/pot/hypervisor-host-install-network-openvswitch.pot deleted file mode 100644 index ab01e23a156..00000000000 --- a/docs/pot/hypervisor-host-install-network-openvswitch.pot +++ /dev/null @@ -1,263 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configure the network using OpenVswitch" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This is a very important section, please make sure you read this thoroughly." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In order to forward traffic to your instances you will need at least two bridges: public and private." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By default these bridges are called cloudbr0 and cloudbr1, but you do have to make sure they are available on each hypervisor." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The most important factor is that you keep the configuration consistent on all your hypervisors." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Preparing" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To make sure that the native bridge module will not interfere with openvswitch the bridge module should be added to the blacklist. See the modprobe documentation for your distribution on where to find the blacklist. Make sure the module is not loaded either by rebooting or executing rmmod bridge before executing next steps." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The network configurations below depend on the ifup-ovs and ifdown-ovs scripts which are part of the openvswitch installation. They should be installed in /etc/sysconfig/network-scripts/" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Network example" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are many ways to configure your network. In the Basic networking mode you should have two (V)LAN's, one for your private network and one for the public network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We assume that the hypervisor has one NIC (eth0) with three tagged VLAN's:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN 100 for management of the hypervisor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN 200 for public network of the instances (cloudbr0)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN 300 for private network of the instances (cloudbr1)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "On VLAN 100 we give the Hypervisor the IP-Address 192.168.42.11/24 with the gateway 192.168.42.1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Hypervisor and Management server don't have to be in the same subnet!" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configuring the network bridges" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "It depends on the distribution you are using how to configure these, below you'll find examples for RHEL/CentOS." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The goal is to have three bridges called 'mgmt0', 'cloudbr0' and 'cloudbr1' after this section. This should be used as a guideline only. The exact configuration will depend on your network layout." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configure OpenVswitch" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The network interfaces using OpenVswitch are created using the ovs-vsctl command. This command will configure the interfaces and persist them to the OpenVswitch database." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "First we create a main bridge connected to the eth0 interface. Next we create three fake bridges, each connected to a specific vlan tag." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# ovs-vsctl add-br cloudbr\n" -"# ovs-vsctl add-port cloudbr eth0 \n" -"# ovs-vsctl set port cloudbr trunks=100,200,300\n" -"# ovs-vsctl add-br mgmt0 cloudbr 100\n" -"# ovs-vsctl add-br cloudbr0 cloudbr 200\n" -"# ovs-vsctl add-br cloudbr1 cloudbr 300" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configure in RHEL or CentOS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The required packages were installed when openvswitch and libvirt were installed, we can proceed to configuring the network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "First we configure eth0" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-eth0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure it looks similar to:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=eth0\n" -"HWADDR=00:04:xx:xx:xx:xx\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=none\n" -"TYPE=Ethernet" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We have to configure the base bridge with the trunk." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-cloudbr" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=cloudbr\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=none\n" -"DEVICETYPE=ovs\n" -"TYPE=OVSBridge" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We now have to configure the three VLAN bridges:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-mgmt0" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=mgmt0\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=static\n" -"DEVICETYPE=ovs\n" -"TYPE=OVSBridge\n" -"IPADDR=192.168.42.11\n" -"GATEWAY=192.168.42.1\n" -"NETMASK=255.255.255.0" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-cloudbr0" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=cloudbr0\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=none\n" -"DEVICETYPE=ovs\n" -"TYPE=OVSBridge" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-cloudbr1" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=cloudbr1\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=none\n" -"TYPE=OVSBridge\n" -"DEVICETYPE=ovs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "With this configuration you should be able to restart the network, although a reboot is recommended to see if everything works properly." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Make sure you have an alternative way like IPMI or ILO to reach the machine in case you made a configuration error and the network stops functioning!" -msgstr "" - diff --git a/docs/pot/hypervisor-host-install-network.pot b/docs/pot/hypervisor-host-install-network.pot deleted file mode 100644 index f46ef2ee8c6..00000000000 --- a/docs/pot/hypervisor-host-install-network.pot +++ /dev/null @@ -1,313 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configure the network bridges" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This is a very important section, please make sure you read this thoroughly." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section details how to configure bridges using the native implementation in Linux. Please refer to the next section if you intend to use OpenVswitch" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In order to forward traffic to your instances you will need at least two bridges: public and private." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By default these bridges are called cloudbr0 and cloudbr1, but you do have to make sure they are available on each hypervisor." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The most important factor is that you keep the configuration consistent on all your hypervisors." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Network example" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are many ways to configure your network. In the Basic networking mode you should have two (V)LAN's, one for your private network and one for the public network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We assume that the hypervisor has one NIC (eth0) with three tagged VLAN's:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN 100 for management of the hypervisor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN 200 for public network of the instances (cloudbr0)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN 300 for private network of the instances (cloudbr1)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On VLAN 100 we give the Hypervisor the IP-Address 192.168.42.11/24 with the gateway 192.168.42.1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Hypervisor and Management server don't have to be in the same subnet!" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configuring the network bridges" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "It depends on the distribution you are using how to configure these, below you'll find examples for RHEL/CentOS and Ubuntu." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The goal is to have two bridges called 'cloudbr0' and 'cloudbr1' after this section. This should be used as a guideline only. The exact configuration will depend on your network layout." -msgstr "" - -#. 
Tag: title -#, no-c-format -msgid "Configure in RHEL or CentOS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The required packages were installed when libvirt was installed, we can proceed to configuring the network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "First we configure eth0" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-eth0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure it looks similair to:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=eth0\n" -"HWADDR=00:04:xx:xx:xx:xx\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=none\n" -"TYPE=Ethernet" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We now have to configure the three VLAN interfaces:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-eth0.100" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=eth0.100\n" -"HWADDR=00:04:xx:xx:xx:xx\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=none\n" -"TYPE=Ethernet\n" -"VLAN=yes\n" -"IPADDR=192.168.42.11\n" -"GATEWAY=192.168.42.1\n" -"NETMASK=255.255.255.0" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-eth0.200" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=eth0.200\n" -"HWADDR=00:04:xx:xx:xx:xx\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=none\n" -"TYPE=Ethernet\n" -"VLAN=yes\n" -"BRIDGE=cloudbr0" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-eth0.300" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=eth0.300\n" -"HWADDR=00:04:xx:xx:xx:xx\n" -"ONBOOT=yes\n" -"HOTPLUG=no\n" -"BOOTPROTO=none\n" -"TYPE=Ethernet\n" -"VLAN=yes\n" -"BRIDGE=cloudbr1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now we have the VLAN interfaces configured we can add the bridges on top of them." -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-cloudbr0" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now we just configure it is a plain bridge without an IP-Adress" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=cloudbr0\n" -"TYPE=Bridge\n" -"ONBOOT=yes\n" -"BOOTPROTO=none\n" -"IPV6INIT=no\n" -"IPV6_AUTOCONF=no\n" -"DELAY=5\n" -"STP=yes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We do the same for cloudbr1" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/sysconfig/network-scripts/ifcfg-cloudbr1" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "DEVICE=cloudbr1\n" -"TYPE=Bridge\n" -"ONBOOT=yes\n" -"BOOTPROTO=none\n" -"IPV6INIT=no\n" -"IPV6_AUTOCONF=no\n" -"DELAY=5\n" -"STP=yes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "With this configuration you should be able to restart the network, although a reboot is recommended to see if everything works properly." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure you have an alternative way like IPMI or ILO to reach the machine in case you made a configuration error and the network stops functioning!" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configure in Ubuntu" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the required packages were installed when you installed libvirt, so we only have to configure the network." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/network/interfaces" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Modify the interfaces file to look like this:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "auto lo\n" -"iface lo inet loopback\n" -"\n" -"# The primary network interface\n" -"auto eth0.100\n" -"iface eth0.100 inet static\n" -" address 192.168.42.11\n" -" netmask 255.255.255.240\n" -" gateway 192.168.42.1\n" -" dns-nameservers 8.8.8.8 8.8.4.4\n" -" dns-domain lab.example.org\n" -"\n" -"# Public network\n" -"auto cloudbr0\n" -"iface cloudbr0 inet manual\n" -" bridge_ports eth0.200\n" -" bridge_fd 5\n" -" bridge_stp off\n" -" bridge_maxwait 1\n" -"\n" -"# Private network\n" -"auto cloudbr1\n" -"iface cloudbr1 inet manual\n" -" bridge_ports eth0.300\n" -" bridge_fd 5\n" -" bridge_stp off\n" -" bridge_maxwait 1" -msgstr "" - diff --git a/docs/pot/hypervisor-host-install-overview.pot b/docs/pot/hypervisor-host-install-overview.pot deleted file mode 100644 index f9eb4b77c3b..00000000000 --- a/docs/pot/hypervisor-host-install-overview.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "KVM Installation Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you want to use the Linux Kernel Virtual Machine (KVM) hypervisor to run guest virtual machines, install KVM on the host(s) in your cloud. The material in this section doesn't duplicate KVM installation docs. It provides the &PRODUCT;-specific steps that are needed to prepare a KVM host to work with &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before continuing, make sure that you have applied the latest updates to your host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "It is NOT recommended to run services on this host not controlled by &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The procedure for installing a KVM Hypervisor Host is:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare the Operating System" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install and configure libvirt" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure Security Policies (AppArmor and SELinux)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install and configure the Agent" -msgstr "" - diff --git a/docs/pot/hypervisor-host-install-prepare-os.pot b/docs/pot/hypervisor-host-install-prepare-os.pot deleted file mode 100644 index eb37323d048..00000000000 --- a/docs/pot/hypervisor-host-install-prepare-os.pot +++ /dev/null @@ -1,105 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Prepare the Operating System" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The OS of the Host must be prepared to host the &PRODUCT; Agent and run KVM instances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to your OS as root." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check for a fully qualified hostname." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ hostname --fqdn" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This should return a fully qualified hostname such as \"kvm1.lab.example.org\". If it does not, edit /etc/hosts so that it does." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure that the machine can reach the Internet." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ ping www.cloudstack.org" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Turn on NTP for time synchronization." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NTP is required to synchronize the clocks of the servers in your cloud. Unsynchronized clocks can cause unexpected problems." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install NTP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL or CentOS:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ yum install ntp" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ apt-get install openntpd" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat all of these steps on every hypervisor host." -msgstr "" - diff --git a/docs/pot/hypervisor-host-install-security-policies.pot b/docs/pot/hypervisor-host-install-security-policies.pot deleted file mode 100644 index e13fd5fa308..00000000000 --- a/docs/pot/hypervisor-host-install-security-policies.pot +++ /dev/null @@ -1,145 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configure the Security Policies" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "&PRODUCT; does various things which can be blocked by security mechanisms like AppArmor and SELinux. These have to be disabled to ensure the Agent has all the required permissions." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure SELinux (RHEL and CentOS)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check to see whether SELinux is installed on your machine. If not, you can skip this section." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In RHEL or CentOS, SELinux is installed and enabled by default. You can verify this with:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ rpm -qa | grep selinux" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set the SELINUX variable in /etc/selinux/config to \"permissive\". This ensures that the permissive setting will be maintained after a system reboot." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In RHEL or CentOS:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "vi /etc/selinux/config" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Change the following line" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "SELINUX=enforcing" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "to this" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "SELINUX=permissive" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then set SELinux to permissive starting immediately, without requiring a system reboot." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ setenforce permissive" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure Apparmor (Ubuntu)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check to see whether AppArmor is installed on your machine. If not, you can skip this section." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Ubuntu AppArmor is installed and enabled by default. You can verify this with:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "$ dpkg --list 'apparmor'" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disable the AppArmor profiles for libvirt" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ ln -s /etc/apparmor.d/usr.sbin.libvirtd /etc/apparmor.d/disable/" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ ln -s /etc/apparmor.d/usr.lib.libvirt.virt-aa-helper /etc/apparmor.d/disable/" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ apparmor_parser -R /etc/apparmor.d/usr.sbin.libvirtd" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ apparmor_parser -R /etc/apparmor.d/usr.lib.libvirt.virt-aa-helper" -msgstr "" - diff --git a/docs/pot/hypervisor-installation.pot b/docs/pot/hypervisor-installation.pot deleted file mode 100644 index d8d76998189..00000000000 --- a/docs/pot/hypervisor-installation.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Hypervisor Installation" -msgstr "" - diff --git a/docs/pot/hypervisor-kvm-install-flow.pot b/docs/pot/hypervisor-kvm-install-flow.pot deleted file mode 100644 index 88595e47498..00000000000 --- a/docs/pot/hypervisor-kvm-install-flow.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "KVM Hypervisor Host Installation" -msgstr "" - diff --git a/docs/pot/hypervisor-kvm-requirements.pot b/docs/pot/hypervisor-kvm-requirements.pot deleted file mode 100644 index 10e56f616fa..00000000000 --- a/docs/pot/hypervisor-kvm-requirements.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "System Requirements for KVM Hypervisor Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM is included with a variety of Linux-based operating systems. Although you are not required to run these distributions, the following are recommended:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CentOS / RHEL: 6.3" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ubuntu: 12.04(.1)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The main requirement for KVM hypervisors is the libvirt and Qemu version. 
No matter what Linux distribution you are using, make sure the following requirements are met:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "libvirt: 0.9.4 or higher" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Qemu/KVM: 1.0 or higher" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The default bridge in &PRODUCT; is the Linux native bridge implementation (bridge module). &PRODUCT; includes an option to work with OpenVswitch, the requirements are listed below" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "libvirt: 0.9.11 or higher" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "openvswitch: 1.7.1 or higher" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition, the following hardware requirements apply:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Within a single cluster, the hosts must be of the same distribution version." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All hosts within a cluster must be homogenous. The CPUs must be of the same type, count, and feature flags." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Must support HVM (Intel-VT or AMD-V enabled)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "64-bit x86 CPU (more cores results in better performance)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4 GB of memory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "At least 1 NIC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you deploy &PRODUCT;, the hypervisor host must not have any VMs already running" -msgstr "" - diff --git a/docs/pot/hypervisor-support-for-primarystorage.pot b/docs/pot/hypervisor-support-for-primarystorage.pot deleted file mode 100644 index 89d19323c7f..00000000000 --- a/docs/pot/hypervisor-support-for-primarystorage.pot +++ /dev/null @@ -1,155 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Hypervisor Support for Primary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following table shows storage options and parameters for different hypervisors." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware vSphere" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Citrix XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format for Disks, Templates, and Snapshots" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMDK" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VHD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "QCOW2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "iSCSI support" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMFS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Clustered LVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Yes, via Shared Mountpoint" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Fiber Channel support" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Yes, via Existing SR" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NFS support" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Y" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Local storage support" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage over-provisioning" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NFS and iSCSI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NFS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer uses a clustered LVM system to store VM images on iSCSI and Fiber Channel volumes and does not support over-provisioning in the hypervisor. The storage server itself, however, can support thin-provisioning. As a result the &PRODUCT; can still support storage over-provisioning by running on thin-provisioned storage volumes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM supports \"Shared Mountpoint\" storage. A shared mountpoint is a file system path local to each server in a given cluster. The path must be the same across all Hosts in the cluster, for example /mnt/primary1. This shared mountpoint is assumed to be a clustered filesystem such as OCFS2. In this case the &PRODUCT; does not attempt to mount or unmount the storage as is done with NFS. The &PRODUCT; requires that the administrator insure that the storage is available" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "With NFS storage, &PRODUCT; manages the overprovisioning. In this case the global configuration parameter storage.overprovisioning.factor controls the degree of overprovisioning. This is independent of hypervisor type." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Local storage is an option for primary storage for vSphere, XenServer, and KVM. When the local disk option is enabled, a local disk storage pool is automatically created on each host. 
To use local storage for the System Virtual Machines (such as the Virtual Router), set system.vm.use.local.storage to true in global configuration." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; supports multiple primary storage pools in a Cluster. For example, you could provision 2 NFS servers in primary storage. Or you could provision 1 iSCSI LUN initially and then add a second iSCSI LUN when the first approaches capacity." -msgstr "" - diff --git a/docs/pot/import-ami.pot b/docs/pot/import-ami.pot deleted file mode 100644 index 84225fb4f2e..00000000000 --- a/docs/pot/import-ami.pot +++ /dev/null @@ -1,270 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Importing Amazon Machine Images" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following procedures describe how to import an Amazon Machine Image (AMI) into &PRODUCT; when using the XenServer hypervisor." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Assume you have an AMI file and this file is called CentOS_6.2_x64. Assume further that you are working on a CentOS host. If the AMI is a Fedora image, you need to be working on a Fedora host initially." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You need to have a XenServer host with a file-based storage repository (either a local ext3 SR or an NFS SR) to convert to a VHD once the image file has been customized on the Centos/Fedora host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up loopback on image file:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mkdir -p /mnt/loop/centos62\n" -"# mount -o loop CentOS_6.2_x64 /mnt/loop/centos54\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install the kernel-xen package into the image. This downloads the PV kernel and ramdisk to the image." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# yum -c /mnt/loop/centos54/etc/yum.conf --installroot=/mnt/loop/centos62/ -y install kernel-xen" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a grub entry in /boot/grub/grub.conf." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mkdir -p /mnt/loop/centos62/boot/grub\n" -"# touch /mnt/loop/centos62/boot/grub/grub.conf\n" -"# echo \"\" > /mnt/loop/centos62/boot/grub/grub.conf\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Determine the name of the PV kernel that has been installed into the image." -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "# cd /mnt/loop/centos62\n" -"# ls lib/modules/\n" -"2.6.16.33-xenU 2.6.16-xenU 2.6.18-164.15.1.el5xen 2.6.18-164.6.1.el5.centos.plus 2.6.18-xenU-ec2-v1.0 2.6.21.7-2.fc8xen 2.6.31-302-ec2\n" -"# ls boot/initrd*\n" -"boot/initrd-2.6.18-164.6.1.el5.centos.plus.img boot/initrd-2.6.18-164.15.1.el5xen.img\n" -"# ls boot/vmlinuz*\n" -"boot/vmlinuz-2.6.18-164.15.1.el5xen boot/vmlinuz-2.6.18-164.6.1.el5.centos.plus boot/vmlinuz-2.6.18-xenU-ec2-v1.0 boot/vmlinuz-2.6.21-2952.fc8xen\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Xen kernels/ramdisk always end with \"xen\". For the kernel version you choose, there has to be an entry for that version under lib/modules, there has to be an initrd and vmlinuz corresponding to that. Above, the only kernel that satisfies this condition is 2.6.18-164.15.1.el5xen." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Based on your findings, create an entry in the grub.conf file. Below is an example entry." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "default=0\n" -"timeout=5\n" -"hiddenmenu\n" -"title CentOS (2.6.18-164.15.1.el5xen)\n" -" root (hd0,0)\n" -" kernel /boot/vmlinuz-2.6.18-164.15.1.el5xen ro root=/dev/xvda \n" -" initrd /boot/initrd-2.6.18-164.15.1.el5xen.img\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit etc/fstab, changing “sda1†to “xvda†and changing “sdb†to “xvdbâ€." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cat etc/fstab\n" -"/dev/xvda / ext3 defaults 1 1\n" -"/dev/xvdb /mnt ext3 defaults 0 0\n" -"none /dev/pts devpts gid=5,mode=620 0 0\n" -"none /proc proc defaults 0 0\n" -"none /sys sysfs defaults 0 0\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Enable login via the console. The default console device in a XenServer system is xvc0. Ensure that etc/inittab and etc/securetty have the following lines respectively:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "# grep xvc0 etc/inittab \n" -"co:2345:respawn:/sbin/agetty xvc0 9600 vt100-nav\n" -"# grep xvc0 etc/securetty \n" -"xvc0\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure the ramdisk supports PV disk and PV network. Customize this for the kernel version you have determined above." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# chroot /mnt/loop/centos54\n" -"# cd /boot/\n" -"# mv initrd-2.6.18-164.15.1.el5xen.img initrd-2.6.18-164.15.1.el5xen.img.bak\n" -"# mkinitrd -f /boot/initrd-2.6.18-164.15.1.el5xen.img --with=xennet --preload=xenblk --omit-scsi-modules 2.6.18-164.15.1.el5xen\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Change the password." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# passwd\n" -"Changing password for user root.\n" -"New UNIX password: \n" -"Retype new UNIX password: \n" -"passwd: all authentication tokens updated successfully.\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Exit out of chroot." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# exit" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check etc/ssh/sshd_config for lines allowing ssh login using a password." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# egrep \"PermitRootLogin|PasswordAuthentication\" /mnt/loop/centos54/etc/ssh/sshd_config \n" -"PermitRootLogin yes\n" -"PasswordAuthentication yes\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you need the template to be enabled to reset passwords from the &PRODUCT; UI or API, install the password change script into the image at this point. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unmount and delete loopback mount." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# umount /mnt/loop/centos54\n" -"# losetup -d /dev/loop0\n" -"" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Copy the image file to your XenServer host's file-based storage repository. In the example below, the Xenserver is \"xenhost\". This XenServer has an NFS repository whose uuid is a9c5b8c8-536b-a193-a6dc-51af3e5ff799." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# scp CentOS_6.2_x64 xenhost:/var/run/sr-mount/a9c5b8c8-536b-a193-a6dc-51af3e5ff799/" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the Xenserver and create a VDI the same size as the image." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "[root@xenhost ~]# cd /var/run/sr-mount/a9c5b8c8-536b-a193-a6dc-51af3e5ff799\n" -"[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# ls -lh CentOS_6.2_x64\n" -"-rw-r--r-- 1 root root 10G Mar 16 16:49 CentOS_6.2_x64\n" -"[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# xe vdi-create virtual-size=10GiB sr-uuid=a9c5b8c8-536b-a193-a6dc-51af3e5ff799 type=user name-label=\"Centos 6.2 x86_64\"\n" -"cad7317c-258b-4ef7-b207-cdf0283a7923\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Import the image file into the VDI. This may take 10–20 minutes." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# xe vdi-import filename=CentOS_6.2_x64 uuid=cad7317c-258b-4ef7-b207-cdf0283a7923" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Locate a the VHD file. This is the file with the VDI’s UUID as its name. Compress it and upload it to your web server." -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# bzip2 -c cad7317c-258b-4ef7-b207-cdf0283a7923.vhd > CentOS_6.2_x64.vhd.bz2\n" -"[root@xenhost a9c5b8c8-536b-a193-a6dc-51af3e5ff799]# scp CentOS_6.2_x64.vhd.bz2 webserver:/var/www/html/templates/\n" -"" -msgstr "" - diff --git a/docs/pot/increase-management-server-max-memory.pot b/docs/pot/increase-management-server-max-memory.pot deleted file mode 100644 index 77e2d58622b..00000000000 --- a/docs/pot/increase-management-server-max-memory.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Increase Management Server Maximum Memory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the Management Server is subject to high demand, the default maximum JVM memory allocation can be insufficient. To increase the memory:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Edit the Tomcat configuration file:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "/etc/cloud/management/tomcat6.conf" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Change the command-line parameter -XmxNNNm to a higher value of N." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example, if the current value is -Xmx128m, change it to -Xmx1024m or higher." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To put the new setting into effect, restart the Management Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information about memory issues, see \"FAQ: Memory\" at Tomcat Wiki." -msgstr "" - diff --git a/docs/pot/incremental-snapshots-backup.pot b/docs/pot/incremental-snapshots-backup.pot deleted file mode 100644 index 8bf7ddbbecb..00000000000 --- a/docs/pot/incremental-snapshots-backup.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Incremental Snapshots and Backup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Snapshots are created on primary storage where a disk resides. After a snapshot is created, it is immediately backed up to secondary storage and removed from primary storage for optimal utilization of space on primary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; does incremental backups for some hypervisors. When incremental backups are supported, every N backup is a full backup." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware vSphere" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Citrix XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Support incremental backup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "N" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Y" -msgstr "" - diff --git a/docs/pot/initial-setup-of-external-firewalls-loadbalancers.pot b/docs/pot/initial-setup-of-external-firewalls-loadbalancers.pot deleted file mode 100644 index 1534cbafb58..00000000000 --- a/docs/pot/initial-setup-of-external-firewalls-loadbalancers.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Initial Setup of External Firewalls and Load Balancers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the first VM is created for a new account, &PRODUCT; programs the external firewall and load balancer to work with the VM. The following objects are created on the firewall:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new logical interface to connect to the account's private VLAN. The interface IP is always the first IP of the account's private subnet (e.g. 10.1.1.1)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A source NAT rule that forwards all outgoing traffic from the account's private VLAN to the public Internet, using the account's public IP address as the source address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A firewall filter counter that measures the number of bytes of outgoing traffic for the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following objects are created on the load balancer:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new VLAN that matches the account's provisioned Zone VLAN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A self IP for the VLAN. This is always the second IP of the account's private subnet (e.g. 
10.1.1.2)." -msgstr "" - diff --git a/docs/pot/initialize-and-test.pot b/docs/pot/initialize-and-test.pot deleted file mode 100644 index f6a88b9184f..00000000000 --- a/docs/pot/initialize-and-test.pot +++ /dev/null @@ -1,100 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Initialize and Test" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After everything is configured, &PRODUCT; will perform its initialization. This can take 30 minutes or more, depending on the speed of your network. When the initialization has completed successfully, the administrator's Dashboard should be displayed in the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Verify that the system is ready. In the left navigation bar, select Templates. Click on the CentOS 5.5 (64bit) no Gui (KVM) template. 
Check to be sure that the status is \"Download Complete.\" Do not proceed to the next step until this status is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Go to the Instances tab, and filter by My Instances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Instance and follow the steps in the wizard." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the zone you just added." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the template selection, choose the template to use in the VM. If this is a fresh installation, likely only the provided CentOS template is available." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select a service offering. Be sure that the hardware you have allows starting the selected service offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In data disk offering, if desired, add another data disk. This is a second volume that will be available to but not mounted in the guest. For example, in Linux on XenServer you will see /dev/xvdb in the guest after rebooting the VM. A reboot is not required if you have a PV-enabled OS kernel in use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In default network, choose the primary network for the guest. In a trial installation, you would have only one option here." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Optionally give your VM a name and a group. Use any descriptive text you would like." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Launch VM. Your VM will be created and started. It might take some time to download the template and complete the VM startup. You can watch the VM’s progress in the Instances screen." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To use the VM, click the View Console button. ConsoleButton.png: button to launch a console " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Congratulations! You have successfully completed a &PRODUCT; Installation." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you decide to grow your deployment, you can add more hosts, primary storage, zones, pods, and clusters." -msgstr "" - diff --git a/docs/pot/install-usage-server.pot b/docs/pot/install-usage-server.pot deleted file mode 100644 index 7a82178ddfb..00000000000 --- a/docs/pot/install-usage-server.pot +++ /dev/null @@ -1,106 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Installing the Usage Server (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can optionally install the Usage Server once the Management Server is configured properly. The Usage Server takes data from the events in the system and enables usage-based billing for accounts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When multiple Management Servers are present, the Usage Server may be installed on any number of them. 
The Usage Servers will coordinate usage processing. A site that is concerned about availability should install Usage Servers on at least two Management Servers." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Requirements for Installing the Usage Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server must be running when the Usage Server is installed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Usage Server must be installed on the same server as a Management Server." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Steps to Install the Usage Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run ./install.sh." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# ./install.sh\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You should see a few messages as the installer prepares, followed by a list of choices." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose \"S\" to install the Usage Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" > S\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Once installed, start the Usage Server with the following command." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# service cloud-usage start\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Administration Guide discusses further configuration of the Usage Server." -msgstr "" - diff --git a/docs/pot/installation-complete.pot b/docs/pot/installation-complete.pot deleted file mode 100644 index a2a7eb2a4ac..00000000000 --- a/docs/pot/installation-complete.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Installation Complete! Next Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Congratulations! You have now installed &PRODUCT; Management Server and the database it uses to persist system data." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "What should you do next?" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Even without adding any cloud infrastructure, you can run the UI to get a feel for what's offered and how you will interact with &PRODUCT; on an ongoing basis. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you're ready, add the cloud infrastructure and try running some virtual machines on it, so you can watch how &PRODUCT; manages the infrastructure. See ." -msgstr "" - diff --git a/docs/pot/installation-steps-overview.pot b/docs/pot/installation-steps-overview.pot deleted file mode 100644 index 43c22e135b1..00000000000 --- a/docs/pot/installation-steps-overview.pot +++ /dev/null @@ -1,110 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Overview of Installation Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For anything more than a simple trial installation, you will need guidance for a variety of configuration choices. It is strongly recommended that you read the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choosing a Deployment Architecture" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choosing a Hypervisor: Supported Features" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage Setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Best Practices" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure you have the required hardware ready. See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install the Management Server (choose single-node or multi-node). See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the UI. See " -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Add a zone. Includes the first pod, cluster, and host. See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more pods (optional). See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more clusters (optional). See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more hosts (optional). See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more primary storage (optional). See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more secondary storage (optional). See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Try using the cloud. See " -msgstr "" - diff --git a/docs/pot/installation.pot b/docs/pot/installation.pot deleted file mode 100644 index 1d572b02855..00000000000 --- a/docs/pot/installation.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Installation" -msgstr "" - diff --git a/docs/pot/installation_steps_overview.pot b/docs/pot/installation_steps_overview.pot deleted file mode 100644 index c3a7186e4b1..00000000000 --- a/docs/pot/installation_steps_overview.pot +++ /dev/null @@ -1,135 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Overview of Installation Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For anything more than a simple trial installation, you will need guidance for a variety of configuration choices. It is strongly recommended that you read the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choosing a Deployment Architecture" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choosing a Hypervisor: Supported Features" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Setup" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Storage Setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Best Practices" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure you have the required hardware ready" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) Fill out the preparation checklists" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install the &PRODUCT; software" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install the Management Server (choose single-node or multi-node)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the UI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provision your cloud infrastructure" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add a zone. Includes the first pod, cluster, and host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more pods" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more clusters" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more primary storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more secondary storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Try using the cloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Initialization and testing" -msgstr "" - diff --git a/docs/pot/inter-vlan-routing.pot b/docs/pot/inter-vlan-routing.pot deleted file mode 100644 index d65e88e4832..00000000000 --- a/docs/pot/inter-vlan-routing.pot +++ /dev/null @@ -1,120 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Inter-VLAN Routing" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Inter-VLAN Routing is the capability to route network traffic between VLANs. This feature enables you to build Virtual Private Clouds (VPC), an isolated segment of your cloud, that can hold multi-tier applications. These tiers are deployed on different VLANs that can communicate with each other. You provision VLANs to the tiers your create, and VMs can be deployed on different tiers. The VLANs are connected to a virtual router, which facilitates communication between the VMs. In effect, you can segment VMs by means of VLANs into different networks that can host multi-tier applications, such as Web, Application, or Database. Such segmentation by means of VLANs logically separate application VMs for higher security and lower broadcasts, while remaining physically connected to the same device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This feature is supported on XenServer and VMware hypervisors." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The major advantages are:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The administrator can deploy a set of VLANs and allow users to deploy VMs on these VLANs. 
A guest VLAN is randomly alloted to an account from a pre-specified set of guest VLANs. All the VMs of a certain tier of an account reside on the guest VLAN allotted to that account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A VLAN allocated for an account cannot be shared between multiple accounts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The administrator can allow users create their own VPC and deploy the application. In this scenario, the VMs that belong to the account are deployed on the VLANs allotted to that account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Both administrators and users can create multiple VPCs. The guest network NIC is plugged to the VPC virtual router when the first VM is deployed in a tier." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The administrator can create the following gateways to send to or receive traffic from the VMs:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN Gateway: For more information, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Gateway: The public gateway for a VPC is added to the virtual router when the virtual router is created for VPC. The public gateway is not exposed to the end users. You are not allowed to list it, nor allowed to create any static routes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private Gateway: For more information, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Both administrators and users can create various possible destinations-gateway combinations. However, only one gateway of each type can be used in a deployment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLANs and Public Gateway: For example, an application is deployed in the cloud, and the Web application VMs communicate with the Internet." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "VLANs, VPN Gateway, and Public Gateway: For example, an application is deployed in the cloud; the Web application VMs communicate with the Internet; and the database VMs communicate with the on-premise devices." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The administrator can define Access Control List (ACL) on the virtual router to filter the traffic among the VLANs or between the Internet and a VLAN. You can define ACL based on CIDR, port range, protocol, type code (if ICMP protocol is selected) and Ingress/Egress type." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following figure shows the possible deployment scenarios of a Inter-VLAN setup:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To set up a multi-tier Inter-VLAN deployment, see ." -msgstr "" - diff --git a/docs/pot/introduction.pot b/docs/pot/introduction.pot deleted file mode 100644 index d5ae7911e6a..00000000000 --- a/docs/pot/introduction.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Introduction" -msgstr "" - diff --git a/docs/pot/ip-forwarding-firewalling.pot b/docs/pot/ip-forwarding-firewalling.pot deleted file mode 100644 index d4aeefa0de3..00000000000 --- a/docs/pot/ip-forwarding-firewalling.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "IP Forwarding and Firewalling" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By default, all incoming traffic to the public IP address is rejected. 
All outgoing traffic from the guests is translated via NAT to the public IP address and is allowed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To allow incoming traffic, users may set up firewall rules and/or port forwarding rules. For example, you can use a firewall rule to open a range of ports on the public IP address, such as 33 through 44. Then use port forwarding rules to direct traffic from individual ports within that range to specific ports on user VMs. For example, one port forwarding rule could route incoming traffic on the public IP's port 33 to port 100 on one user VM's private IP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the steps to implement these rules, see Firewall Rules and Port Forwarding." -msgstr "" - diff --git a/docs/pot/ip-load-balancing.pot b/docs/pot/ip-load-balancing.pot deleted file mode 100644 index ef46d00aebb..00000000000 --- a/docs/pot/ip-load-balancing.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "IP Load Balancing" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The user may choose to associate the same public IP for multiple guests. &PRODUCT; implements a TCP-level load balancer with the following policies." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Round-robin" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Least connection" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Source IP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This is similar to port forwarding but the destination may be multiple IP addresses." -msgstr "" - diff --git a/docs/pot/ipaddress-usage-record-format.pot b/docs/pot/ipaddress-usage-record-format.pot deleted file mode 100644 index 99a0044ccd0..00000000000 --- a/docs/pot/ipaddress-usage-record-format.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "IP Address Usage Record Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For IP address usage the following fields exist in a usage record." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "account - name of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "accountid - ID of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "domainid - ID of the domain in which this account resides" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "zoneid - Zone where the usage occurred" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "description - A string describing what the usage record is tracking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage - String representation of the usage, including the units of usage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usagetype - A number representing the usage type (see Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rawusage - A number representing the actual usage in hours" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usageid - IP address ID" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startdate, enddate - The range of time for which the usage is aggregated; see Dates in the Usage Record" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "issourcenat - Whether source NAT is enabled for the IP address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "iselastic - True if the IP address is elastic." 
-msgstr "" - diff --git a/docs/pot/isolated-networks.pot b/docs/pot/isolated-networks.pot deleted file mode 100644 index 0e550ba64ca..00000000000 --- a/docs/pot/isolated-networks.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Isolated Networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "An isolated network can be accessed only by virtual machines of a single account. Isolated networks have the following properties." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Resources such as VLAN are allocated and garbage collected dynamically" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There is one network offering for the entire network" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The network offering can be upgraded or downgraded but it is for the entire network" -msgstr "" - diff --git a/docs/pot/job-status.pot b/docs/pot/job-status.pot deleted file mode 100644 index 8e27d84b25f..00000000000 --- a/docs/pot/job-status.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Job Status" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The key to using an asynchronous command is the job ID that is returned immediately once the command has been executed. With the job ID, you can periodically check the job status by making calls to queryAsyncJobResult command. The command will return three possible job status integer values:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "0 - Job is still in progress. Continue to periodically poll for any status changes." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "1 - Job has successfully completed. The job will return any successful response values associated with command that was originally executed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "2 - Job has failed to complete. Please check the \"jobresultcode\" tag for failure reason code and \"jobresult\" for the failure reason." -msgstr "" - diff --git a/docs/pot/kvm-topology-req.pot b/docs/pot/kvm-topology-req.pot deleted file mode 100644 index 6b675244863..00000000000 --- a/docs/pot/kvm-topology-req.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "KVM Topology Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Servers communicate with KVM hosts on port 22 (ssh)." 
-msgstr "" - diff --git a/docs/pot/large_scale_redundant_setup.pot b/docs/pot/large_scale_redundant_setup.pot deleted file mode 100644 index ccfbde45a8b..00000000000 --- a/docs/pot/large_scale_redundant_setup.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Large-Scale Redundant Setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This diagram illustrates the network architecture of a large-scale &PRODUCT; deployment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A layer-3 switching layer is at the core of the data center. A router redundancy protocol like VRRP should be deployed. Typically high-end core switches also include firewall modules. Separate firewall appliances may also be used if the layer-3 switch does not have integrated firewall capabilities. The firewalls are configured in NAT mode. 
The firewalls provide the following functions:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Forwards HTTP requests and API calls from the Internet to the Management Server. The Management Server resides on the management network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the cloud spans multiple zones, the firewalls should enable site-to-site VPN such that servers in different zones can directly reach each other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A layer-2 access switch layer is established for each pod. Multiple switches can be stacked to increase port count. In either case, redundant pairs of layer-2 switches should be deployed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server cluster (including front-end load balancers, Management Server nodes, and the MySQL database) is connected to the management network through a pair of load balancers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Secondary storage servers are connected to the management network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each pod contains storage and computing servers. Each storage and computing server should have redundant NICs connected to separate layer-2 access switches." -msgstr "" - diff --git a/docs/pot/layer2-switch.pot b/docs/pot/layer2-switch.pot deleted file mode 100644 index 8eab8f7e015..00000000000 --- a/docs/pot/layer2-switch.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Layer-2 Switch" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The layer-2 switch is the access switching layer inside the pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "It should trunk all VLANs into every computing host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "It should switch traffic for the management network containing computing and storage hosts. The layer-3 switch will serve as the gateway for the management network." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Example Configurations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section contains example configurations for specific switch models for pod-level layer-2 switching. It assumes VLAN management protocols such as VTP or GVRP have been disabled. The scripts must be changed appropriately if you choose to use VTP or GVRP." -msgstr "" - diff --git a/docs/pot/lb-policy-pfwd-rule-usage-record-format.pot b/docs/pot/lb-policy-pfwd-rule-usage-record-format.pot deleted file mode 100644 index 4fe042f3201..00000000000 --- a/docs/pot/lb-policy-pfwd-rule-usage-record-format.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Load Balancer Policy or Port Forwarding Rule Usage Record Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "account - name of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "accountid - ID of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "domainid - ID of the domain in which this account resides" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "zoneid - Zone where the usage occurred" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "description - A string describing what the usage record is tracking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage - String representation of the usage, including the units of usage (e.g. 'Hrs' for hours)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usagetype - A number representing the usage type (see Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rawusage - A number representing the actual usage in hours" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "usageid - ID of the load balancer policy or port forwarding rule" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startdate, enddate - The range of time for which the usage is aggregated; see Dates in the Usage Record" -msgstr "" - diff --git a/docs/pot/linux-installation.pot b/docs/pot/linux-installation.pot deleted file mode 100644 index 495d96b97b6..00000000000 --- a/docs/pot/linux-installation.pot +++ /dev/null @@ -1,101 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Linux OS Installation" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use the following steps to begin the Linux OS installation:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Download the script file cloud-set-guest-password:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Linux: " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Windows: " -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Copy this file to /etc/init.d." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On some Linux distributions, copy the file to /etc/rc.d/init.d." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following command to make the script executable:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "chmod +x /etc/init.d/cloud-set-guest-password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Depending on the Linux distribution, continue with the appropriate step." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Fedora, CentOS/RHEL, and Debian, run:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "chkconfig --add cloud-set-guest-password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu with VMware tools, link the script file to the /etc/network/if-up and /etc/network/if-down folders, and run the script:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "#ln -s /etc/init.d/cloud-set-guest-password /etc/network/if-up/cloud-set-guest-password\n" -" #ln -s /etc/init.d/cloud-set-guest-password /etc/network/if-down/cloud-set-guest-password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are using Ubuntu 11.04, start by creating a directory called /var/lib/dhcp3 on your Ubuntu machine (works around a known issue with this version of Ubuntu). On all Ubuntu versions: Run “sudo update-rc.d cloud-set-guest-password defaults 98”. To test, run \"mkpasswd\" and check that it is generating a new password. If the “mkpasswd” command does not exist, run \"sudo apt-get install whois\" (or sudo apt-get install mkpasswd, depending on your Ubuntu version) and repeat."
-msgstr "" - diff --git a/docs/pot/load-balancer-rules.pot b/docs/pot/load-balancer-rules.pot deleted file mode 100644 index ac3cd64404b..00000000000 --- a/docs/pot/load-balancer-rules.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Load Balancer Rules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A &PRODUCT; user or administrator may create load balancing rules that balance traffic received at a public IP to one or more VMs. A user creates a rule, specifies an algorithm, and assigns the rule to a set of VMs." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you create load balancing rules while using a network service offering that includes an external load balancer device such as NetScaler, and later change the network service offering to one that uses the &PRODUCT; virtual router, you must create a firewall rule on the virtual router for each of your existing load balancing rules so that they continue to function." -msgstr "" - diff --git a/docs/pot/log-in-root-admin.pot b/docs/pot/log-in-root-admin.pot deleted file mode 100644 index 08484eb3c05..00000000000 --- a/docs/pot/log-in-root-admin.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Logging In as the Root Administrator" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After the Management Server software is installed and running, you can run the &PRODUCT; user interface. 
This UI is there to help you provision, view, and manage your cloud infrastructure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Open your favorite Web browser and go to this URL. Substitute the IP address of your own Management Server:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://<management-server-ip-address>:8080/client" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After logging into a fresh Management Server installation, a guided tour splash screen appears. On later visits, you’ll be taken directly into the Dashboard." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you see the first-time splash screen, choose one of the following." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Continue with basic setup. Choose this if you're just trying &PRODUCT;, and you want a guided walkthrough of the simplest possible configuration so that you can get started right away. We'll help you set up a cloud with the following features: a single machine that runs &PRODUCT; software and uses NFS to provide storage; a single machine running VMs under the XenServer or KVM hypervisor; and a shared public network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The prompts in this guided tour should give you all the information you need, but if you want just a bit more detail, you can follow along in the Trial Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "I have used &PRODUCT; before. Choose this if you have already gone through a design phase and planned a more sophisticated deployment, or you are ready to start scaling up a trial cloud that you set up earlier with the basic setup screens. In the Administrator UI, you can start using the more powerful features of &PRODUCT;, such as advanced VLAN networking, high availability, additional network elements such as load balancers and firewalls, and support for multiple hypervisors including Citrix XenServer, KVM, and VMware vSphere." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The root administrator Dashboard appears." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You should set a new root administrator password. If you chose basic setup, you’ll be prompted to create a new password right away. If you chose experienced user, use the steps in ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You are logging in as the root administrator. This account manages the &PRODUCT; deployment, including physical infrastructure. The root administrator can modify configuration settings to change basic functionality, create or delete user accounts, and take many actions that should be performed only by an authorized person. Please change the default password to a new, unique password." -msgstr "" - diff --git a/docs/pot/log-in.pot b/docs/pot/log-in.pot deleted file mode 100644 index 2faf14b1738..00000000000 --- a/docs/pot/log-in.pot +++ /dev/null @@ -1,90 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Log In to the UI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides a web-based UI that can be used by both administrators and end users. The appropriate version of the UI is displayed depending on the credentials used to log in. The UI is available in popular browsers including IE7, IE8, IE9, Firefox 3.5+, Firefox 4, Safari 4, and Safari 5. The URL is: (substitute your own management server IP address)" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://<management-server-ip-address>:8080/client" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On a fresh Management Server installation, a guided tour splash screen appears. On later visits, you’ll see a login screen where you specify the following to proceed to your Dashboard:" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Username" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The user ID of your account. The default username is admin." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The password associated with the user ID. The password for the default username is password." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Domain" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are a root user, leave this field blank." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are a user in the sub-domains, enter the full path to the domain, excluding the root domain." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example, suppose multiple levels are created under the root domain, such as Comp1/hr. 
The users in the Comp1 domain should enter Comp1 in the Domain field, whereas the users in the Comp1/sales domain should enter Comp1/sales." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more guidance about the choices that appear when you log in to this UI, see Logging In as the Root Administrator." -msgstr "" - diff --git a/docs/pot/long-running-job-events.pot b/docs/pot/long-running-job-events.pot deleted file mode 100644 index ecb39473a44..00000000000 --- a/docs/pot/long-running-job-events.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Long Running Job Events" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The events log records three types of standard events." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "INFO. This event is generated when an operation has been successfully performed." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "WARN. This event is generated in the following circumstances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a network is disconnected while monitoring a template download." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a template download is abandoned." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When an issue on the storage server causes the volumes to fail over to the mirror storage server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ERROR. This event is generated when an operation has not been successfully performed" -msgstr "" - diff --git a/docs/pot/maintain-hypervisors-on-hosts.pot b/docs/pot/maintain-hypervisors-on-hosts.pot deleted file mode 100644 index 5acc8771d40..00000000000 --- a/docs/pot/maintain-hypervisors-on-hosts.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Maintaining Hypervisors on Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When running hypervisor software on hosts, be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor’s support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The lack of up-do-date hotfixes can lead to data corruption and lost VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(XenServer) For more information, see Highly Recommended Hotfixes for XenServer in the &PRODUCT; Knowledge Base" -msgstr "" - diff --git a/docs/pot/maintenance-mode-for-primary-storage.pot b/docs/pot/maintenance-mode-for-primary-storage.pot deleted file mode 100644 index 85363ce5713..00000000000 --- a/docs/pot/maintenance-mode-for-primary-storage.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Maintenance Mode for Primary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Primary storage may be placed into maintenance mode. This is useful, for example, to replace faulty RAM in a storage device. Maintenance mode for a storage device will first stop any new guests from being provisioned on the storage device. Then it will stop all guests that have any volume on that storage device. When all such guests are stopped the storage device is in maintenance mode and may be shut down. When the storage device is online again you may cancel maintenance mode for the device. The &PRODUCT; will bring the device back online and attempt to start all guests that were running at the time of the entry into maintenance mode." -msgstr "" - diff --git a/docs/pot/making-api-request.pot b/docs/pot/making-api-request.pot deleted file mode 100644 index a3faea0b69a..00000000000 --- a/docs/pot/making-api-request.pot +++ /dev/null @@ -1,105 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Making API Requests" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All &PRODUCT; API requests are submitted in the form of a HTTP GET/POST with an associated command and any parameters. A request is composed of the following whether in HTTP or HTTPS:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; API URL: This is the web services API entry point(for example, http://www.cloud.com:8080/client/api)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Command: The web services command you wish to execute, such as start a virtual machine or create a disk volume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Parameters: Any additional required or optional parameters for the command" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A sample API GET request looks like the following:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://localhost:8080/client/api?command=deployVirtualMachine&serviceOfferingId=1&diskOfferingId=1&templateId=2&zoneId=4&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Or in a more readable format:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"1. http://localhost:8080/client/api\n" -"2. ?command=deployVirtualMachine\n" -"3. &serviceOfferingId=1\n" -"4. &diskOfferingId=1\n" -"5. &templateId=2\n" -"6. &zoneId=4\n" -"7. 
&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXqjB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ\n" -"8. &signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The first line is the &PRODUCT; API URL. This is the Cloud instance you wish to interact with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The second line refers to the command you wish to execute. In our example, we are attempting to deploy a fresh new virtual machine. It is preceded by a (?) to separate itself from the &PRODUCT; API URL." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Lines 3-6 are the parameters for this given command. To see the command and its request parameters, please refer to the appropriate section in the &PRODUCT; API documentation. Each parameter field-value pair (field=value) is preceded by an ampersand character (&)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Line 7 is the user API Key that uniquely identifies the account. See Signing API Requests on page 7." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Line 8 is the signature hash created to authenticate the user account executing the API command. See Signing API Requests on page 7." -msgstr "" - diff --git a/docs/pot/manage-cloud.pot b/docs/pot/manage-cloud.pot deleted file mode 100644 index a263fef34c9..00000000000 --- a/docs/pot/manage-cloud.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Managing the Cloud" -msgstr "" - diff --git a/docs/pot/management-server-install-client.pot b/docs/pot/management-server-install-client.pot deleted file mode 100644 index e663f7f213a..00000000000 --- a/docs/pot/management-server-install-client.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Install the Management Server on the First Host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The first step in installation, whether you are installing the Management Server on one host or many, is to install the software on a single node." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are planning to install the Management Server on multiple nodes for high availability, do not proceed to the additional nodes yet. That step will come later." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; Management server can be installed using either RPM or DEB packages. These packages will depend on everything you need to run the Management server." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Downloading vhd-util" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This procedure is required only for installations where XenServer is installed on the hypervisor hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before setting up the Management Server, download vhd-util from vhd-util." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the Management Server is RHEL or CentOS, copy vhd-util to /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the Management Server is Ubuntu, copy vhd-util to /usr/lib/cloud/common/scripts/vm/hypervisor/xenserver/vhd-util." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Install on CentOS/RHEL" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We start by installing the required packages:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "yum install cloud-client" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Install on Ubuntu" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "apt-get install cloud-client" -msgstr "" - diff --git a/docs/pot/management-server-install-complete.pot b/docs/pot/management-server-install-complete.pot deleted file mode 100644 index a171be6f1d6..00000000000 --- a/docs/pot/management-server-install-complete.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Installation Complete! Next Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Congratulations! You have now installed &PRODUCT; Management Server and the database it uses to persist system data." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "What should you do next?" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Even without adding any cloud infrastructure, you can run the UI to get a feel for what's offered and how you will interact with &PRODUCT; on an ongoing basis. See Log In to the UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you're ready, add the cloud infrastructure and try running some virtual machines on it, so you can watch how &PRODUCT; manages the infrastructure. See Provision Your Cloud Infrastructure." -msgstr "" - diff --git a/docs/pot/management-server-install-db-external.pot b/docs/pot/management-server-install-db-external.pot deleted file mode 100644 index 7b0f6474c56..00000000000 --- a/docs/pot/management-server-install-db-external.pot +++ /dev/null @@ -1,219 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Install the Database on a Separate Node" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "This section describes how to install MySQL on a standalone machine, separate from the Management Server. This technique is intended for a deployment that includes several Management Server nodes. If you have a single-node Management Server deployment, you will typically use the same node for MySQL. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The management server doesn't require a specific distribution for the MySQL node. You can use a distribution or Operating System of your choice. Using the same distribution as the management server is recommended, but not required. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install MySQL from the package repository from your distribution:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL or CentOS:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "yum install mysql-server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "apt-get install mysql-server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the MySQL configuration (/etc/my.cnf or /etc/mysql/my.cnf, depending on your OS) and insert the following lines in the [mysqld] section. You can put these lines below the datadir line. The max_connections parameter should be set to 350 multiplied by the number of Management Servers you are deploying. This example assumes two Management Servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu, you can also create /etc/mysql/conf.d/cloudstack.cnf file and add these directives there. Don't forget to add [mysqld] on the first line of the file." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "innodb_rollback_on_timeout=1\n" -"innodb_lock_wait_timeout=600\n" -"max_connections=700\n" -"log-bin=mysql-bin\n" -"binlog-format = 'ROW'\n" -"bind-address = 0.0.0.0" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Start or restart MySQL to put the new configuration into effect." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL/CentOS, MySQL doesn't automatically start after installation. Start it manually." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "service mysqld start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu, restart MySQL." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "service mysqld restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(CentOS and RHEL only; not required on Ubuntu)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL and CentOS, MySQL does not set a root password by default. It is very strongly recommended that you set a root password as a security precaution." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following command to secure your installation. You can answer \"Y\" to all questions except \"Disallow root login remotely?\". Remote root login is required to set up the databases." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mysql_secure_installation" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If a firewall is present on the system, open TCP port 3306 so external MySQL connections can be established." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu, UFW is the default firewall. Open the port with this command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "ufw allow mysql" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL/CentOS:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the /etc/sysconfig/iptables file and add the following line at the beginning of the INPUT chain." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "-A INPUT -p tcp --dport 3306 -j ACCEPT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now reload the iptables rules." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "service iptables restart" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Return to the root shell on your first Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up the database. The following command creates the cloud user on the database." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In dbpassword, specify the password to be assigned to the cloud user. You can choose to provide no password." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In deploy-as, specify the username and password of the user deploying the database. In the following command, it is assumed the root user is deploying the database and creating the cloud user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For encryption_type, use file or web to indicate the technique used to pass in the database encryption password. Default: file. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For management_server_key, substitute the default key that is used to encrypt confidential parameters in the &PRODUCT; properties file. Default: password. It is highly recommended that you replace this with a more secure value. See About Password and Key Encryption." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For database_key, substitute the default key that is used to encrypt confidential parameters in the &PRODUCT; database. Default: password. It is highly recommended that you replace this with a more secure value. See ." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "cloud-setup-databases cloud:<dbpassword>@<ip address mysql server> \\\n" -"--deploy-as=root:<password> \\\n" -"-e <encryption_type> \\\n" -"-m <management_server_key> \\\n" -"-k <database_key>" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When this script is finished, you should see a message like “Successfully initialized the database.â€" -msgstr "" - diff --git a/docs/pot/management-server-install-db-local.pot b/docs/pot/management-server-install-db-local.pot deleted file mode 100644 index 36af4d0ff0e..00000000000 --- a/docs/pot/management-server-install-db-local.pot +++ /dev/null @@ -1,198 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Install the Database on the Management Server Node" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section describes how to install MySQL on the same machine with the Management Server. This technique is intended for a simple deployment that has a single Management Server node. If you have a multi-node Management Server deployment, you will typically use a separate node for MySQL. See ." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Install MySQL from the package repository from your distribution:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL or CentOS:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "yum install mysql-server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "apt-get install mysql-server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the MySQL configuration (/etc/my.cnf or /etc/mysql/my.cnf, depending on your OS) and insert the following lines in the [mysqld] section. You can put these lines below the datadir line. The max_connections parameter should be set to 350 multiplied by the number of Management Servers you are deploying. This example assumes one Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu, you can also create a file /etc/mysql/conf.d/cloudstack.cnf and add these directives there. Don't forget to add [mysqld] on the first line of the file." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "innodb_rollback_on_timeout=1\n" -"innodb_lock_wait_timeout=600\n" -"max_connections=350\n" -"log-bin=mysql-bin\n" -"binlog-format = 'ROW'" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start or restart MySQL to put the new configuration into effect." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL/CentOS, MySQL doesn't automatically start after installation. Start it manually." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "service mysqld start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu, restart MySQL." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "service mysqld restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(CentOS and RHEL only; not required on Ubuntu)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL and CentOS, MySQL does not set a root password by default. 
It is very strongly recommended that you set a root password as a security precaution." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following command to secure your installation. You can answer \"Y\" to all questions." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mysql_secure_installation" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up the database. The following command creates the \"cloud\" user on the database." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In dbpassword, specify the password to be assigned to the \"cloud\" user. You can choose to provide no password although that is not recommended." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In deploy-as, specify the username and password of the user deploying the database. In the following command, it is assumed the root user is deploying the database and creating the \"cloud\" user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For encryption_type, use file or web to indicate the technique used to pass in the database encryption password. Default: file. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For management_server_key, substitute the default key that is used to encrypt confidential parameters in the &PRODUCT; properties file. Default: password. It is highly recommended that you replace this with a more secure value. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) For database_key, substitute the default key that is used to encrypt confidential parameters in the &PRODUCT; database. Default: password. It is highly recommended that you replace this with a more secure value. See ." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "cloud-setup-databases cloud:<dbpassword>@localhost \\\n" -"--deploy-as=root:<password> \\\n" -"-e <encryption_type> \\\n" -"-m <management_server_key> \\\n" -"-k <database_key>" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When this script is finished, you should see a message like “Successfully initialized the database.â€" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are running the KVM hypervisor on the same machine with the Management Server, edit /etc/sudoers and add the following line:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "Defaults:cloud !requiretty" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This type of single-machine setup is recommended only for a trial installation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now that the database is set up, you can finish configuring the OS for the Management Server. This command will set up iptables, sudoers, and start the Management Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cloud-setup-management" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You should see the message “&PRODUCT; Management Server setup is done.â€" -msgstr "" - diff --git a/docs/pot/management-server-install-db.pot b/docs/pot/management-server-install-db.pot deleted file mode 100644 index cca61e42ac4..00000000000 --- a/docs/pot/management-server-install-db.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Install the database server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; management server uses a MySQL database server to store its data. When you are installing the management server on a single node, you can install the MySQL server locally. For an installation that has multiple management server nodes, we assume the MySQL database also runs on a separate node." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; has been tested with MySQL 5.1 and 5.5. These versions are included in RHEL/CentOS and Ubuntu." -msgstr "" - diff --git a/docs/pot/management-server-install-flow.pot b/docs/pot/management-server-install-flow.pot deleted file mode 100644 index 43c2a0cc68e..00000000000 --- a/docs/pot/management-server-install-flow.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Management Server Installation" -msgstr "" - diff --git a/docs/pot/management-server-install-multi-node.pot b/docs/pot/management-server-install-multi-node.pot deleted file mode 100644 index 22d80b6f21d..00000000000 --- a/docs/pot/management-server-install-multi-node.pot +++ /dev/null @@ -1,110 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Prepare and Start Additional Management Servers" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "For your second and subsequent Management Servers, you will install the Management Server software, connect it to the database, and set up the OS for the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Perform the steps in and or as appropriate." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This step is required only for installations where XenServer is installed on the hypervisor hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Download vhd-util from vhd-util" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the Management Server is RHEL or CentOS, copy vhd-util to /usr/lib64/cloud/common/scripts/vm/hypervisor/xenserver." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the Management Server is Ubuntu, copy vhd-util to /usr/lib/cloud/common/scripts/vm/hypervisor/xenserver/vhd-util." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that necessary services are started and set to start on boot." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service rpcbind start\n" -"# service nfs start\n" -"# chkconfig nfs on\n" -"# chkconfig rpcbind on\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure the database client. Note the absence of the --deploy-as argument in this case. (For more details about the arguments to this command, see .)" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cloud-setup-databases cloud:dbpassword@dbhost -e encryption_type -m management_server_key -k database_key\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure the OS and start the Management Server:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cloud-setup-management" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server on this node should now be running." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat these steps on each additional Management Server." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Be sure to configure a load balancer for the Management Servers. See Management Server Load Balancing." -msgstr "" - diff --git a/docs/pot/management-server-install-nfs-shares.pot b/docs/pot/management-server-install-nfs-shares.pot deleted file mode 100644 index b269c4955e7..00000000000 --- a/docs/pot/management-server-install-nfs-shares.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:58\n" -"PO-Revision-Date: 2013-02-02T20:11:58\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Prepare NFS Shares" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; needs a place to keep primary and secondary storage (see Cloud Infrastructure Overview). Both of these can be NFS shares. This section tells how to set up the NFS shares before adding the storage to &PRODUCT;." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Alternative Storage" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "NFS is not the only option for primary or secondary storage. For example, you may use a Ceph RDB cluster, GlusterFS, iSCSI, and otthers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The requirements for primary and secondary storage are described in:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A production installation typically uses a separate NFS server. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can also use the Management Server node as the NFS server. This is more typical of a trial installation, but is technically possible in a larger deployment. See ." -msgstr "" - diff --git a/docs/pot/management-server-install-overview.pot b/docs/pot/management-server-install-overview.pot deleted file mode 100644 index 5eb76440894..00000000000 --- a/docs/pot/management-server-install-overview.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Management Server Installation Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section describes installing the Management Server. There are two slightly different installation flows, depending on how many Management Server nodes will be in your cloud:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A single Management Server node, with MySQL on the same node." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Multiple Management Server nodes, with MySQL on a node separate from the Management Servers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In either case, each machine must meet the system requirements described in System Requirements." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the sake of security, be sure the public Internet can not access port 8096 or port 8250 on the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The procedure for installing the Management Server is:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare the Operating System" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(XenServer only) Download and install vhd-util." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install the First Management Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install and Configure the MySQL database" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare NFS Shares" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare and Start Additional Management Servers (optional)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Prepare the System VM Template" -msgstr "" - diff --git a/docs/pot/management-server-install-prepare-os.pot b/docs/pot/management-server-install-prepare-os.pot deleted file mode 100644 index e8360ff6800..00000000000 --- a/docs/pot/management-server-install-prepare-os.pot +++ /dev/null @@ -1,105 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Prepare the Operating System" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The OS must be prepared to host the Management Server using the following steps. These steps must be performed on each Management Server node." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to your OS as root." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check for a fully qualified hostname." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "hostname --fqdn" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "This should return a fully qualified hostname such as \"managament1.lab.example.org\". If it does not, edit /etc/hosts so that it does." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure that the machine can reach the Internet." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "ping www.cloudstack.org" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Turn on NTP for time synchronization." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NTP is required to synchronize the clocks of the servers in your cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install NTP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On RHEL or CentOS:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "yum install ntp" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "apt-get install openntpd" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat all of these steps on every host where the Management Server will be installed." -msgstr "" - diff --git a/docs/pot/management-server-install-systemvm.pot b/docs/pot/management-server-install-systemvm.pot deleted file mode 100644 index d6d238c049b..00000000000 --- a/docs/pot/management-server-install-systemvm.pot +++ /dev/null @@ -1,111 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Prepare the System VM Template" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Secondary storage must be seeded with a template that is used for &PRODUCT; system VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the Management Server, run one or more of the following cloud-install-sys-tmplt commands to retrieve and decompress the system VM template. Run the command for each hypervisor type that you expect end users to run in this Zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If your secondary storage mount point is not named /mnt/secondary, substitute your own mount point name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you set the &PRODUCT; database encryption type to \"web\" when you set up the database, you must now add the parameter -s <management-server-secret-key>. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This process will require approximately 5 GB of free space on the local file system and up to 30 minutes each time it runs." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "For XenServer:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2 -h xenserver -s <optional-management-server-secret-key> -F" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For vSphere:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/burbank/burbank-systemvm-08012012.ova -h vmware -s <optional-management-server-secret-key> -F" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For KVM:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -s <optional-management-server-secret-key> -F" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are using a separate NFS server, perform this step. If you are using the Management Server as the NFS server, you MUST NOT perform this step." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the script has finished, unmount secondary storage and remove the created directory." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# umount /mnt/secondary\n" -"# rmdir /mnt/secondary" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat these steps for each secondary storage server." -msgstr "" - diff --git a/docs/pot/management-server-lb.pot b/docs/pot/management-server-lb.pot deleted file mode 100644 index df9ded46302..00000000000 --- a/docs/pot/management-server-lb.pot +++ /dev/null @@ -1,105 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Zone VLAN and Running VM Maximums" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; can use a load balancer to provide a virtual IP for multiple Management Servers. The administrator is responsible for creating the load balancer rules for the Management Servers. The application requires persistence or stickiness across multiple sessions. The following chart lists the ports that should be load balanced and whether or not persistence is required." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Even if persistence is not required, enabling it is permitted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Source Port" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Destination Port" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Persistence Required?" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "80 or 443" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "8080 (or 20400 with AJP)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "HTTP (or AJP)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Yes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "8250" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TCP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "8096" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "HTTP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "No" -msgstr "" - diff --git a/docs/pot/management-server-overview.pot b/docs/pot/management-server-overview.pot deleted file mode 100644 index 0f79f5e856a..00000000000 --- a/docs/pot/management-server-overview.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Management Server Overview" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The Management Server is the &PRODUCT; software that manages cloud resources. By interacting with the Management Server through its UI or API, you can configure and manage your cloud infrastructure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server runs on a dedicated server or VM. It controls allocation of virtual machines to hosts and assigns storage and IP addresses to the virtual machine instances. The Management Server runs in a Tomcat container and requires a MySQL database for persistence." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The machine must meet the system requirements described in System Requirements." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provides the web user interface for the administrator and a reference user interface for end users." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provides the APIs for &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Manages the assignment of guest VMs to particular hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Manages the assignment of public and private IP addresses to particular accounts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Manages the allocation of storage to guests as virtual disks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Manages snapshots, templates, and ISO images, possibly replicating them across data centers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provides a single point of configuration for the cloud." -msgstr "" - diff --git a/docs/pot/manual-live-migration.pot b/docs/pot/manual-live-migration.pot deleted file mode 100644 index 80043620c32..00000000000 --- a/docs/pot/manual-live-migration.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Moving VMs Between Hosts (Manual Live Migration)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; administrator can move a running VM from one host to another without interrupting service to users or going into maintenance mode. This is called manual live migration, and can be done under the following conditions:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The root administrator is logged in. Domain admins and users can not perform manual live migration of VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VM is running. Stopped VMs can not be live migrated." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The destination host must be in the same cluster as the original host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VM must not be using local disk storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The destination host must have enough available capacity. 
If not, the VM will remain in the \"migrating\" state until memory becomes available." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To manually live migrate a virtual machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Instances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose the VM that you want to migrate." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Migrate Instance button Migrateinstance.png: button to migrate an instance " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "From the list of hosts, choose the one to which you want to move the VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - diff --git a/docs/pot/marvin.pot b/docs/pot/marvin.pot deleted file mode 100644 index f693be5c0a0..00000000000 --- a/docs/pot/marvin.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Marvin" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Marvin is the &PRODUCT; automation framework. It originated as a tool for integration testing but is now also used to build DevCloud as well as to provide a Python &PRODUCT; API binding." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Marvin's complete documenation is on the wiki at https://cwiki.apache.org/CLOUDSTACK/testing-with-python.html" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The source code is located at tools/marvin" -msgstr "" - diff --git a/docs/pot/max-result-page-returned.pot b/docs/pot/max-result-page-returned.pot deleted file mode 100644 index 031e59f2916..00000000000 --- a/docs/pot/max-result-page-returned.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Maximum Result Pages Returned" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For each cloud, there is a default upper limit on the number of results that any API command will return in a single page. This is to help prevent overloading the cloud servers and prevent DOS attacks. For example, if the page size limit is 500 and a command returns 10,000 results, the command will return 20 pages." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The default page size limit can be different for each cloud. It is set in the global configuration parameter default.page.size. If your cloud has many users with lots of VMs, you might need to increase the value of this parameter. At the same time, be careful not to set it so high that your site can be taken down by an enormous return from an API call. For more information about how to set global configuration parameters, see \"Describe Your Deployment\" in the Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To decrease the page size limit for an individual API command, override the global setting with the page and pagesize parameters, which are available in any list* command (listCapabilities, listDiskOfferings, etc.)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Both parameters must be specified together." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The value of the pagesize parameter must be smaller than the value of default.page.size. That is, you can not increase the number of possible items in a result page, only decrease it." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "For syntax information on the list* commands, see the API Reference." -msgstr "" - diff --git a/docs/pot/migrate-datadisk-volume-new-storage-pool.pot b/docs/pot/migrate-datadisk-volume-new-storage-pool.pot deleted file mode 100644 index d9e2f9716b9..00000000000 --- a/docs/pot/migrate-datadisk-volume-new-storage-pool.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Migrating a Data Disk Volume to a New Storage Pool" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Detach the data disk from the VM. See Detaching and Moving Volumes (but skip the “reattach†step at the end. You will do that after migrating to new storage)." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Call the &PRODUCT; API command migrateVolume and pass in the volume ID and the ID of any storage pool in the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Watch for the volume status to change to Migrating, then back to Ready." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Attach the volume to any desired VM running in the same cluster as the new storage server. See Attaching a Volume " -msgstr "" - diff --git a/docs/pot/migrate-vm-rootvolume-volume-new-storage-pool.pot b/docs/pot/migrate-vm-rootvolume-volume-new-storage-pool.pot deleted file mode 100644 index 7da0f8d0751..00000000000 --- a/docs/pot/migrate-vm-rootvolume-volume-new-storage-pool.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Migrating a VM Root Volume to a New Storage Pool" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When migrating the root disk volume, the VM must first be stopped, and users can not access the VM. After migration is complete, the VM can be restarted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as a user or admin." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Detach the data disk from the VM. See Detaching and Moving Volumes (but skip the “reattach†step at the end. You will do that after migrating to new storage)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop the VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Call the &PRODUCT; API command migrateVirtualMachine with the ID of the VM to migrate and the IDs of a destination host and destination storage pool in the same zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Watch for the VM status to change to Migrating, then back to Stopped." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the VM." -msgstr "" - diff --git a/docs/pot/minimum-system-requirements.pot b/docs/pot/minimum-system-requirements.pot deleted file mode 100644 index c7862b4b6e1..00000000000 --- a/docs/pot/minimum-system-requirements.pot +++ /dev/null @@ -1,150 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Minimum System Requirements" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Management Server, Database, and Storage System Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The machines that will run the Management Server and MySQL database must meet the following requirements. The same machines can also be used to provide primary and secondary storage, such as via localdisk or NFS. The Management Server may be placed on a virtual machine." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Operating system:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Preferred: CentOS/RHEL 6.3+ or Ubuntu 12.04(.1)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "64-bit x86 CPU (more cores results in better performance)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4 GB of memory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "50 GB of local disk (When running secondary storage on the management server 500GB is recommended)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "At least 1 NIC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Statically allocated IP address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fully qualified domain name as returned by the hostname command" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Host/Hypervisor System Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The host is where the cloud services run in the form of guest virtual machines. Each host is one machine that meets the following requirements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Must support HVM (Intel-VT or AMD-V enabled)." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Hardware virtualization support required" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "36 GB of local disk" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If DHCP is used for hosts, ensure that no conflict occurs between DHCP server used for these hosts and the DHCP router created by &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Latest hotfixes applied to hypervisor software" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you deploy &PRODUCT;, the hypervisor host must not have any VMs already running" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All hosts within a cluster must be homogenous. The CPUs must be of the same type, count, and feature flags." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hosts have additional requirements depending on the hypervisor. See the requirements listed at the top of the Installation section for your chosen hypervisor:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure you fulfill the additional hypervisor requirements and installation steps provided in this Guide. Hypervisor hosts must be properly prepared to work with CloudStack. For example, the requirements for XenServer are listed under Citrix XenServer Installation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - diff --git a/docs/pot/modify-delete-service-offerings.pot b/docs/pot/modify-delete-service-offerings.pot deleted file mode 100644 index 02835318331..00000000000 --- a/docs/pot/modify-delete-service-offerings.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Modifying or Deleting a Service Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Service offerings cannot be changed once created. This applies to both compute offerings and disk offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A service offering can be deleted. If it is no longer in use, it is deleted immediately and permanently. If the service offering is still in use, it will remain in the database until all the virtual machines referencing it have been deleted. After deletion by the administrator, a service offering will not be available to end users that are creating new instances." -msgstr "" - diff --git a/docs/pot/multi_node_management_server.pot b/docs/pot/multi_node_management_server.pot deleted file mode 100644 index edada3cbebe..00000000000 --- a/docs/pot/multi_node_management_server.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Multi-Node Management Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; Management Server is deployed on one or more front-end servers connected to a single MySQL database. Optionally a pair of hardware load balancers distributes requests from the web. A backup management server set may be deployed using MySQL replication at a remote site to add DR capabilities." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The administrator must decide the following." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Whether or not load balancers will be used." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "How many Management Servers will be deployed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Whether MySQL replication will be deployed to enable disaster recovery." 
-msgstr "" - diff --git a/docs/pot/multi_node_overview.pot b/docs/pot/multi_node_overview.pot deleted file mode 100644 index c67e8a79233..00000000000 --- a/docs/pot/multi_node_overview.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Management Server Multi-Node Installation Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section describes installing multiple Management Servers and installing MySQL on a node separate from the Management Servers. The machines must meet the system requirements described in System Requirements." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the sake of security, be sure the public Internet can not access port 8096 or port 8250 on the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The procedure for a multi-node installation is:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Prepare the Operating System" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install the First Management Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install and Configure the Database" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare NFS Shares" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare and Start Additional Management Servers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare the System VM Template" -msgstr "" - diff --git a/docs/pot/multi_site_deployment.pot b/docs/pot/multi_site_deployment.pot deleted file mode 100644 index 822c64a19ea..00000000000 --- a/docs/pot/multi_site_deployment.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Multi-Site Deployment" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The &PRODUCT; platform scales well into multiple sites through the use of zones. The following diagram shows an example of a multi-site deployment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Data Center 1 houses the primary Management Server as well as zone 1. The MySQL database is replicated in real time to the secondary Management Server installation in Data Center 2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This diagram illustrates a setup with a separate storage network. Each server has four NICs, two connected to pod-level network switches and two connected to storage network switches." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are two ways to configure the storage network:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Bonded NIC and redundant switches can be deployed for NFS. In NFS deployments, redundant switches and bonded NICs still result in one network (one CIDR block+ default gateway address)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "iSCSI can take advantage of two separate storage networks (two CIDR blocks each with its own default gateway). Multipath iSCSI client can failover and load balance between separate storage networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This diagram illustrates the differences between NIC bonding and Multipath I/O (MPIO). NIC bonding configuration involves only one network. MPIO involves two separate networks." -msgstr "" - diff --git a/docs/pot/multiple-system-vm-vmware.pot b/docs/pot/multiple-system-vm-vmware.pot deleted file mode 100644 index 1ef8e4357fa..00000000000 --- a/docs/pot/multiple-system-vm-vmware.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Multiple System VM Support for VMware" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Every &PRODUCT; zone has single System VM for template processing tasks such as downloading templates, uploading templates, and uploading ISOs. In a zone where VMware is being used, additional System VMs can be launched to process VMware-specific tasks such as taking snapshots and creating private templates. The &PRODUCT; management server launches additional System VMs for VMware-specific tasks as the load increases. The management server monitors and weights all commands sent to these System VMs and performs dynamic load balancing and scaling-up of more System VMs." -msgstr "" - diff --git a/docs/pot/network-offering-usage-record-format.pot b/docs/pot/network-offering-usage-record-format.pot deleted file mode 100644 index b50371eb7ef..00000000000 --- a/docs/pot/network-offering-usage-record-format.pot +++ /dev/null @@ -1,90 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Network Offering Usage Record Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "account – name of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "accountid – ID of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "domainid – ID of the domain in which this account resides" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "zoneid – Zone where the usage occurred" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "description – A string describing what the usage record is tracking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usagetype – A number representing the usage type (see Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rawusage – A number representing the actual usage in hours" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "usageid – ID of the network offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "offeringid – Network offering ID" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "virtualMachineId – The ID of the virtual machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record" -msgstr "" - diff --git a/docs/pot/network-offerings.pot b/docs/pot/network-offerings.pot deleted file mode 100644 index 5493e2bdcbe..00000000000 --- a/docs/pot/network-offerings.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Network Offerings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the most up-to-date list of supported network services, see the &PRODUCT; UI or call listNetworkServices." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "A network offering is a named set of network services, such as:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DHCP" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "DNS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Source NAT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Static NAT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Port Forwarding" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Load Balancing" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Firewall" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Optional) Name one of several available providers to use for a given service, such as Juniper for the firewall" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) Network tag to specify which physical network to use" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When creating a new VM, the user chooses one of the available network offerings, and that determines which network services the VM can use." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; administrator can create any number of custom network offerings, in addition to the default network offerings provided by &PRODUCT;. By creating multiple custom network offerings, you can set up your cloud to offer different classes of service on a single multi-tenant physical network. For example, while the underlying physical wiring may be the same for two tenants, tenant A may only need simple firewall protection for their website, while tenant B may be running a web server farm and require a scalable firewall solution, load balancing solution, and alternate networks for accessing the database backend." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you create load balancing rules while using a network service offering that includes an external load balancer device such as NetScaler, and later change the network service offering to one that uses the &PRODUCT; virtual router, you must create a firewall rule on the virtual router for each of your existing load balancing rules so that they continue to function." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When creating a new virtual network, the &PRODUCT; administrator chooses which network offering to enable for that network. Each virtual network is associated with one network offering. A virtual network can be upgraded or downgraded by changing its associated network offering. If you do this, be sure to reprogram the physical network to match." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; also has internal network offerings for use by &PRODUCT; system VMs. These network offerings are not visible to users but can be modified by administrators." -msgstr "" - diff --git a/docs/pot/network-rate.pot b/docs/pot/network-rate.pot deleted file mode 100644 index ca9784b9c0d..00000000000 --- a/docs/pot/network-rate.pot +++ /dev/null @@ -1,195 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Network Throttling" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network throttling is the process of controlling the network access and bandwidth usage based on certain rules. &PRODUCT; controls this behaviour of the guest networks in the cloud by using the network rate parameter. This parameter is defined as the default data transfer rate in Mbps (Megabits Per Second) allowed in a guest network. It defines the upper limits for network utilization. If the current utilization is below the allowed upper limits, access is granted, else revoked." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can throttle the network bandwidth either to control the usage above a certain limit for some accounts, or to control network congestion in a large cloud environment. The network rate for your cloud can be configured on the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Service Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Global parameter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If network rate is set to NULL in service offering, the value provided in the vm.network.throttling.rate global parameter is applied. If the value is set to NULL for network offering, the value provided in the network.throttling.rate global parameter is considered." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the default public, storage, and management networks, network rate is set to 0. This implies that the public, storage, and management networks will have unlimited bandwidth by default. 
For default guest networks, network rate is set to NULL. In this case, network rate is defaulted to the global parameter value." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following table gives you an overview of how network rate is applied on different types of networks in &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Rate Is Taken from" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest network of Virtual Router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Guest Network Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public network of Virtual Router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage network of Secondary Storage VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "System Network Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Management network of Secondary Storage VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage network of Console Proxy VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Management network of Console Proxy VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage network of Virtual Router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Management network of Virtual Router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public network of Secondary Storage VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public network of Console Proxy VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Default network of a guest VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Compute Offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Additional networks of a guest VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Corresponding Network Offerings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A guest VM must have a default network, and can also have many additional networks. 
Depending on various parameters, such as the host and virtual switch used, you can observe a difference in the network rate in your cloud. For example, on a VMware host the actual network rate varies based on where they are configured (compute offering, network offering, or both); the network type (shared or isolated); and traffic direction (ingress or egress)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The network rate set for a network offering used by a particular network in &PRODUCT; is used for the traffic shaping policy of a port group, for example: port group A, for that network: a particular subnet or VLAN on the actual network. The virtual routers for that network connects to the port group A, and by default instances in that network connects to this port group. However, if an instance is deployed with a compute offering with the network rate set, and if this rate is used for the traffic shaping policy of another port group for the network, for example port group B, then instances using this compute offering are connected to the port group B, instead of connecting to port group A." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The traffic shaping policy on standard port groups in VMware only applies to the egress traffic, and the net effect depends on the type of network used in &PRODUCT;. In shared networks, ingress traffic is unlimited for &PRODUCT;, and egress traffic is limited to the rate that applies to the port group used by the instance if any. If the compute offering has a network rate configured, this rate applies to the egress traffic, otherwise the network rate set for the network offering applies. For isolated networks, the network rate set for the network offering, if any, effectively applies to the ingress traffic. This is mainly because the network rate set for the network offering applies to the egress traffic from the virtual router to the instance. 
The egress traffic is limited by the rate that applies to the port group used by the instance if any, similar to shared networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network rate of network offering = 10 Mbps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network rate of compute offering = 200 Mbps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In shared networks, ingress traffic will not be limited for &PRODUCT;, while egress traffic will be limited to 200 Mbps. In an isolated network, ingress traffic will be limited to 10 Mbps and egress to 200 Mbps." -msgstr "" - diff --git a/docs/pot/network-service-providers.pot b/docs/pot/network-service-providers.pot deleted file mode 100644 index 7bdf8d7998a..00000000000 --- a/docs/pot/network-service-providers.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Network Service Providers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the most up-to-date list of supported network service providers, see the &PRODUCT; UI or call listNetworkServiceProviders." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A service provider (also called a network element) is hardware or virtual appliance that makes a network service possible; for example, a firewall appliance can be installed in the cloud to provide firewall service. On a single network, multiple providers can provide the same network service. For example, a firewall service may be provided by Cisco or Juniper devices in the same physical network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can have multiple instances of the same service provider in a network (say, more than one Juniper SRX device)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If different providers are set up to provide the same service on the network, the administrator can create network offerings so users can specify which network service provider they prefer (along with the other choices offered in network offerings). Otherwise, &PRODUCT; will choose which provider to use whenever the service is called for." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Supported Network Service Providers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; ships with an internal list of the supported service providers, and you can choose from this list when creating a network offering." -msgstr "" - diff --git a/docs/pot/network-setup.pot b/docs/pot/network-setup.pot deleted file mode 100644 index 37153fb6de9..00000000000 --- a/docs/pot/network-setup.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Network Setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Achieving the correct networking setup is crucial to a successful &PRODUCT; installation. This section contains information to help you make decisions and follow the right procedures to get your network set up correctly." -msgstr "" - diff --git a/docs/pot/network-usage-record-format.pot b/docs/pot/network-usage-record-format.pot deleted file mode 100644 index 75d6481534d..00000000000 --- a/docs/pot/network-usage-record-format.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Network Usage Record Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For network usage (bytes sent/received), the following fields exist in a usage record." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "account – name of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "accountid – ID of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "domainid – ID of the domain in which this account resides" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "zoneid – Zone where the usage occurred" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "description – A string describing what the usage record is tracking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usagetype – A number representing the usage type (see Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rawusage – A number representing the actual usage in hours" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usageid – Device ID (virtual router ID or external device ID)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "type – Device type (domain router, external load balancer, etc.)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record" -msgstr "" - diff --git a/docs/pot/networking-in-a-pod.pot b/docs/pot/networking-in-a-pod.pot deleted file mode 100644 index 3cbf807cfe0..00000000000 --- a/docs/pot/networking-in-a-pod.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Networking in a Pod" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Figure 2 illustrates network setup within a single pod. The hosts are connected to a pod-level switch. At a minimum, the hosts should have one physical uplink to each switch. Bonded NICs are supported as well. The pod-level switch is a pair of redundant gigabit switches with 10 G uplinks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Servers are connected as follows:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Storage devices are connected to only the network that carries management traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hosts are connected to networks for both management traffic and public traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hosts are also connected to one or more networks carrying guest traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "We recommend the use of multiple physical Ethernet cards to implement each network interface as well as redundant switch fabric in order to maximize throughput and improve reliability." -msgstr "" - diff --git a/docs/pot/networking-in-a-zone.pot b/docs/pot/networking-in-a-zone.pot deleted file mode 100644 index 99481c81b27..00000000000 --- a/docs/pot/networking-in-a-zone.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Networking in a Zone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Figure 3 illustrates the network setup within a single zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A firewall for management traffic operates in the NAT mode. The network typically is assigned IP addresses in the 192.168.0.0/16 Class B private address space. Each pod is assigned IP addresses in the 192.168.*.0/24 Class C private address space." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each zone has its own set of public IP addresses. Public IP addresses from different zones do not overlap." -msgstr "" - diff --git a/docs/pot/networking-overview.pot b/docs/pot/networking-overview.pot deleted file mode 100644 index 24c56a960a2..00000000000 --- a/docs/pot/networking-overview.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Networking Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; offers two types of networking scenario:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Basic. For AWS-style networking. Provides a single network where guest isolation can be provided through layer-3 means such as security groups (IP address source filtering)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more details, see Network Setup." -msgstr "" - diff --git a/docs/pot/networking_overview.pot b/docs/pot/networking_overview.pot deleted file mode 100644 index aedc7ae3867..00000000000 --- a/docs/pot/networking_overview.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Networking Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack offers two types of networking scenario:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Basic. For AWS-style networking. Provides a single network where guest isolation can be provided through layer-3 means such as security groups (IP address source filtering)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more details, see Network Setup." -msgstr "" - diff --git a/docs/pot/networks-for-users-overview.pot b/docs/pot/networks-for-users-overview.pot deleted file mode 100644 index 62eed8c41b4..00000000000 --- a/docs/pot/networks-for-users-overview.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Overview of Setting Up Networking for Users" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "People using cloud infrastructure have a variety of needs and preferences when it comes to the networking services provided by the cloud. As a &PRODUCT; administrator, you can do the following things to set up networking for your users:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up physical networks in zones" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up several different providers for the same service on a single physical network (for example, both Cisco and Juniper firewalls)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Bundle different types of network services into network offerings, so users can choose the desired network services for any given virtual machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add new network offerings as time goes on so end users can upgrade to a better class of service on their network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide more ways for a network to be accessed by a user, such as through a project of which the user is a member" -msgstr "" - diff --git a/docs/pot/networks.pot b/docs/pot/networks.pot deleted file mode 100644 index 6f08ba2d5d7..00000000000 --- a/docs/pot/networks.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Managing Networks and Traffic" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a &PRODUCT;, guest VMs can communicate with each other using shared infrastructure with the security and user perception that the guests have a private LAN. The &PRODUCT; virtual router is the main component providing networking features for guest traffic." -msgstr "" - diff --git a/docs/pot/nfs-shares-on-management-server.pot b/docs/pot/nfs-shares-on-management-server.pot deleted file mode 100644 index bec36a1ee5e..00000000000 --- a/docs/pot/nfs-shares-on-management-server.pot +++ /dev/null @@ -1,240 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using the Management Server as the NFS Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section tells how to set up NFS shares for primary and secondary storage on the same node with the Management Server. This is more typical of a trial installation, but is technically possible in a larger deployment. It is assumed that you will have less than 16TB of storage on the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The exact commands for the following steps may vary depending on your operating system version." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Ubuntu/Debian systems, you'll need to install the nfs-kernel-server package:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"$ sudo apt-get install nfs-kernel-server\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the Management Server host, create two directories that you will use for primary and secondary storage. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# mkdir -p /export/primary\n" -"# mkdir -p /export/secondary\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure the new directories as NFS exports, edit /etc/exports. 
Export the NFS share(s) with rw,async,no_root_squash. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# vi /etc/exports" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Insert the following line." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "/export *(rw,async,no_root_squash)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Export the /export directory." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# exportfs -a" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the /etc/sysconfig/nfs file." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# vi /etc/sysconfig/nfs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Uncomment the following lines:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"LOCKD_TCPPORT=32803\n" -"LOCKD_UDPPORT=32769\n" -"MOUNTD_PORT=892\n" -"RQUOTAD_PORT=875\n" -"STATD_PORT=662\n" -"STATD_OUTGOING_PORT=2020\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the /etc/sysconfig/iptables file." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# vi /etc/sysconfig/iptables" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add the following lines at the beginning of the INPUT chain where <NETWORK> is the network that you'll be using:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 111 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 111 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 2049 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 32803 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 32769 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 892 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 892 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 875 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 875 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p tcp --dport 662 -j ACCEPT\n" -"-A INPUT -s <NETWORK> -m state --state NEW -p udp --dport 662 -j ACCEPT \n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following commands:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# service iptables restart\n" -"# service iptables save\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If NFS v4 communication is used between client and server, add your domain to /etc/idmapd.conf on both the hypervisor host and Management Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# vi /etc/idmapd.conf" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Remove the character # from the beginning of the Domain line in idmapd.conf and replace the value in the file with your own domain. In the example below, the domain is company.com." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "Domain = company.com" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reboot the Management Server host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Two NFS shares called /export/primary and /export/secondary are now set up." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "It is recommended that you test to be sure the previous steps have been successful." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the hypervisor host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure NFS and rpcbind are running. The commands might be different depending on your OS. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# service rpcbind start\n" -"# service nfs start\n" -"# chkconfig nfs on\n" -"# chkconfig rpcbind on\n" -"# reboot \n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log back in to the hypervisor host and try to mount the /export directories. For example (substitute your own management server name):" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# mkdir /primarymount\n" -"# mount -t nfs <management-server-name>:/export/primary /primarymount\n" -"# umount /primarymount\n" -"# mkdir /secondarymount\n" -"# mount -t nfs <management-server-name>:/export/secondary /secondarymount\n" -"# umount /secondarymount \n" -" " -msgstr "" - diff --git a/docs/pot/nfs-shares-on-separate-server.pot b/docs/pot/nfs-shares-on-separate-server.pot deleted file mode 100644 index 6dc037a9813..00000000000 --- a/docs/pot/nfs-shares-on-separate-server.pot +++ /dev/null @@ -1,108 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using a Separate NFS Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section tells how to set up NFS shares for secondary and (optionally) primary storage on an NFS server running on a separate node from the Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The exact commands for the following steps may vary depending on your operating system version." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(KVM only) Ensure that no volume is already mounted at your NFS mount point." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the storage server, create an NFS share for secondary storage and, if you are using NFS for primary storage as well, create a second NFS share. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"# mkdir -p /export/primary\n" -"# mkdir -p /export/secondary\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To configure the new directories as NFS exports, edit /etc/exports. Export the NFS share(s) with rw,async,no_root_squash. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# vi /etc/exports" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Insert the following line." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "/export *(rw,async,no_root_squash)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Export the /export directory." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# exportfs -a" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "On the management server, create a mount point for secondary storage. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mkdir -p /mnt/secondary" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Mount the secondary storage on your Management Server. Replace the example NFS server name and NFS share paths below with your own." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# mount -t nfs nfsservername:/nfs/share/secondary /mnt/secondary" -msgstr "" - diff --git a/docs/pot/offerings.pot b/docs/pot/offerings.pot deleted file mode 100644 index 7f22cbed35d..00000000000 --- a/docs/pot/offerings.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Service Offerings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In this chapter we discuss compute, disk, and system service offerings. 
Network offerings are discussed in the section on setting up networking for users." -msgstr "" - diff --git a/docs/pot/ongoing-config-of-external-firewalls-lb.pot b/docs/pot/ongoing-config-of-external-firewalls-lb.pot deleted file mode 100644 index 1976fc00f76..00000000000 --- a/docs/pot/ongoing-config-of-external-firewalls-lb.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Ongoing Configuration of External Firewalls and Load Balancers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Additional user actions (e.g. setting a port forward) will cause further programming of the firewall and load balancer. A user may request additional public IP addresses and forward traffic received at these IPs to specific VMs. 
This is accomplished by enabling static NAT for a public IP address, assigning the IP to a VM, and specifying a set of protocols and port ranges to open. When a static NAT rule is created, &PRODUCT; programs the zone's external firewall with the following objects:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A static NAT rule that maps the public IP address to the private IP address of a VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A security policy that allows traffic within the set of protocols and port ranges that are specified." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A firewall filter counter that measures the number of bytes of incoming traffic to the public IP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The number of incoming and outgoing bytes through source NAT, static NAT, and load balancing rules is measured and saved on each external element. This data is collected on a regular basis and stored in the &PRODUCT; database." -msgstr "" - diff --git a/docs/pot/over-provisioning-service-offering-limits.pot b/docs/pot/over-provisioning-service-offering-limits.pot deleted file mode 100644 index 1be8e13a1af..00000000000 --- a/docs/pot/over-provisioning-service-offering-limits.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Over-Provisioning and Service Offering Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; performs CPU over-provisioning based on an over-provisioning ratio configured by the administrator. This is defined by the cpu.overprovisioning.factor global configuration variable." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; performs CPU over-provisioning based on an over-provisioning ratio configured by the administrator. This is defined by the cpu.overprovisioning.factor global configuration variable" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Service offerings limits (e.g. 1 GHz, 1 core) are strictly enforced for core count. For example, a guest with a service offering of one core will have only one core available to it regardless of other activity on the Host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Service offering limits for gigahertz are enforced only in the presence of contention for CPU resources. For example, suppose that a guest was created with a service offering of 1 GHz on a Host that has 2 GHz cores, and that guest is the only guest running on the Host. The guest will have the full 2 GHz available to it. When multiple guests are attempting to use the CPU a weighting factor is used to schedule CPU resources. The weight is based on the clock speed in the service offering. Guests receive a CPU allocation that is proportionate to the GHz in the service offering. 
For example, a guest created from a 2 GHz service offering will receive twice the CPU allocation as a guest created from a 1 GHz service offering. &PRODUCT; does not perform memory over-provisioning." -msgstr "" - diff --git a/docs/pot/ovm-install.pot b/docs/pot/ovm-install.pot deleted file mode 100644 index aba639b633c..00000000000 --- a/docs/pot/ovm-install.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Installing OVM for &PRODUCT;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TODO" -msgstr "" - diff --git a/docs/pot/ovm-requirements.pot b/docs/pot/ovm-requirements.pot deleted file mode 100644 index 263f46ebf38..00000000000 --- a/docs/pot/ovm-requirements.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "System Requirements for OVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TODO" -msgstr "" - diff --git a/docs/pot/per-domain-limits.pot b/docs/pot/per-domain-limits.pot deleted file mode 100644 index 4ac3828d230..00000000000 --- a/docs/pot/per-domain-limits.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Per-Domain Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; allows the configuration of limits on a domain basis. With a domain limit in place, all users still have their account limits. They are additionally limited, as a group, to not exceed the resource limits set on their domain. Domain limits aggregate the usage of all accounts in the domain as well as all accounts in all subdomains of that domain. Limits set at the root domain level apply to the sum of resource usage by the accounts in all domains and sub-domains below that root domain." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To set a domain limit:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation tree, click Domains." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the domain you want to modify. The current domain limits are displayed. A value of -1 shows that there is no limit in place." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Edit button editbutton.png: edits the settings. 
" -msgstr "" - diff --git a/docs/pot/performance-monitoring.pot b/docs/pot/performance-monitoring.pot deleted file mode 100644 index b5afcfb7012..00000000000 --- a/docs/pot/performance-monitoring.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Performance Monitoring" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host and guest performance monitoring is available to end users and administrators. This allows the user to monitor their utilization of resources and determine when it is appropriate to choose a more powerful service offering or larger disk." 
-msgstr "" - diff --git a/docs/pot/physical-network-configuration-settings.pot b/docs/pot/physical-network-configuration-settings.pot deleted file mode 100644 index 204f7f2ad68..00000000000 --- a/docs/pot/physical-network-configuration-settings.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configurable Characteristics of Physical Networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides configuration settings you can use to set up a physical network in a zone, including:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "What type of network traffic it carries (guest, public, management, storage)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLANs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unique name that the hypervisor can use to find that particular network" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Enabled or disabled. When a network is first set up, it is disabled – not in use yet. The administrator sets the physical network to enabled, and it begins to be used. The administrator can later disable the network again, which prevents any new virtual networks from being created on that physical network; the existing network traffic continues even though the state is disabled." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Speed" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tags, so network offerings can be matched to physical networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Isolation method" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-about.pot b/docs/pot/plugin-niciranvp-about.pot deleted file mode 100644 index fb39acbc17d..00000000000 --- a/docs/pot/plugin-niciranvp-about.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "The Nicira NVP Plugin" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-devicemanagement.pot b/docs/pot/plugin-niciranvp-devicemanagement.pot deleted file mode 100644 index 34bcc0ebf58..00000000000 --- a/docs/pot/plugin-niciranvp-devicemanagement.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Device-management" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In CloudStack 4.0.x each Nicira NVP setup is considered a \"device\" that can be added and removed from a physical network. To complete the configuration of the Nicira NVP plugin a device needs to be added to the physical network using the \"addNiciraNVPDevice\" API call. The plugin is now enabled on the physical network and any guest networks created on that network will be provisioned using the Nicra NVP Controller." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The plugin introduces a set of new API calls to manage the devices, see below or refer to the API reference." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addNiciraNvpDevice" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "physicalnetworkid: the UUID of the physical network on which the device is configured" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "hostname: the IP address of the NVP controller" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "username: the username for access to the NVP API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "password: the password for access to the NVP API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "transportzoneuuid: the UUID of the transportzone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "deleteNiciraNVPDevice" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "nvpdeviceid: the UUID of the device" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNiciraNVPDevices" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-features.pot b/docs/pot/plugin-niciranvp-features.pot deleted file mode 100644 index 0c4154bfbb8..00000000000 --- a/docs/pot/plugin-niciranvp-features.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Features of the Nicira NVP Plugin" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In CloudStack release 4.0.0-incubating this plugin supports the Connectivity service. This service is responsible for creating Layer 2 networks supporting the networks created by Guests. In other words when an tennant creates a new network, instead of the traditional VLAN a logical network will be created by sending the appropriate calls to the Nicira NVP Controller." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The plugin has been tested with Nicira NVP versions 2.1.0, 2.2.0 and 2.2.1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In CloudStack 4.0.0-incubating only the XenServer hypervisor is supported for use in combination with Nicira NVP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In CloudStack 4.1.0-incubating both KVM and XenServer hypervisors are supported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In CloudStack 4.0.0-incubating the UI components for this plugin are not complete, configuration is done by sending commands to the API." -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-guide.pot b/docs/pot/plugin-niciranvp-guide.pot deleted file mode 100644 index 665e4ff217f..00000000000 --- a/docs/pot/plugin-niciranvp-guide.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Plugin Guide for the Nicira NVP Plugin" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-introduction.pot b/docs/pot/plugin-niciranvp-introduction.pot deleted file mode 100644 index 2f50cf01fa6..00000000000 --- a/docs/pot/plugin-niciranvp-introduction.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Introduction to the Nicira NVP Plugin" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Nicira NVP plugin allows CloudStack to use the Nicira solution for virtualized network as a provider for CloudStack networks and services." -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-preparations.pot b/docs/pot/plugin-niciranvp-preparations.pot deleted file mode 100644 index c3bdbaea3fd..00000000000 --- a/docs/pot/plugin-niciranvp-preparations.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Prerequisites" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before enabling the Nicira NVP plugin the NVP Controller needs to be configured. Please review the NVP User Guide on how to do that." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack needs to have at least one physical network with the isolation method set to \"STT\". This network should be enabled for the Guest traffic type." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Guest traffic type should be configured with the traffic label that matches the name of the Integration Bridge on the hypervisor. See the Nicira NVP User Guide for more details on how to set this up in XenServer or KVM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure you have the following information ready:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP address of the NVP Controller" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The username to access the API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The password to access the API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The UUID of the Transport Zone that contains the hypervisors in this Zone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The UUID of the Physical Network that will used for the Guest networks" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-provider.pot b/docs/pot/plugin-niciranvp-provider.pot deleted file mode 100644 index 1f1b1e914e1..00000000000 --- a/docs/pot/plugin-niciranvp-provider.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Enabling the service provider" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To allow CloudStack to use the Nicira NVP Plugin the network service provider needs to be enabled on the physical network. The following sequence of API calls will enable the network service provider" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "addNetworkServiceProvider" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "name = \"NiciraNvp\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "physicalnetworkid = <the uuid of the physical network>" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "updateNetworkServiceProvider" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "id = <the provider uuid returned by the previous call>" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "state = \"Enabled\"" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-revisions.pot b/docs/pot/plugin-niciranvp-revisions.pot deleted file mode 100644 index ea63a27dca9..00000000000 --- a/docs/pot/plugin-niciranvp-revisions.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Revision History" -msgstr "" - -#. Tag: firstname -#, no-c-format -msgid "Hugo" -msgstr "" - -#. Tag: surname -#, no-c-format -msgid "Trippaers" -msgstr "" - -#. Tag: member -#, no-c-format -msgid "Documentation created for 4.0.0-incubating version of the NVP Plugin" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-tables.pot b/docs/pot/plugin-niciranvp-tables.pot deleted file mode 100644 index 5e1570f9984..00000000000 --- a/docs/pot/plugin-niciranvp-tables.pot +++ /dev/null @@ -1,90 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Database tables" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following tables are added to the cloud database for the Nicira NVP Plugin" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "nicira_nvp_nic_map, contains a mapping from nic to logical switch port" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "id" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "logicalswitch, uuid of the logical switch this port is connected to" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "logicalswitchport, uuid of the logical switch port for this nic" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "nic, the CloudStack uuid for this nic, reference to the nics table" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "external_nicira_nvp_devices, contains all configured devices" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "uuid" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "physical_network_id, the physical network this device is configured on" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "provider_name, set to \"NiciraNvp\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "device_name, display name for this device" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "host_id, reference to the host table with the device configuration" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-troubleshooting.pot b/docs/pot/plugin-niciranvp-troubleshooting.pot deleted file mode 100644 index 45cdc9aa099..00000000000 --- a/docs/pot/plugin-niciranvp-troubleshooting.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Troubleshooting the Nicira NVP Plugin" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-ui.pot b/docs/pot/plugin-niciranvp-ui.pot deleted file mode 100644 index 80d69bd636c..00000000000 --- a/docs/pot/plugin-niciranvp-ui.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Configuring the Nicira NVP plugin from the UI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In CloudStack 4.1.0-incubating the Nicira NVP plugin and its resources can be configured in the infrastructure tab of the UI. Navigate to the physical network with STT isolation and configure the network elements. The NiciraNvp is listed here." -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-usage.pot b/docs/pot/plugin-niciranvp-usage.pot deleted file mode 100644 index 165ea6d7413..00000000000 --- a/docs/pot/plugin-niciranvp-usage.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using the Nicira NVP Plugin" -msgstr "" - diff --git a/docs/pot/plugin-niciranvp-uuidreferences.pot b/docs/pot/plugin-niciranvp-uuidreferences.pot deleted file mode 100644 index 8cbf4774251..00000000000 --- a/docs/pot/plugin-niciranvp-uuidreferences.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "UUID References" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The plugin maintains several references in the CloudStack database to items created on the NVP Controller." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Every guest network this is created will have its broadcast type set to Lswitch and if the network is in state \"Implemented\", the broadcast URI will have the UUID of the Logical Switch that was created for this network on the NVP Controller." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Nics that are connected to one of the Logical Switches will have their Logical Switch Port UUID listed in the nicira_nvp_nic_map table" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All devices created on the NVP Controller will have a tag set to domain-account of the owner of the network, this string can be used to search for items in the NVP Controller." -msgstr "" - diff --git a/docs/pot/pod-add.pot b/docs/pot/pod-add.pot deleted file mode 100644 index 857b5a1360d..00000000000 --- a/docs/pot/pod-add.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Pod" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you created a new zone, &PRODUCT; adds the first pod for you. You can add more pods at any time using the procedure in this section." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone to which you want to add a pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Compute and Storage tab. In the Pods node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Enter the following details in the dialog." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. The name of the pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateway. The gateway for the hosts in that pod." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Netmask. The network prefix that defines the pod's subnet. Use CIDR notation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start/End Reserved System IP. 
The IP range in the management network that &PRODUCT; uses to manage various system VMs, such as Secondary Storage VMs, Console Proxy VMs, and DHCP. For more information, see System Reserved IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - diff --git a/docs/pot/port-forwarding.pot b/docs/pot/port-forwarding.pot deleted file mode 100644 index 47f16ff90dc..00000000000 --- a/docs/pot/port-forwarding.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Port Forwarding" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A port forward service is a set of port forwarding rules that define a policy. A port forward service is then applied to one or more guest VMs. The guest VM then has its inbound network access managed according to the policy defined by the port forwarding service. 
You can optionally specify one or more CIDRs to filter the source IPs. This is useful when you want to allow only incoming requests from certain IP addresses to be forwarded." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A guest VM can be in any number of port forward services. Port forward services can be defined but have no members. If a guest VM is part of more than one network, port forwarding rules will function only if they are defined on the default network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You cannot use port forwarding to open ports for an elastic IP address. When elastic IP is used, outside access is instead controlled through the use of security groups. See Security Groups." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To set up port forwarding:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you have not already done so, add a public IP address range to a zone in &PRODUCT;. See Adding a Zone and Pod in the Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add one or more VM instances to &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the guest network where the VMs are running." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose an existing IP address or acquire a new IP address. (See Acquiring a New IP Address on page 73.) Click the name of the IP address in the list." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configuration tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Port Forwarding node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Fill in the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Port. 
The port to which public traffic will be addressed on the IP address you acquired in the previous step." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private Port. The port on which the instance is listening for forwarded public traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol. The communication protocol in use between the two ports." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add." -msgstr "" - diff --git a/docs/pot/prepare-system-vm-template.pot b/docs/pot/prepare-system-vm-template.pot deleted file mode 100644 index 6429e4ee078..00000000000 --- a/docs/pot/prepare-system-vm-template.pot +++ /dev/null @@ -1,116 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Prepare the System VM Template" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Secondary storage must be seeded with a template that is used for &PRODUCT; system VMs. 
Citrix provides you with the necessary binary package of the system VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the Management Server, run one or more of the following cloud-install-sys-tmplt commands to retrieve and decompress the system VM template. Run the command for each hypervisor type that you expect end users to run in this Zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If your secondary storage mount point is not named /mnt/secondary, substitute your own mount point name." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you set the &PRODUCT; database encryption type to \"web\" when you set up the database, you must now add the parameter -s <management-server-secret-key>. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This process will require approximately 5 GB of free space on the local file system and up to 30 minutes each time it runs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For XenServer:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2 -h xenserver -s <optional-management-server-secret-key> -F" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For vSphere:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.ova -h vmware -s <optional-management-server-secret-key> -F" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For KVM:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "# /usr/lib64/cloud/common/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -s <optional-management-server-secret-key> -F" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are using a separate NFS server, perform this step." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Do not perform this step if you are using the Management Server as the NFS server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the script has finished, unmount secondary storage and remove the created directory." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# umount /mnt/secondary\n" -"# rmdir /mnt/secondary" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat these steps for each secondary storage server." -msgstr "" - diff --git a/docs/pot/primary-storage-add.pot b/docs/pot/primary-storage-add.pot deleted file mode 100644 index c414a11103a..00000000000 --- a/docs/pot/primary-storage-add.pot +++ /dev/null @@ -1,190 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Add Primary Storage" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "System Requirements for Primary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hardware requirements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Any standards-compliant iSCSI or NFS server that is supported by the underlying hypervisor." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The storage server should be a machine with a large number of disks. The disks should ideally be managed by a hardware RAID controller." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Minimum required capacity depends on your needs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When setting up primary storage, follow these restrictions:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Primary storage cannot be added until a host has been added to the cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you do not provision shared primary storage, you must set the global configuration parameter system.vm.local.storage.required to true, or else you will not be able to start VMs." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Adding Primary Stroage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you create a new zone, the first primary storage is added as part of that procedure. You can add primary storage servers at any time, such as when adding a new cluster or adding more servers to an existing cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure there is nothing stored on the server. Adding the server to &PRODUCT; will destroy any existing data." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI (see )." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure. In Zones, click View More, then click the zone in which you want to add the primary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Compute tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Primary Storage node of the diagram, click View All." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Primary Storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following information in the dialog. The information required varies depending on your choice in Protocol." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pod. The pod for the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cluster. The cluster for the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name. The name of the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Protocol. For XenServer, choose either NFS, iSCSI, or PreSetup. For KVM, choose NFS or SharedMountPoint. For vSphere choose either VMFS (iSCSI or FiberChannel) or NFS." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Server (for NFS, iSCSI, or PreSetup). The IP address or DNS name of the storage device." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Server (for VMFS). The IP address or DNS name of the vCenter server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path (for NFS). In NFS this is the exported path from the server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path (for VMFS). In vSphere this is a combination of the datacenter name and the datastore name. The format is \"/\" datacenter name \"/\" datastore name. For example, \"/cloud.dc.VM/cluster1datastore\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Path (for SharedMountPoint). With KVM this is the path on each host that is where this primary storage is mounted. 
For example, \"/mnt/primary\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SR Name-Label (for PreSetup). Enter the name-label of the SR that has been set up outside &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Target IQN (for iSCSI). In iSCSI this is the IQN of the target. For example, iqn.1986-03.com.sun:02:01ec9bb549-1271378984." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Lun # (for iSCSI). In iSCSI this is the LUN number. For example, 3." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tags (optional). The comma-separated list of tags for this storage device. It should be an equivalent set or superset of the tags on your disk offerings.." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The tag sets on primary storage across clusters in a Zone must be identical. For example, if cluster A provides primary storage that has tags T1 and T2, all other clusters in the Zone must also provide primary storage that has tags T1 and T2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK." -msgstr "" - diff --git a/docs/pot/primary-storage-outage-and-data-loss.pot b/docs/pot/primary-storage-outage-and-data-loss.pot deleted file mode 100644 index 4801afdf6e4..00000000000 --- a/docs/pot/primary-storage-outage-and-data-loss.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Primary Storage Outage and Data Loss" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a primary storage outage occurs the hypervisor immediately stops all VMs stored on that storage device. Guests that are marked for HA will be restarted as soon as practical when the primary storage comes back on line. With NFS, the hypervisor may allow the virtual machines to continue running depending on the nature of the issue. For example, an NFS hang will cause the guest VMs to be suspended until storage connectivity is restored.Primary storage is not designed to be backed up. Individual volumes in primary storage can be backed up using snapshots." -msgstr "" - diff --git a/docs/pot/primary-storage.pot b/docs/pot/primary-storage.pot deleted file mode 100644 index 5fb36b3022d..00000000000 --- a/docs/pot/primary-storage.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Primary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section gives concepts and technical details about &PRODUCT; primary storage. For information about how to install and configure primary storage through the &PRODUCT; UI, see the Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - diff --git a/docs/pot/private-public-template.pot b/docs/pot/private-public-template.pot deleted file mode 100644 index b982f60f491..00000000000 --- a/docs/pot/private-public-template.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Private and Public Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a user creates a template, it can be designated private or public." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private templates are only available to the user who created them. By default, an uploaded template is private." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a user marks a template as “public,†the template becomes available to all users in all accounts in the user's domain, as well as users in any other domains that have access to the Zone where the template is stored. This depends on whether the Zone, in turn, was defined as private or public. A private Zone is assigned to a single domain, and a public Zone is accessible to any domain. If a public template is created in a private Zone, it is available only to users in the domain assigned to that Zone. If a public template is created in a public Zone, it is available to all users in all domains." -msgstr "" - diff --git a/docs/pot/projects-overview.pot b/docs/pot/projects-overview.pot deleted file mode 100644 index e5fd8f122c8..00000000000 --- a/docs/pot/projects-overview.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Overview of Projects" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Projects are used to organize people and resources. &PRODUCT; users within a single domain can group themselves into project teams so they can collaborate and share virtual resources such as VMs, snapshots, templates, data disks, and IP addresses. &PRODUCT; tracks resource usage per project as well as per user, so the usage can be billed to either a user account or a project. For example, a private cloud within a software company might have all members of the QA department assigned to one project, so the company can track the resources used in testing while the project members can more easily isolate their efforts from other users of the same cloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can configure &PRODUCT; to allow any user to create a new project, or you can restrict that ability to just &PRODUCT; administrators. Once you have created a project, you become that project’s administrator, and you can add others within your domain to the project. &PRODUCT; can be set up either so that you can add people directly to a project, or so that you have to send an invitation which the recipient must accept. 
Project members can view and manage all virtual resources created by anyone in the project (for example, share VMs). A user can be a member of any number of projects and can switch views in the &PRODUCT; UI to show only project-related information, such as project VMs, fellow project members, project-related alerts, and so on." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The project administrator can pass on the role to another project member. The project administrator can also add more members, remove members from the project, set new resource limits (as long as they are below the global defaults set by the &PRODUCT; administrator), and delete the project. When the administrator removes a member from the project, resources created by that user, such as VM instances, remain with the project. This brings us to the subject of resource ownership and which resources can be used by a project." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Resources created within a project are owned by the project, not by any particular &PRODUCT; account, and they can be used only within the project. A user who belongs to one or more projects can still create resources outside of those projects, and those resources belong to the user’s account; they will not be counted against the project’s usage or resource limits. You can create project-level networks to isolate traffic within the project and provide network services such as port forwarding, load balancing, VPN, and static NAT. A project can also make use of certain types of resources from outside the project, if those resources are shared. For example, a shared network or public template is available to any project in the domain. A project can get access to a private template if the template’s owner will grant permission. A project can use any service offering or disk offering available in its domain; however, you can not create private service and disk offerings at the project level.." 
-msgstr "" - diff --git a/docs/pot/projects.pot b/docs/pot/projects.pot deleted file mode 100644 index 2b6939a713f..00000000000 --- a/docs/pot/projects.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using Projects to Organize Users and Resources" -msgstr "" - diff --git a/docs/pot/provisioning-auth-api.pot b/docs/pot/provisioning-auth-api.pot deleted file mode 100644 index 671e64173f0..00000000000 --- a/docs/pot/provisioning-auth-api.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Provisioning and Authentication API" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; expects that a customer will have their own user provisioning infrastructure. It provides APIs to integrate with these existing systems where the systems call out to &PRODUCT; to add/remove users.." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; supports pluggable authenticators. By default, &PRODUCT; assumes it is provisioned with the user’s password, and as a result authentication is done locally. However, external authentication is possible as well. For example, see Using an LDAP Server for User Authentication." -msgstr "" - diff --git a/docs/pot/provisioning-steps-overview.pot b/docs/pot/provisioning-steps-overview.pot deleted file mode 100644 index 8d2873aefee..00000000000 --- a/docs/pot/provisioning-steps-overview.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Overview of Provisioning Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After the Management Server is installed and running, you can add the compute resources for it to manage. For an overview of how a &PRODUCT; cloud infrastructure is organized, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To provision the cloud infrastructure, or to scale it up at any time, follow these procedures:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Change the root password. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add a zone. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more pods (optional). See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more clusters (optional). See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add more hosts (optional). See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add primary storage. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Add secondary storage. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Initialize and test the new cloud. 
See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you have finished these steps, you will have a deployment with the following basic structure:" -msgstr "" - diff --git a/docs/pot/provisioning-steps.pot b/docs/pot/provisioning-steps.pot deleted file mode 100644 index cb3d44c369f..00000000000 --- a/docs/pot/provisioning-steps.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Steps to Provisioning Your Cloud Infrastructure" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section tells how to add zones, pods, clusters, hosts, storage, and networks to your cloud. If you are unfamiliar with these entities, please begin by looking through ." 
-msgstr "" - diff --git a/docs/pot/query-filter.pot b/docs/pot/query-filter.pot deleted file mode 100644 index ff221b07aa6..00000000000 --- a/docs/pot/query-filter.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Query Filter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The query filter is used to find a mapped user in the external LDAP server. The query filter should uniquely map the &PRODUCT; user to LDAP user for a meaningful authentication. For more information about query filter syntax, consult the documentation for your LDAP server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; query filter wildcards are:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Query Filter Wildcard" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "%u" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "User name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "%e" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Email address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "%n" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "First and last name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following examples assume you are using Active Directory, and refer to user attributes from the Active Directory schema." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the &PRODUCT; user name is the same as the LDAP user ID:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "(uid=%u)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the &PRODUCT; user name is the LDAP display name:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "(displayName=%u)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To find a user by email address:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "(mail=%e)" -msgstr "" - diff --git a/docs/pot/re-install-hosts.pot b/docs/pot/re-install-hosts.pot deleted file mode 100644 index 5f21c743b86..00000000000 --- a/docs/pot/re-install-hosts.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Re-Installing Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can re-install a host after placing it in maintenance mode and then removing it. If a host is down and cannot be placed in maintenance mode, it should still be removed before the re-install." -msgstr "" - diff --git a/docs/pot/release-ip-address.pot b/docs/pot/release-ip-address.pot deleted file mode 100644 index 3f70c4d19a7..00000000000 --- a/docs/pot/release-ip-address.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Releasing an IP Address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the network where you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click View IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP address you want to release." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Release IP button ReleaseIPButton.png: button to release an IP ." -msgstr "" - diff --git a/docs/pot/release-ip-for-vpc.pot b/docs/pot/release-ip-for-vpc.pot deleted file mode 100644 index 49d743a4ea4..00000000000 --- a/docs/pot/release-ip-for-vpc.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Releasing an IP Address Alloted to a VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP address is a limited resource. If you no longer need a particular IP, you can disassociate it from its VPC and return it to the pool of available addresses. An IP address can be released from its tier, only when all the networking ( port forwarding, load balancing, or StaticNAT ) rules are removed for this IP address. The released IP address will still belongs to the same VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC whose IP you want to release." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The VPC page is displayed where all the tiers you created are listed in a diagram." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Settings icon." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following options are displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site-to-Site VPN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network ACLs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The IP Addresses page is displayed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the IP you want to release." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In the Details tab, click the Release IP button release-ip-icon.png: button to release an IP. " -msgstr "" - diff --git a/docs/pot/remove-member-from-project.pot b/docs/pot/remove-member-from-project.pot deleted file mode 100644 index 9933fe45ea4..00000000000 --- a/docs/pot/remove-member-from-project.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Removing a Member From a Project" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a member is removed from a project, the member’s resources continue to be owned by the project. The former project member cannot create any new resources within the project or use any of the project’s existing resources." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "A member of a project can be removed by the project administrator, the domain administrator of the domain the project belongs to or of its parent domain, or the &PRODUCT; root administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the project." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Accounts tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the member." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Delete button. deletebutton.png: Removes a member " -msgstr "" - diff --git a/docs/pot/remove-tier.pot b/docs/pot/remove-tier.pot deleted file mode 100644 index ac674250681..00000000000 --- a/docs/pot/remove-tier.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Removing Tiers" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can remove a tier from a VPC. A removed tier cannot be revoked. When a tier is removed, only the resources of the tier are expunged. All the network rules (port forwarding, load balancing and staticNAT) and the IP addresses associated to the tier are removed. The IP address still be belonging to the same VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPC that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure button of the VPC for which you want to set up tiers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Configure VPC page is displayed. Locate the tier you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Remove VPC button:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait for some time for the tier to be removed." -msgstr "" - diff --git a/docs/pot/remove-vpc.pot b/docs/pot/remove-vpc.pot deleted file mode 100644 index 5627b1eacb5..00000000000 --- a/docs/pot/remove-vpc.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Editing, Restarting, and Removing a Virtual Private Cloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that all the tiers are removed before you remove a VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or end user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Select view, select VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the VPCs that you have created for the account is listed in the page." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the VPC you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To remove, click the Remove VPC button remove-vpc.png: button to remove a VPC " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can edit the name and description of a VPC. To do that, select the VPC, then click the Edit button. edit-icon.png: button to edit a VPC " -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To restart a VPC, select the VPC, then click the Restart button. restart-vpc.png: button to restart a VPC " -msgstr "" - diff --git a/docs/pot/removed-API-commands.pot b/docs/pot/removed-API-commands.pot deleted file mode 100644 index 3a068c23b83..00000000000 --- a/docs/pot/removed-API-commands.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Removed API commands" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "createConfiguration (Adds configuration value)" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "configureSimulator (Configures simulator)" -msgstr "" - diff --git a/docs/pot/removing-hosts.pot b/docs/pot/removing-hosts.pot deleted file mode 100644 index 335eeef81db..00000000000 --- a/docs/pot/removing-hosts.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Removing Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hosts can be removed from the cloud as needed. The procedure to remove a host depends on the hypervisor type." -msgstr "" - diff --git a/docs/pot/removing-vsphere-hosts.pot b/docs/pot/removing-vsphere-hosts.pot deleted file mode 100644 index 02db0a5e4da..00000000000 --- a/docs/pot/removing-vsphere-hosts.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Removing vSphere Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To remove this type of host, first place it in maintenance mode, as described in . Then use &PRODUCT; to remove the host. &PRODUCT; will not direct commands to a host that has been removed using &PRODUCT;. However, the host may still exist in the vCenter cluster." -msgstr "" - diff --git a/docs/pot/removing-xenserver-kvm-hosts.pot b/docs/pot/removing-xenserver-kvm-hosts.pot deleted file mode 100644 index fa092ae4683..00000000000 --- a/docs/pot/removing-xenserver-kvm-hosts.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Removing XenServer and KVM Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A node cannot be removed from a cluster until it has been placed in maintenance mode. This will ensure that all of the VMs on it have been migrated to other Hosts. To remove a Host from the cloud:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Place the node in maintenance mode." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For KVM, stop the cloud-agent service." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use the UI option to remove the node." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then you may power down the Host, re-use its IP address, re-install it, etc" -msgstr "" - diff --git a/docs/pot/requirements-templates.pot b/docs/pot/requirements-templates.pot deleted file mode 100644 index 5d1a8e7ae5f..00000000000 --- a/docs/pot/requirements-templates.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Requirements for Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For XenServer, install PV drivers / Xen tools on each template that you create. This will enable live migration and clean guest shutdown." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For vSphere, install VMware Tools on each template that you create. This will enable console view to work properly." -msgstr "" - diff --git a/docs/pot/resizing-volumes.pot b/docs/pot/resizing-volumes.pot deleted file mode 100644 index 457d1bace2a..00000000000 --- a/docs/pot/resizing-volumes.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Resizing Volumes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; does not provide the ability to resize root disks or data disks; the disk size is fixed based on the template used to create the VM. However, the tool VHD Resizer), while not officially supported by Cloud.com or Citrix, might provide a workaround. To increase disk size with VHD Resizer:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Get the VHD from the secondary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Import it into VHD Resizer." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Resize the VHD." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Upload the new VHD." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a new VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Take a snapshot, then create a new template from that snapshot." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "For more information, see How to Resize a Provisioning Server 5 Virtual Disk at the Citrix Knowledge Center" -msgstr "" - diff --git a/docs/pot/response-formats.pot b/docs/pot/response-formats.pot deleted file mode 100644 index 706fcc811e1..00000000000 --- a/docs/pot/response-formats.pot +++ /dev/null @@ -1,79 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Response Formats: XML and JSON" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CloudStack supports two formats as the response to an API call. The default response is XML. If you would like the response to be in JSON, add &response=json to the Command String." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Sample XML Response:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "\n" -" <listipaddressesresponse> \n" -" <allocatedipaddress>\n" -" <ipaddress>192.168.10.141</ipaddress> \n" -" <allocated>2009-09-18T13:16:10-0700</allocated> \n" -" <zoneid>4</zoneid> \n" -" <zonename>WC</zonename> \n" -" <issourcenat>true</issourcenat> \n" -" </allocatedipaddress>\n" -" </listipaddressesresponse>\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Sample JSON Response:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" { \"listipaddressesresponse\" : \n" -" { \"allocatedipaddress\" :\n" -" [ \n" -" { \n" -" \"ipaddress\" : \"192.168.10.141\", \n" -" \"allocated\" : \"2009-09-18T13:16:10-0700\",\n" -" \"zoneid\" : \"4\", \n" -" \"zonename\" : \"WC\", \n" -" \"issourcenat\" : \"true\" \n" -" } \n" -" ]\n" -" } \n" -" } \n" -" " -msgstr "" - diff --git a/docs/pot/responses.pot b/docs/pot/responses.pot deleted file mode 100644 index 629746bffc4..00000000000 --- a/docs/pot/responses.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Responses" -msgstr "" - diff --git a/docs/pot/roles.pot b/docs/pot/roles.pot deleted file mode 100644 index 9dba537d770..00000000000 --- a/docs/pot/roles.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Roles" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; API supports three access roles:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Root Admin. Access to all features of the cloud, including both virtual and physical resource management." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Domain Admin. Access to only the virtual resources of the clouds that belong to the administrator’s domain." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "User. Access to only the features that allow management of the user’s virtual instances, storage, and network." -msgstr "" - diff --git a/docs/pot/root-admin-ui-overview.pot b/docs/pot/root-admin-ui-overview.pot deleted file mode 100644 index 5f0df53fee7..00000000000 --- a/docs/pot/root-admin-ui-overview.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Root Administrator's UI Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; UI helps the &PRODUCT; administrator provision, view, and manage the cloud infrastructure, domains, user accounts, projects, and configuration settings. 
The first time you start the UI after a fresh Management Server installation, you can choose to follow a guided tour to provision your cloud infrastructure. On subsequent logins, the dashboard of the logged-in user appears. The various links in this screen and the navigation bar on the left provide access to a variety of administrative functions. The root administrator can also use the UI to perform all the same tasks that are present in the end-user’s UI." -msgstr "" - diff --git a/docs/pot/runtime-allocation-virtual-network-resources.pot b/docs/pot/runtime-allocation-virtual-network-resources.pot deleted file mode 100644 index df343402c43..00000000000 --- a/docs/pot/runtime-allocation-virtual-network-resources.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Runtime Allocation of Virtual Network Resources" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When you define a new virtual network, all your settings for that network are stored in &PRODUCT;. The actual network resources are activated only when the first virtual machine starts in the network. When all virtual machines have left the virtual network, the network resources are garbage collected so they can be allocated again. This helps to conserve network resources.." -msgstr "" - diff --git a/docs/pot/runtime-behavior-of-primary-storage.pot b/docs/pot/runtime-behavior-of-primary-storage.pot deleted file mode 100644 index 1862b3f892b..00000000000 --- a/docs/pot/runtime-behavior-of-primary-storage.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Runtime Behavior of Primary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Root volumes are created automatically when a virtual machine is created. 
Root volumes are deleted when the VM is destroyed. Data volumes can be created and dynamically attached to VMs. Data volumes are not deleted when VMs are destroyed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Administrators should monitor the capacity of primary storage devices and add additional primary storage as needed. See the Advanced Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Administrators add primary storage to the system by creating a &PRODUCT; storage pool. Each storage pool is associated with a cluster." -msgstr "" - diff --git a/docs/pot/runtime-internal-comm-req.pot b/docs/pot/runtime-internal-comm-req.pot deleted file mode 100644 index 78738beeec4..00000000000 --- a/docs/pot/runtime-internal-comm-req.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Runtime Internal Communications Requirements" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The Management Servers communicate with each other to coordinate tasks. This communication uses TCP on ports 8250 and 9090." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The console proxy VMs connect to all hosts in the zone over the management traffic network. Therefore the management traffic network of any given pod in the zone must have connectivity to the management traffic network of all other pods in the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The secondary storage VMs and console proxy VMs connect to the Management Server on port 8250. If you are using multiple Management Servers, the load balanced IP address of the Management Servers on port 8250 must be reachable." -msgstr "" - diff --git a/docs/pot/scheduled-maintenance-maintenance-mode-hosts.pot b/docs/pot/scheduled-maintenance-maintenance-mode-hosts.pot deleted file mode 100644 index f83df48da7c..00000000000 --- a/docs/pot/scheduled-maintenance-maintenance-mode-hosts.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Scheduled Maintenance and Maintenance Mode for Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can place a host into maintenance mode. When maintenance mode is activated, the host becomes unavailable to receive new guest VMs, and the guest VMs already running on the host are seamlessly migrated to another host not in maintenance mode. This migration uses live migration technology and does not interrupt the execution of the guest." -msgstr "" - diff --git a/docs/pot/search-base.pot b/docs/pot/search-base.pot deleted file mode 100644 index 99efd746d56..00000000000 --- a/docs/pot/search-base.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Search Base" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "An LDAP query is relative to a given node of the LDAP directory tree, called the search base. The search base is the distinguished name (DN) of a level of the directory tree below which all users can be found. The users can be in the immediate base directory or in some subdirectory. The search base may be equivalent to the organization, group, or domain name. The syntax for writing a DN varies depending on which LDAP server you are using. A full discussion of distinguished names is outside the scope of our documentation. The following table shows some examples of search bases to find users in the testing department.." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "LDAP Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Example Search Base DN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ApacheDS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ou=testing,o=project" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Active Directory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OU=testing, DC=company" -msgstr "" - diff --git a/docs/pot/search-user-bind-dn.pot b/docs/pot/search-user-bind-dn.pot deleted file mode 100644 index 5cb326c9f03..00000000000 --- a/docs/pot/search-user-bind-dn.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Search User Bind DN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The bind DN is the user on the external LDAP server permitted to search the LDAP directory within the defined search base. When the DN is returned, the DN and passed password are used to authenticate the &PRODUCT; user with an LDAP bind. A full discussion of bind DNs is outside the scope of our documentation. The following table shows some examples of bind DNs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "LDAP Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Example Bind DN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ApacheDS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "cn=Administrator,dc=testing,ou=project,ou=org" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Active Directory" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "CN=Administrator, OU=testing, DC=company, DC=com" -msgstr "" - diff --git a/docs/pot/secondary-storage-add.pot b/docs/pot/secondary-storage-add.pot deleted file mode 100644 index 419cb15e5c5..00000000000 --- a/docs/pot/secondary-storage-add.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Add Secondary Storage" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "System Requirements for Secondary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NFS storage appliance or Linux NFS server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) OpenStack Object Storage (Swift) (see http://swift.openstack.org)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "100GB minimum capacity" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "A secondary storage device must be located in the same zone as the guest VMs it serves." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each Secondary Storage server must be available to all hosts in the zone." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Adding Secondary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When you create a new zone, the first secondary storage is added as part of that procedure. You can add secondary storage servers at any time to add more servers to an existing zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure there is nothing stored on the server. Adding the server to &PRODUCT; will destroy any existing data." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are going to use Swift for cloud-wide secondary storage, you must add the Swift storage to &PRODUCT; before you add the local zone secondary storage servers. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To prepare for local zone secondary storage, you should have created and mounted an NFS share during Management Server installation. See .See Preparing NFS Shares in the Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure you prepared the system VM template during Management Server installation. See .See Prepare the System VM Template in the Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now that the secondary storage server for per-zone storage is prepared, add it to &PRODUCT;. Secondary storage is added as part of the procedure for adding a new zone. See ." -msgstr "" - diff --git a/docs/pot/secondary-storage-outage-and-data-loss.pot b/docs/pot/secondary-storage-outage-and-data-loss.pot deleted file mode 100644 index bc00e1c560b..00000000000 --- a/docs/pot/secondary-storage-outage-and-data-loss.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Secondary Storage Outage and Data Loss" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For a Zone that has only one secondary storage server, a secondary storage outage will have feature level impact to the system but will not impact running guest VMs. It may become impossible to create a VM with the selected template for a user. A user may also not be able to save snapshots or examine/restore saved snapshots. These features will automatically be available when the secondary storage comes back online." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Secondary storage data loss will impact recently added user data including templates, snapshots, and ISO images. Secondary storage should be backed up periodically. Multiple secondary storage servers can be provisioned within each zone to increase the scalability of the system." 
-msgstr "" - diff --git a/docs/pot/secondary-storage-vm.pot b/docs/pot/secondary-storage-vm.pot deleted file mode 100644 index 8f9354a9350..00000000000 --- a/docs/pot/secondary-storage-vm.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Secondary Storage VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition to the hosts, &PRODUCT;’s Secondary Storage VM mounts and writes to secondary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Submissions to secondary storage go through the Secondary Storage VM. The Secondary Storage VM can retrieve templates and ISO images from URLs using a variety of protocols." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The secondary storage VM provides a background task that takes care of a variety of secondary storage activities: downloading a new template to a Zone, copying templates between Zones, and snapshot backups." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The administrator can log in to the secondary storage VM if needed." -msgstr "" - diff --git a/docs/pot/secondary-storage.pot b/docs/pot/secondary-storage.pot deleted file mode 100644 index 3139de6a919..00000000000 --- a/docs/pot/secondary-storage.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Secondary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section gives concepts and technical details about &PRODUCT; secondary storage. 
For information about how to install and configure secondary storage through the &PRODUCT; UI, see the Advanced Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - diff --git a/docs/pot/security-groups.pot b/docs/pot/security-groups.pot deleted file mode 100644 index c7c8480d9eb..00000000000 --- a/docs/pot/security-groups.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Security Groups" -msgstr "" - diff --git a/docs/pot/security-req.pot b/docs/pot/security-req.pot deleted file mode 100644 index 373e3ba840c..00000000000 --- a/docs/pot/security-req.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Security Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The public Internet must not be able to access port 8096 or port 8250 on the Management Server." -msgstr "" - diff --git a/docs/pot/send-projects-membership-invitation.pot b/docs/pot/send-projects-membership-invitation.pot deleted file mode 100644 index f77e51f2d17..00000000000 --- a/docs/pot/send-projects-membership-invitation.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Sending Project Membership Invitations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use these steps to add a new member to a project if the invitations feature is enabled in the cloud as described in . If the invitations feature is not turned on, use the procedure in Adding Project Members From the UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the project you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Invitations tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Add by, select one of the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Account – The invitation will appear in the user’s Invitations tab in the Project View. See Using the Project View." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Email – The invitation will be sent to the user’s email address. 
Each emailed invitation includes a unique code called a token which the recipient will provide back to &PRODUCT; when accepting the invitation. Email invitations will work only if the global parameters related to the SMTP server have been set. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Type the user name or email address of the new member you want to add, and click Invite. Type the &PRODUCT; user name if you chose Account in the previous step. If you chose Email, type the email address. You can invite only people who have an account in this cloud within the same domain as the project. However, you can send the invitation to any email address." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To view and manage the invitations you have sent, return to this tab. When an invitation is accepted, the new member will appear in the project’s Accounts tab." -msgstr "" - diff --git a/docs/pot/separate_storage_network.pot b/docs/pot/separate_storage_network.pot deleted file mode 100644 index 421246003b0..00000000000 --- a/docs/pot/separate_storage_network.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Separate Storage Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the large-scale redundant setup described in the previous section, storage traffic can overload the management network. A separate storage network is optional for deployments. Storage protocols such as iSCSI are sensitive to network delays. A separate storage network ensures guest network traffic contention does not impact storage performance." -msgstr "" - diff --git a/docs/pot/service-offerings.pot b/docs/pot/service-offerings.pot deleted file mode 100644 index 59d9de44e72..00000000000 --- a/docs/pot/service-offerings.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Service Offerings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Maintenance Mode" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer and Maintenance Mode" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Maintenance Mode" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer and Maintenance Mode" -msgstr "" - diff --git a/docs/pot/set-database-buffer-pool-size.pot b/docs/pot/set-database-buffer-pool-size.pot deleted file mode 100644 index c29786c53e9..00000000000 --- a/docs/pot/set-database-buffer-pool-size.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Set Database Buffer Pool Size" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "It is important to provide enough memory space for the MySQL database to cache data and indexes:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Edit the Tomcat configuration file:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "/etc/my.cnf" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Insert the following line in the [mysqld] section, below the datadir line. Use a value that is appropriate for your situation. We recommend setting the buffer pool at 40% of RAM if MySQL is on the same server as the management server or 70% of RAM if MySQL has a dedicated server. The following example assumes a dedicated server with 1024M of RAM." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "innodb_buffer_pool_size=700M" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the MySQL service." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service mysqld restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information about the buffer pool, see \"The InnoDB Buffer Pool\" at MySQL Reference Manual." -msgstr "" - diff --git a/docs/pot/set-global-project-resource-limits.pot b/docs/pot/set-global-project-resource-limits.pot deleted file mode 100644 index ece55dbe370..00000000000 --- a/docs/pot/set-global-project-resource-limits.pot +++ /dev/null @@ -1,110 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting the Global Project Resource Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in as administrator to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Global Settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the search box, type max.projects and click the search button." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the search results, you will see the parameters you can use to set per-project maximum resource amounts that apply to all projects in the cloud. No project can have more resources, but an individual project can have lower limits. Click the edit button to set each parameter. editbutton.png: Edits parameters " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.project.public.ips" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum number of public IP addresses that can be owned by any project in the cloud. See About Public IP Addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.project.snapshots" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Maximum number of snapshots that can be owned by any project in the cloud. See Working with Snapshots." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.project.templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum number of templates that can be owned by any project in the cloud. See Working with Templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.project.uservms" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum number of guest virtual machines that can be owned by any project in the cloud. See Working With Virtual Machines." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "max.project.volumes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum number of data volumes that can be owned by any project in the cloud. See Working with Volumes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management restart" -msgstr "" - diff --git a/docs/pot/set-monitor-total-vm-limits-per-host.pot b/docs/pot/set-monitor-total-vm-limits-per-host.pot deleted file mode 100644 index 50fe4321303..00000000000 --- a/docs/pot/set-monitor-total-vm-limits-per-host.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Set and Monitor Total VM Limits per Host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; administrator should monitor the total number of VM instances in each cluster, and disable allocation to the cluster if the total is approaching the maximum that the hypervisor can handle. Be sure to leave a safety margin to allow for the possibility of one or more hosts failing, which would increase the VM load on the other hosts as the VMs are automatically redeployed. Consult the documentation for your chosen hypervisor to find the maximum permitted number of VMs per host, then use &PRODUCT; global configuration settings to set this as the default limit. Monitor the VM activity in each cluster at all times. Keep the total number of VMs below a safe level that allows for the occasional host failure. For example, if there are N hosts in the cluster, and you want to allow for one host in the cluster to be down at any given time, the total number of VM instances you can permit in the cluster is at most (N-1) * (per-host-limit). Once a cluster reaches this number of VMs, use the &PRODUCT; UI to disable allocation of more VMs to the cluster." -msgstr "" - diff --git a/docs/pot/set-per-project-resource-limits.pot b/docs/pot/set-per-project-resource-limits.pot deleted file mode 100644 index b791974010c..00000000000 --- a/docs/pot/set-per-project-resource-limits.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Per-Project Resource Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; root administrator or the domain administrator of the domain where the project resides can set new resource limits for an individual project. The project owner can set resource limits only if the owner is also a domain or root administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The new limits must be below the global default limits set by the &PRODUCT; administrator (as described in ). If the project already owns more of a given type of resource than the new maximum, the resources are not affected; however, the project can not add any new resources of that type until the total drops below the new limit." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in as administrator to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Projects." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In Select View, choose Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the project you want to work with." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Resources tab. This tab lists the current maximum amount that the project is allowed to own for each type of resource." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Type new values for one or more resources." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Apply." -msgstr "" - diff --git a/docs/pot/set-projects-creator-permissions.pot b/docs/pot/set-projects-creator-permissions.pot deleted file mode 100644 index 230e3651f9f..00000000000 --- a/docs/pot/set-projects-creator-permissions.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Project Creator Permissions" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "You can configure &PRODUCT; to allow any user to create a new project, or you can restrict that ability to just &PRODUCT; administrators." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in as administrator to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Global Settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the search box, type allow.user.create.projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the edit button to set the parameter. editbutton.png: Edits parameters " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "allow.user.create.projects" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set to true to allow end users to create projects. Set to false if you want only the &PRODUCT; root administrator and domain administrators to create projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management restart" -msgstr "" - diff --git a/docs/pot/set-resource-limits-for-projects.pot b/docs/pot/set-resource-limits-for-projects.pot deleted file mode 100644 index e833c546456..00000000000 --- a/docs/pot/set-resource-limits-for-projects.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Resource Limits for Projects" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; administrator can set global default limits to control the amount of resources that can be owned by each project in the cloud. This serves to prevent uncontrolled usage of resources such as snapshots, IP addresses, and virtual machine instances. Domain administrators can override these resource limits for individual projects with their domains, as long as the new limits are below the global defaults set by the &PRODUCT; root administrator. The root administrator can also set lower resource limits for any project in the cloud" -msgstr "" - diff --git a/docs/pot/set-up-invitations.pot b/docs/pot/set-up-invitations.pot deleted file mode 100644 index 0ea9fe7cce8..00000000000 --- a/docs/pot/set-up-invitations.pot +++ /dev/null @@ -1,160 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Up Invitations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; can be set up either so that project administrators can add people directly to a project, or so that it is necessary to send an invitation which the recipient must accept. The invitation can be sent by email or through the user’s &PRODUCT; account. If you want administrators to use invitations to add members to projects, turn on and set up the invitations feature in &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in as administrator to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Global Settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the search box, type project and click the search button." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the search box, type project and click the search button. searchbutton.png: Searches projects " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the search results, you will see a few other parameters you need to set to control how invitations behave. The table below shows global configuration parameters related to project invitations. 
Click the edit button to set each parameter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configuration Parameters" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Description" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "project.invite.required" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set to true to turn on the invitations feature." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "project.email.sender" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The email address to show in the From field of invitation emails." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "project.invite.timeout" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Amount of time to allow for a new member to respond to the invitation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "project.smtp.host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name of the host that acts as an email server to handle invitations." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "project.smtp.password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) Password required by the SMTP server. You must also set project.smtp.username and set project.smtp.useAuth to true." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "project.smtp.port" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SMTP server’s listening port." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "project.smtp.useAuth" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set to true if the SMTP server requires a username and password." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "project.smtp.username" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) User name required by the SMTP server for authentication. You must also set project.smtp.password and set project.smtp.useAuth to true.." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "service cloud-management restart" -msgstr "" - diff --git a/docs/pot/set-up-network-for-users.pot b/docs/pot/set-up-network-for-users.pot deleted file mode 100644 index 48c1cb76270..00000000000 --- a/docs/pot/set-up-network-for-users.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Up Networking for Users" -msgstr "" - diff --git a/docs/pot/set-usage-limit.pot b/docs/pot/set-usage-limit.pot deleted file mode 100644 index f39f0d6292b..00000000000 --- a/docs/pot/set-usage-limit.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Usage Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides several administrator control points for capping resource usage by users. Some of these limits are global configuration parameters. Others are applied at the ROOT domain and may be overridden on a per-account basis." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Aggregate limits may be set on a per-domain basis. For example, you may limit a domain and all subdomains to the creation of 100 VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section covers the following topics:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Globally Configured Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Default Account Resource Limits" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Per Domain Limits" -msgstr "" - diff --git a/docs/pot/set-zone-vlan-run-vm-max.pot b/docs/pot/set-zone-vlan-run-vm-max.pot deleted file mode 100644 index 6fc47e67b84..00000000000 --- a/docs/pot/set-zone-vlan-run-vm-max.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Zone VLAN and Running VM Maximums" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the external networking case, every VM in a zone must have a unique guest IP address. There are two variables that you need to consider in determining how to configure &PRODUCT; to support this: how many Zone VLANs do you expect to have and how many VMs do you expect to have running in the Zone at any one time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use the following table to determine how to configure &PRODUCT; for your deployment." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "guest.vlan.bits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum Running VMs per Zone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Maximum Zone VLANs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "12" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4096" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4094" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "11" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "8192" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "2048" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "10" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "16384" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "1024" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "32768" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "512" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Based on your deployment's needs, choose the appropriate value of guest.vlan.bits. Set it as described in Edit the Global Configuration Settings (Optional) section and restart the Management Server." -msgstr "" - diff --git a/docs/pot/shared-networks.pot b/docs/pot/shared-networks.pot deleted file mode 100644 index 9d8085dcb81..00000000000 --- a/docs/pot/shared-networks.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Shared Networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A shared network can be accessed by virtual machines that belong to many different accounts. Network Isolation on shared networks is accomplished using techniques such as security groups (supported only in basic zones in &PRODUCT; 3.0.3)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Shared Networks are created by the administrator" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Shared Networks can be designated to a certain domain" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Shared Network resources such as VLAN and physical network that it maps to are designated by the administrator" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Shared Networks are isolated by security groups" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Network is a shared network that is not shown to the end users" -msgstr "" - diff --git a/docs/pot/signing-api-requests.pot b/docs/pot/signing-api-requests.pot deleted file mode 100644 index 10d171473d7..00000000000 --- a/docs/pot/signing-api-requests.pot +++ /dev/null @@ -1,140 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Signing API Requests" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Whether you access the CloudStack API with HTTP or HTTPS, it must still be signed so that CloudStack can verify the caller has been authenticated and authorized to execute the command. Make sure that you have both the API Key and Secret Key provided by the CloudStack administrator for your account before proceeding with the signing process." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To show how to sign a request, we will re-use the previous example." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://http://localhost:8080/client/api?command=deployVirtualMachine&serviceOfferingId=1&diskOfferingId=1&templateId=2&zoneId=4&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Breaking this down, we have several distinct parts to this URL." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Base URL: This is the base URL to the CloudStack Management Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://localhost:8080" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "API Path: This is the path to the API Servlet that processes the incoming requests." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "/client/api?" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Command String: This part of the query string comprises of the command, its parameters, and the API Key that identifies the account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "As with all query string parameters of field-value pairs, the \"field\" component is case insensitive while all \"value\" values are case sensitive." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=deployVirtualMachine&serviceOfferingId=1&diskOfferingId=1&templateId=2&zoneId=4&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Signature: This is the hashed signature of the Base URL that is generated using a combination of the user’s Secret Key and the HMAC SHA-1 hashing algorithm." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Every API request has the format Base URL+API Path+Command String+Signature." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To generate the signature." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For each field-value pair (as separated by a '&') in the Command String, URL encode each value so that it can be safely sent via HTTP GET." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure all spaces are encoded as \"%20\" rather than \"+\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Lower case the entire Command String and sort it alphabetically via the field for each field-value pair. The result of this step would look like the following." -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "apikey=mivr6x7u6bn_sdahobpjnejpgest35exq-jb8cg20yi3yaxxcgpyuairmfi_ejtvwz0nukkjbpmy3y2bcikwfq&command=deployvirtualmachine&diskofferingid=1&serviceofferingid=1&templateid=2&zoneid=4" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Take the sorted Command String and run it through the HMAC SHA-1 hashing algorithm (most programming languages offer a utility method to do this) with the user’s Secret Key. Base64 encode the resulting byte array in UTF-8 so that it can be safely transmitted via HTTP. The final string produced after Base64 encoding should be \"Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D\"." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By reconstructing the final URL in the format Base URL+API Path+Command String+Signature, the final URL should look like:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "http://localhost:8080/client/api?command=deployVirtualMachine&serviceOfferingId=1&diskOfferingId=1&templateId=2&zoneId=4&apiKey=miVr6X7u6bN_sdahOBpjNejPgEsT35eXq-jB8CG20YI3yaxXcgpyuaIRmFI_EJTVwZ0nUkkJbPmY3y2bciKwFQ&signature=Lxx1DM40AjcXU%2FcaiK8RAP0O1hU%3D" -msgstr "" - diff --git a/docs/pot/site-to-site-vpn.pot b/docs/pot/site-to-site-vpn.pot deleted file mode 100644 index e3be2197d5c..00000000000 --- a/docs/pot/site-to-site-vpn.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Setting Up a Site-to-Site VPN Connection" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A Site-to-Site VPN connection helps you establish a secure connection from an enterprise datacenter to the cloud infrastructure. This allows users to access the guest VMs by establishing a VPN connection to the virtual router of the account from a device in the datacenter of the enterprise. Having this facility eliminates the need to establish VPN connections to individual VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The supported endpoints on the remote datacenters are:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cisco ISR with IOS 12.4 or later" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Juniper J-Series routers with JunOS 9.5 or later" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition to the specific Cisco and Juniper devices listed above, the expectation is that any Cisco or Juniper device running on the supported operating systems are able to establish VPN connections." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To set up a Site-to-Site VPN connection, perform the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a Virtual Private Cloud (VPC)." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a VPN Customer Gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a VPN gateway for the VPC that you created." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create VPN connection from the VPC VPN gateway to the customer VPN gateway." -msgstr "" - diff --git a/docs/pot/small_scale_deployment.pot b/docs/pot/small_scale_deployment.pot deleted file mode 100644 index 033f3c425a3..00000000000 --- a/docs/pot/small_scale_deployment.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Small-Scale Deployment" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This diagram illustrates the network architecture of a small-scale &PRODUCT; deployment." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "A firewall provides a connection to the Internet. The firewall is configured in NAT mode. The firewall forwards HTTP requests and API calls from the Internet to the Management Server. The Management Server resides on the management network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A layer-2 switch connects all physical servers and storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A single NFS server functions as both the primary and secondary storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server is connected to the management network." -msgstr "" - diff --git a/docs/pot/snapshot-restore.pot b/docs/pot/snapshot-restore.pot deleted file mode 100644 index 5d4d1f55147..00000000000 --- a/docs/pot/snapshot-restore.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Snapshot Restore" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are two paths to restoring snapshots. Users can create a volume from the snapshot. The volume can then be mounted to a VM and files recovered as needed. Alternatively, a template may be created from the snapshot of a root disk. The user can then boot a VM from this template to effect recovery of the root disk." -msgstr "" - diff --git a/docs/pot/source-build.pot b/docs/pot/source-build.pot deleted file mode 100644 index 304d2eb36db..00000000000 --- a/docs/pot/source-build.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Building &PRODUCT; from Source" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prior to the 4.0.0 incubating release, Ant was used to build &PRODUCT;. Starting with 4.0.0 a migration to Maven is underway." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The website and the wiki contain up to date information on the build procedure at:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "https://cwiki.apache.org/CLOUDSTACK/building-with-maven.html" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://incubator.apache.org/cloudstack/develop/environment.html" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The overarching steps to build &PRODUCT; are:." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Install the prerequisites and setup your environment" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Understand that various Maven profiles and build targets" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Deploy and test your build" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If needed, learn how to build binaries" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Learning Maven is outside the scope of this documentation." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Go to the Maven website at http://maven.apache.org/guides/getting-started/index.html" -msgstr "" - diff --git a/docs/pot/source-prereqs.pot b/docs/pot/source-prereqs.pot deleted file mode 100644 index b22b5f7144a..00000000000 --- a/docs/pot/source-prereqs.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Prerequisites for building Apache CloudStack" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are a number of prerequisites needed to build &PRODUCT;. This document assumes compilation on a Linux system that uses RPMs or DEBs for package management." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The minimum bootstrapped prerequisites for building &PRODUCT; includes the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ant" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "maven (version 3)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Java (Java 6/OpenJDK 1.6)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rpmbuild or dpkg-dev" -msgstr "" - diff --git a/docs/pot/source.pot b/docs/pot/source.pot deleted file mode 100644 index 525c0b43d42..00000000000 --- a/docs/pot/source.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Building from Source" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The official &PRODUCT; release is always in source code form. While there may exist convenience binaries in various forms from a number of places, the source is the canonical release will be source. In this document we'll cover acquiring the source release, building that into binary, deployable packages." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "While building and deploying directly from source is certainly possible, the reality of Infrastructure-as-a-Service cloud computing implies a need to deploy packages on a potentially large number of systems, which RPMs and DEBs fill nicely." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Building and deploying directly from source is thus outside the scope of this document, but is documented in the INSTALL.md file in the release." -msgstr "" - diff --git a/docs/pot/ssl.pot b/docs/pot/ssl.pot deleted file mode 100644 index 037169bea8f..00000000000 --- a/docs/pot/ssl.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "SSL (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides HTTP access in its default installation. There are a number of technologies and sites which choose to implement SSL. As a result, we have left &PRODUCT; to expose HTTP under the assumption that a site will implement its typical practice." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; uses Tomcat as its servlet container. For sites that would like &PRODUCT; to terminate the SSL session, Tomcat’s SSL access may be enabled. Tomcat SSL configuration is described at http://tomcat.apache.org/tomcat-6.0-doc/ssl-howto.html." -msgstr "" - diff --git a/docs/pot/standard-events.pot b/docs/pot/standard-events.pot deleted file mode 100644 index adb22613c10..00000000000 --- a/docs/pot/standard-events.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Standard Events" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The events log records three types of standard events." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "INFO. This event is generated when an operation has been successfully performed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "WARN. This event is generated in the following circumstances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a network is disconnected while monitoring a template download." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a template download is abandoned." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When an issue on the storage server causes the volumes to fail over to the mirror storage server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ERROR. This event is generated when an operation has not been successfully performed" -msgstr "" - diff --git a/docs/pot/static-nat.pot b/docs/pot/static-nat.pot deleted file mode 100644 index 60f01be8e33..00000000000 --- a/docs/pot/static-nat.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Static NAT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A static NAT rule maps a public IP address to the private IP address of a VM in order to allow Internet traffic into the VM. The public IP address always remains the same, which is why it is called \"static\" NAT. This section tells how to enable or disable static NAT for a particular IP address." -msgstr "" - diff --git a/docs/pot/sticky-session-policies-for-lb-rules.pot b/docs/pot/sticky-session-policies-for-lb-rules.pot deleted file mode 100644 index cbe4ae616ea..00000000000 --- a/docs/pot/sticky-session-policies-for-lb-rules.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Sticky Session Policies for Load Balancer Rules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Sticky sessions are used in Web-based applications to ensure continued availability of information across the multiple requests in a user's session. For example, if a shopper is filling a cart, you need to remember what has been purchased so far. The concept of \"stickiness\" is also referred to as persistence or maintaining state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Any load balancer rule defined in &PRODUCT; can have a stickiness policy. The policy consists of a name, stickiness method, and parameters. The parameters are name-value pairs or flags, which are defined by the load balancer vendor. The stickiness method could be load balancer-generated cookie, application-generated cookie, or source-based. In the source-based method, the source IP address is used to identify the user and locate the user’s stored data. In the other methods, cookies are used. 
The cookie generated by the load balancer or application is included in request and response URLs to create persistence. The cookie name can be specified by the administrator or automatically generated. A variety of options are provided to control the exact behavior of cookies, such as how they are generated and whether they are cached." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the most up to date list of available stickiness methods, see the &PRODUCT; UI or call listNetworks and check the SupportedStickinessMethods capability." -msgstr "" - diff --git a/docs/pot/stop-restart-management-server.pot b/docs/pot/stop-restart-management-server.pot deleted file mode 100644 index dec05253f6a..00000000000 --- a/docs/pot/stop-restart-management-server.pot +++ /dev/null @@ -1,65 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "Stopping and Restarting the Management Server" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The root administrator will need to stop and restart the Management Server from time to time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example, after changing a global configuration parameter, a restart is required. If you have multiple Management Server nodes, restart all of them to put the new parameter value into effect consistently throughout the cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To stop the Management Server, issue the following command at the operating system prompt on the Management Server node:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management stop" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To start the Management Server:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management start" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To stop the Management Server:" -msgstr "" - diff --git a/docs/pot/stopped-vm.pot b/docs/pot/stopped-vm.pot deleted file mode 100644 index 8f5f7c1b3fe..00000000000 --- a/docs/pot/stopped-vm.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Stopped VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; now supports creating a VM without starting it. You can determine whether the VM needs to be started as part of the VM deployment. A VM can now be deployed in two ways: create and start a VM (the default method); or create a VM and leave it in the stopped state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A new request parameter, startVM, is introduced in the deployVm API to support the stopped VM feature." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The possible values are:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "true - The VM starts as a part of the VM deployment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "false - The VM is left in the stopped state at the end of the VM deployment." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The default value is true." -msgstr "" - diff --git a/docs/pot/stopping-and-starting-vms.pot b/docs/pot/stopping-and-starting-vms.pot deleted file mode 100644 index 2228d58c246..00000000000 --- a/docs/pot/stopping-and-starting-vms.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Stopping and Starting VMs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Any user can access their own virtual machines. The administrator can access all VMs running in the cloud." -msgstr "" - diff --git a/docs/pot/storage-nw-topology-req.pot b/docs/pot/storage-nw-topology-req.pot deleted file mode 100644 index 9786961851f..00000000000 --- a/docs/pot/storage-nw-topology-req.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Storage Network Topology Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The secondary storage NFS export is mounted by the secondary storage VM. Secondary storage traffic goes over the management traffic network, even if there is a separate storage network. Primary storage traffic goes over the storage network, if available. If you choose to place secondary storage NFS servers on the storage network, you must make sure there is a route from the management traffic network to the storage network." -msgstr "" - diff --git a/docs/pot/storage-overview.pot b/docs/pot/storage-overview.pot deleted file mode 100644 index a2cad3573f4..00000000000 --- a/docs/pot/storage-overview.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Storage Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; defines two types of storage: primary and secondary. Primary storage can be accessed by either iSCSI or NFS. Additionally, direct attached storage may be used for primary storage. Secondary storage is always accessed using NFS." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There is no ephemeral storage in &PRODUCT;. All volumes on all nodes are persistent." -msgstr "" - diff --git a/docs/pot/storage-tags.pot b/docs/pot/storage-tags.pot deleted file mode 100644 index 8ed6ccf2214..00000000000 --- a/docs/pot/storage-tags.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Storage Tags" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage may be \"tagged\". A tag is a text string attribute associated with primary storage, a Disk Offering, or a Service Offering. Tags allow administrators to provide additional information about the storage. For example, that is a \"SSD\" or it is \"slow\". Tags are not interpreted by &PRODUCT;. They are matched against tags placed on service and disk offerings. &PRODUCT; requires all tags on service and disk offerings to exist on the primary storage before it allocates root or data disks on the primary storage. Service and disk offering tags are used to identify the requirements of the storage that those offerings have. For example, the high end service offering may require \"fast\" for its root disk volume." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The interaction between tags, allocation, and volume copying across clusters and pods can be complex. To simplify the situation, use the same set of tags on the primary storage for all clusters in a pod. Even if different devices are used to present those tags, the set of exposed tags can be the same." -msgstr "" - diff --git a/docs/pot/storage.pot b/docs/pot/storage.pot deleted file mode 100644 index cc72673f6d9..00000000000 --- a/docs/pot/storage.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working With Storage" -msgstr "" - diff --git a/docs/pot/suspend-project.pot b/docs/pot/suspend-project.pot deleted file mode 100644 index e2c6c7d3d01..00000000000 --- a/docs/pot/suspend-project.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Suspending or Deleting a Project" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a project is suspended, it retains the resources it owns, but they can no longer be used. No new resources or members can be added to a suspended project." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a project is deleted, its resources are destroyed, and member accounts are removed from the project. The project’s status is shown as Disabled pending final deletion." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A project can be suspended or deleted by the project administrator, the domain administrator of the domain the project belongs to or of its parent domain, or the &PRODUCT; root administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, click Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Select View, choose Projects." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the name of the project." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click one of the buttons:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To delete, use deletebutton.png: Removes a project " -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "To suspend, use deletebutton.png: suspends a project " -msgstr "" - diff --git a/docs/pot/sys-offering-sysvm.pot b/docs/pot/sys-offering-sysvm.pot deleted file mode 100644 index 995ae9a3c5e..00000000000 --- a/docs/pot/sys-offering-sysvm.pot +++ /dev/null @@ -1,130 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Changing the Default System Offering for System VMs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can manually change the system offering for a particular System VM. Additionally, as a &PRODUCT; administrator, you can also change the default system offering used for System VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a new system offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information, see Creating a New System Service Offering." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Back up the database:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mysqldump -u root -p cloud | bzip2 > cloud_backup.sql.bz2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Open an MySQL prompt:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "mysql -u cloud -p cloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following queries on the cloud database." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the disk_offering table, identify the original default offering and the new offering you want to use by default." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Take a note of the ID of the new offering." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "select id,name,unique_name,type from disk_offering;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the original default offering, set the value of unique_name to NULL." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# update disk_offering set unique_name = NULL where id = 10;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that you use the correct value for the ID." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the new offering that you want to use by default, set the value of unique_name as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For the default Console Proxy VM (CPVM) offering,set unique_name to 'Cloud.com-ConsoleProxy'. For the default Secondary Storage VM (SSVM) offering, set unique_name to 'Cloud.com-SecondaryStorage'. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "update disk_offering set unique_name = 'Cloud.com-ConsoleProxy' where id = 16;" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart &PRODUCT; Management Server. Restarting is required because the default offerings are loaded into the memory at startup." -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "service cloud-management restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Destroy the existing CPVM or SSVM offerings and wait for them to be recreated. The new CPVM or SSVM are configured with the new offering." -msgstr "" - diff --git a/docs/pot/sys-reliability-and-ha.pot b/docs/pot/sys-reliability-and-ha.pot deleted file mode 100644 index f6820688aa7..00000000000 --- a/docs/pot/sys-reliability-and-ha.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "System Reliability and High Availability" -msgstr "" - diff --git a/docs/pot/sysprep-for-windows-server-2003R2.pot b/docs/pot/sysprep-for-windows-server-2003R2.pot deleted file mode 100644 index c975206ed97..00000000000 --- a/docs/pot/sysprep-for-windows-server-2003R2.pot +++ /dev/null @@ -1,160 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Sysprep for Windows Server 2003 R2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Earlier versions of Windows have a different sysprep tool. Follow these steps for Windows Server 2003 R2." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Extract the content of \\support\\tools\\deploy.cab on the Windows installation CD into a directory called c:\\sysprep on the Windows 2003 R2 VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run c:\\sysprep\\setupmgr.exe to create the sysprep.inf file." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Create New to create a new Answer File." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Enter “Sysprep setup†for the Type of Setup." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the appropriate OS version and edition." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the License Agreement screen, select “Yes fully automate the installationâ€." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide your name and organization." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Leave display settings at default." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set the appropriate time zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide your product key." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select an appropriate license mode for your deployment" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select “Automatically generate computer nameâ€." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Type a default administrator password. If you enable the password reset feature, the users will not actually use this password. This password will be reset by the instance manager after the guest boots up." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Leave Network Components at “Typical Settingsâ€." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the “WORKGROUP†option." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Leave Telephony options at default." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select appropriate Regional Settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select appropriate language settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Do not install printers." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Do not specify “Run Once commandsâ€." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You need not specify an identification string." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Save the Answer File as c:\\sysprep\\sysprep.inf." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following command to sysprep the image:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "c:\\sysprep\\sysprep.exe -reseal -mini -activated" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After this step the machine will automatically shut down" -msgstr "" - diff --git a/docs/pot/sysprep-windows-server-2008R2.pot b/docs/pot/sysprep-windows-server-2008R2.pot deleted file mode 100644 index 91202d855da..00000000000 --- a/docs/pot/sysprep-windows-server-2008R2.pot +++ /dev/null @@ -1,147 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "System Preparation for Windows Server 2008 R2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For Windows 2008 R2, you run Windows System Image Manager to create a custom sysprep response XML file. 
Windows System Image Manager is installed as part of the Windows Automated Installation Kit (AIK). Windows AIK can be downloaded from the Microsoft Download Center at the following location:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Microsoft Download Center." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use the following steps to run sysprep for Windows 2008 R2:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The steps outlined here are derived from the excellent guide by Charity Shelbourne, originally published at Windows Server 2008 Sysprep Mini-Setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Download and install the Windows AIK" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Windows AIK should not be installed on the Windows 2008 R2 VM you just created. Windows AIK should not be part of the template you create. It is only used to create the sysprep answer file." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy the install.wim file in the \\sources directory of the Windows 2008 R2 installation DVD to the hard disk. This is a very large file and may take a long time to copy. Windows AIK requires the WIM file to be writable." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Start the Windows System Image Manager, which is part of the Windows AIK." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Windows Image pane, right click “Select a Windows image or catalog file†to load the install.wim file you just copied." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select the Windows 2008 R2 Edition" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You may be prompted with a warning that the catalog file cannot be opened. Click Yes to create a new catalog file." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Answer File pane, right click to create a new answer file." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Generate the answer file from the Windows System Image Manager using the following steps:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The first page you need to automate is the Language and Country or Region Selection page. To automate this, expand Components in your Windows Image pane, right-click and add the Microsoft-Windows-International-Core setting to Pass 7 oobeSystem. In your Answer File pane, configure the InputLocale, SystemLocale, UILanguage, and UserLocale with the appropriate settings for your language and country or region. Should you have a question about any of these settings, you can right-click on the specific setting and select Help. This will open the appropriate CHM help file with more information, including examples on the setting you are attempting to configure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You need to automate the Software License Terms Selection page, otherwise known as the End-User License Agreement (EULA). To do this, expand the Microsoft-Windows-Shell-Setup component. High-light the OOBE setting, and add the setting to the Pass 7 oobeSystem. In Settings, set HideEULAPage true." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure the license key is properly set. If you use MAK key, you can just enter the MAK key on the Windows 2008 R2 VM. You need not input the MAK into the Windows System Image Manager. If you use KMS host for activation you need not enter the Product Key. Details of Windows Volume Activation can be found at " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You need to automate is the Change Administrator Password page. Expand the Microsoft-Windows-Shell-Setup component (if it is not still expanded), expand UserAccounts, right-click on AdministratorPassword, and add the setting to the Pass 7 oobeSystem configuration pass of your answer file. Under Settings, specify a password next to Value." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "You may read the AIK documentation and set many more options that suit your deployment. The steps above are the minimum needed to make Windows unattended setup work." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Save the answer file as unattend.xml. You can ignore the warning messages that appear in the validation window." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy the unattend.xml file into the c:\\windows\\system32\\sysprep directory of the Windows 2008 R2 Virtual Machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Once you place the unattend.xml file in c:\\windows\\system32\\sysprep directory, you run the sysprep tool as follows:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "cd c:\\Windows\\System32\\sysprep\n" -"sysprep.exe /oobe /generalize /shutdown\n" -"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Windows 2008 R2 VM will automatically shut down after sysprep is complete." -msgstr "" - diff --git a/docs/pot/system-reserved-ip-addresses.pot b/docs/pot/system-reserved-ip-addresses.pot deleted file mode 100644 index 5caf9fa39b8..00000000000 --- a/docs/pot/system-reserved-ip-addresses.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "System Reserved IP Addresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In each zone, you need to configure a range of reserved IP addresses for the management network. This network carries communication between the &PRODUCT; Management Server and various system VMs, such as Secondary Storage VMs, Console Proxy VMs, and DHCP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The reserved IP addresses must be unique across the cloud. You cannot, for example, have a host in one zone which has the same private IP address as a host in another zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The hosts in a pod are assigned private IP addresses. These are typically RFC1918 addresses. The Console Proxy and Secondary Storage system VMs are also allocated private IP addresses in the CIDR of the pod that they are created in." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure computing servers and Management Servers use IP addresses outside of the System Reserved IP range. For example, suppose the System Reserved IP range starts at 192.168.154.2 and ends at 192.168.154.7. &PRODUCT; can use .2 to .7 for System VMs. This leaves the rest of the pod CIDR, from .8 to .254, for the Management Server and hypervisor hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In all zones:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide private IPs for the system in each pod and provision them in &PRODUCT;." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "For KVM and XenServer, the recommended number of private IPs per pod is one per host. If you expect a pod to grow, add enough private IPs now to accommodate the growth." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a zone that uses advanced networking:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For zones with advanced networking, we recommend provisioning enough private IPs for your total number of customers, plus enough for the required &PRODUCT; System VMs. Typically, about 10 additional IPs are required for the System VMs. For more information about System VMs, see Working with System Virtual Machines in the Administrator's Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When advanced networking is being used, the number of private IP addresses available in each pod varies depending on which hypervisor is running on the nodes in that pod. Citrix XenServer and KVM use link-local addresses, which in theory provide more than 65,000 private IP addresses within the address block. As the pod grows over time, this should be more than enough for any reasonable number of hosts as well as IP addresses for guest virtual routers. VMWare ESXi, by contrast uses any administrator-specified subnetting scheme, and the typical administrator provides only 255 IPs per pod. Since these are shared by physical machines, the guest virtual router, and other entities, it is possible to run out of private IPs when scaling up a pod whose nodes are running ESXi." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To ensure adequate headroom to scale private IP space in an ESXi pod that uses advanced networking, use one or both of the following techniques:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specify a larger CIDR block for the subnet. A subnet mask with a /20 suffix will provide more than 4,000 IP addresses." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create multiple pods, each with its own subnet. 
For example, if you create 10 pods and each pod has 255 IPs, this will provide 2,550 IP addresses." -msgstr "" - diff --git a/docs/pot/system-service-offerings.pot b/docs/pot/system-service-offerings.pot deleted file mode 100644 index 726d0d0f2ce..00000000000 --- a/docs/pot/system-service-offerings.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "System Service Offerings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "System service offerings provide a choice of CPU speed, number of CPUs, tags, and RAM size, just as other service offerings do. But rather than being used for virtual machine instances and exposed to users, system service offerings are used to change the default properties of virtual routers, console proxies, and other system VMs. System service offerings are visible only to the &PRODUCT; root administrator. 
&PRODUCT; provides default system service offerings. The &PRODUCT; root administrator can create additional custom system service offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When &PRODUCT; creates a virtual router for a guest network, it uses default settings which are defined in the system service offering associated with the network offering. You can upgrade the capabilities of the virtual router by applying a new network offering that contains a different system service offering. All virtual routers in that network will begin using the settings from the new service offering." -msgstr "" - diff --git a/docs/pot/system-vm-template.pot b/docs/pot/system-vm-template.pot deleted file mode 100644 index 9b739867a7f..00000000000 --- a/docs/pot/system-vm-template.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "The System VM Template" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The System VMs come from a single template. The System VM has the following characteristics:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Debian 6.0 (\"Squeeze\"), 2.6.32 kernel with the latest security patches from the Debian security APT repository" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Has a minimal set of packages installed thereby reducing the attack surface" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "32-bit for enhanced performance on Xen/VMWare" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "pvops kernel with Xen PV drivers, KVM virtio drivers, and VMware tools for optimum performance on all hypervisors" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Xen tools inclusion allows performance monitoring" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Latest versions of HAProxy, iptables, IPsec, and Apache from debian repository ensures improved security and speed" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Latest version of JRE from Sun/Oracle ensures improved security and speed" -msgstr "" - diff --git a/docs/pot/tagging-resources.pot b/docs/pot/tagging-resources.pot deleted file mode 100644 index 935fc5f5193..00000000000 --- a/docs/pot/tagging-resources.pot +++ /dev/null @@ -1,140 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using Tags to Organize Resources in the Cloud" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A tag is a key-value pair that stores metadata about a resource in the cloud. Tags are useful for categorizing resources. For example, you can tag a user VM with a value that indicates the user's city of residence. In this case, the key would be \"city\" and the value might be \"Toronto\" or \"Tokyo.\" You can then request &PRODUCT; to find all resources that have a given tag; for example, VMs for users in a given city." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can tag a user virtual machine, volume, snapshot, guest network, template, ISO, firewall rule, port forwarding rule, public IP address, security group, load balancer rule, project, VPC, network ACL, or static route. You can not tag a remote access VPN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can work with tags through the UI or through the API commands createTags, deleteTags, and listTags. You can define multiple tags for each resource. There is no limit on the number of tags you can define. Each tag can be up to 255 characters long. 
Users can define tags on the resources they own, and administrators can define tags on any resources in the cloud." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "An optional input parameter, \"tags,\" exists on many of the list* API commands. The following example shows how to use this new parameter to find all the volumes having tag region=canada OR tag city=Toronto:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "command=listVolumes\n" -" &listAll=true\n" -" &tags[0].key=region\n" -" &tags[0].value=canada\n" -" &tags[1].key=city\n" -" &tags[1].value=Toronto" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following API commands have the \"tags\" input parameter:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVirtualMachines" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVolumes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSnapshots" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listTemplates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listIsos" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listFirewallRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPortForwardingRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listPublicIpAddresses" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listSecurityGroups" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listLoadBalancerRules" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listProjects" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listVPCs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "listNetworkACLs" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listStaticRoutes" -msgstr "" - diff --git a/docs/pot/template-iso-snapshot-usage-record-format.pot b/docs/pot/template-iso-snapshot-usage-record-format.pot deleted file mode 100644 index c76cb5baa34..00000000000 --- a/docs/pot/template-iso-snapshot-usage-record-format.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Template, ISO, and Snapshot Usage Record Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "account – name of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "accountid – ID of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "domainid – ID of the domain in which this account resides" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "zoneid – Zone where the usage occurred" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "description – A string describing what the usage record is tracking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for hours)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usagetype – A number representing the usage type (see Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rawusage – A number representing the actual usage in hours" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usageid – The ID of the the template, ISO, or snapshot" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "offeringid – The ID of the disk offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "templateid – – Included only for templates (usage type 7). Source template ID." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "size – Size of the template, ISO, or snapshot" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record" -msgstr "" - diff --git a/docs/pot/templates.pot b/docs/pot/templates.pot deleted file mode 100644 index 90eac76c4b3..00000000000 --- a/docs/pot/templates.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Templates" -msgstr "" - diff --git a/docs/pot/time-zones.pot b/docs/pot/time-zones.pot deleted file mode 100644 index b5573031c86..00000000000 --- a/docs/pot/time-zones.pot +++ /dev/null @@ -1,330 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Time Zones" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following time zone identifiers are accepted by &PRODUCT;. 
There are several places that have a time zone as a required or optional parameter. These include scheduling recurring snapshots, creating a user, and specifying the usage time zone in the Configuration table." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Etc/GMT+12" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Etc/GMT+11" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pacific/Samoa" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pacific/Honolulu" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "US/Alaska" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Los_Angeles" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Mexico/BajaNorte" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "US/Arizona" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "US/Mountain" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Chihuahua" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Chicago" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Costa_Rica" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Mexico_City" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Canada/Saskatchewan" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Bogota" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/New_York" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Caracas" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Asuncion" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Cuiaba" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Halifax" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/La_Paz" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Santiago" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/St_Johns" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Araguaina" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Argentina/Buenos_Aires" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "America/Cayenne" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Godthab" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "America/Montevideo" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Etc/GMT+2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Atlantic/Azores" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Atlantic/Cape_Verde" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Africa/Casablanca" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Etc/UTC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Atlantic/Reykjavik" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Europe/London" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "CET" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Europe/Bucharest" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Africa/Johannesburg" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Beirut" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Africa/Cairo" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Jerusalem" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Europe/Minsk" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Europe/Moscow" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Africa/Nairobi" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Karachi" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Kolkata" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Bangkok" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Shanghai" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Kuala_Lumpur" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Australia/Perth" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Taipei" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Tokyo" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Asia/Seoul" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Australia/Adelaide" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Australia/Darwin" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Australia/Brisbane" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Australia/Canberra" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pacific/Guam" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Pacific/Auckland" -msgstr "" - diff --git a/docs/pot/tools.pot b/docs/pot/tools.pot deleted file mode 100644 index 08a478c9279..00000000000 --- a/docs/pot/tools.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Tools" -msgstr "" - diff --git a/docs/pot/topology-req.pot b/docs/pot/topology-req.pot deleted file mode 100644 index 3eb0f27ebf3..00000000000 --- a/docs/pot/topology-req.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Topology Requirements" -msgstr "" - diff --git a/docs/pot/troubleshooting-alerts.pot b/docs/pot/troubleshooting-alerts.pot deleted file mode 100644 index be428933b9f..00000000000 --- a/docs/pot/troubleshooting-alerts.pot +++ /dev/null @@ -1,68 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Alerts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following is the list of alert type numbers." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"MEMORY = 0\n" -"CPU = 1\n" -"STORAGE =2\n" -"STORAGE_ALLOCATED = 3\n" -"PUBLIC_IP = 4\n" -"PRIVATE_IP = 5\n" -"HOST = 6\n" -"USERVM = 7\n" -"DOMAIN_ROUTER = 8\n" -"CONSOLE_PROXY = 9\n" -"ROUTING = 10// lost connection to default route (to the gateway)\n" -"STORAGE_MISC = 11 // lost connection to default route (to the gateway)\n" -"USAGE_SERVER = 12 // lost connection to default route (to the gateway)\n" -"MANAGMENT_NODE = 13 // lost connection to default route (to the gateway)\n" -"DOMAIN_ROUTER_MIGRATE = 14\n" -"CONSOLE_PROXY_MIGRATE = 15\n" -"USERVM_MIGRATE = 16\n" -"VLAN = 17\n" -"SSVM = 18\n" -"USAGE_SERVER_RESULT = 19\n" -"STORAGE_DELETE = 20;\n" -"UPDATE_RESOURCE_COUNT = 21; //Generated when we fail to update the resource count\n" -"USAGE_SANITY_RESULT = 22;\n" -"DIRECT_ATTACHED_PUBLIC_IP = 23;\n" -"LOCAL_STORAGE = 24;\n" -"RESOURCE_LIMIT_EXCEEDED = 25; //Generated when the resource limit exceeds the limit. Currently used for recurring snapshots only\n" -"\n" -" " -msgstr "" - diff --git a/docs/pot/troubleshooting-lb-rules-fails.pot b/docs/pot/troubleshooting-lb-rules-fails.pot deleted file mode 100644 index 224d0b880cb..00000000000 --- a/docs/pot/troubleshooting-lb-rules-fails.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Load balancer rules fail after changing network offering" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Symptom" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After changing the network offering on a network, load balancer rules stop working." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Cause" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Load balancing rules were created while using a network service offering that includes an external load balancer device such as NetScaler, and later the network service offering changed to one that uses the &PRODUCT; virtual router." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Solution" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a firewall rule on the virtual router for each of your existing load balancing rules so that they continue to function." 
-msgstr "" - diff --git a/docs/pot/troubleshooting-recover-lost-virtual-router.pot b/docs/pot/troubleshooting-recover-lost-virtual-router.pot deleted file mode 100644 index 8affe2a2fd6..00000000000 --- a/docs/pot/troubleshooting-recover-lost-virtual-router.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Recovering a Lost Virtual Router" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Symptom" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A virtual router is running, but the host is disconnected. A virtual router no longer functions as expected." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Cause" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Virtual router is lost or down." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Solution" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you are sure that a virtual router is down forever, or no longer functions as expected, destroy it. You must create one afresh while keeping the backup router up and running (it is assumed this is in a redundant router setup):" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Force stop the router. Use the stopRouter API with forced=true parameter to do so." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Before you continue with destroying this router, ensure that the backup router is running. Otherwise the network connection will be lost." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Destroy the router by using the destroyRouter API." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Recreate the missing router by using the restartNetwork API with cleanup=false parameter. For more information about redundant router setup, see Creating a New Network Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information about the API syntax, see the API Reference at API Reference." -msgstr "" - diff --git a/docs/pot/troubleshooting-unable-to-deploy-vms.pot b/docs/pot/troubleshooting-unable-to-deploy-vms.pot deleted file mode 100644 index ebc0f213003..00000000000 --- a/docs/pot/troubleshooting-unable-to-deploy-vms.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Unable to deploy VMs from uploaded vSphere template" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Symptom" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When attempting to create a VM, the VM will not deploy." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Cause" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the template was created by uploading an OVA file that was created using vSphere Client, it is possible the OVA contained an ISO image. If it does, the deployment of VMs from the template will fail." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Solution" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Remove the ISO and re-upload the template." -msgstr "" - diff --git a/docs/pot/troubleshooting-unable-to-power-on-vm.pot b/docs/pot/troubleshooting-unable-to-power-on-vm.pot deleted file mode 100644 index f2b5e71b73c..00000000000 --- a/docs/pot/troubleshooting-unable-to-power-on-vm.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Unable to power on virtual machine on VMware" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Symptom" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Virtual machine does not power on. You might see errors like:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unable to open Swap File" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unable to access a file since it is locked" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Unable to access Virtual machine configuration" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Cause" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A known issue on VMware machines. ESX hosts lock certain critical virtual machine files and file systems to prevent concurrent changes. Sometimes the files are not unlocked when the virtual machine is powered off. When a virtual machine attempts to power on, it can not access these critical files, and the virtual machine is unable to power on." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Solution" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "See the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware Knowledge Base Article" -msgstr "" - diff --git a/docs/pot/troubleshooting-working-with-server-logs.pot b/docs/pot/troubleshooting-working-with-server-logs.pot deleted file mode 100644 index 367936c4881..00000000000 --- a/docs/pot/troubleshooting-working-with-server-logs.pot +++ /dev/null @@ -1,76 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working with Server Logs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; Management Server logs all web site, middle tier, and database activities for diagnostics purposes in /var/log/cloud/management/. The &PRODUCT; logs a variety of error messages. We recommend this command to find the problematic output in the Management Server log:." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When copying and pasting a command, be sure the command has pasted as a single line before executing. Some document viewers may introduce unwanted line breaks in copied text." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" grep -i -E 'exception|unable|fail|invalid|leak|warn|error' /var/log/cloud/management/management-server.log\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; processes requests with a Job ID. If you find an error in the logs and you are interested in debugging the issue you can grep for this job ID in the management server log. For example, suppose that you find the following ERROR message:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" 2010-10-04 13:49:32,595 ERROR [cloud.vm.UserVmManagerImpl] (Job-Executor-11:job-1076) Unable to find any host for [User|i-8-42-VM-untagged]\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Note that the job ID is 1076. You can track back the events relating to job 1076 with the following grep:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -" grep \"job-1076)\" management-server.log\n" -" " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; Agent Server logs its activities in /var/log/cloud/agent/." -msgstr "" - diff --git a/docs/pot/troubleshooting.pot b/docs/pot/troubleshooting.pot deleted file mode 100644 index b233f5a9f7d..00000000000 --- a/docs/pot/troubleshooting.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Troubleshooting" -msgstr "" - diff --git a/docs/pot/troublesht-dataloss-on-exp-primary-storage.pot b/docs/pot/troublesht-dataloss-on-exp-primary-storage.pot deleted file mode 100644 index 43272a472c3..00000000000 --- a/docs/pot/troublesht-dataloss-on-exp-primary-storage.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Data Loss on Exported Primary Storage" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Symptom" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Loss of existing data on primary storage which has been exposed as a Linux NFS server export on an iSCSI volume." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Cause" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "It is possible that a client from outside the intended pool has mounted the storage. When this occurs, the LVM is wiped and all data in the volume is lost" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Solution" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When setting up LUN exports, restrict the range of IP addresses that are allowed access by specifying a subnet mask. For example:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "echo “/export 192.168.1.0/24(rw,async,no_root_squash)†> /etc/exports" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Adjust the above command to suit your deployment needs." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "More Information" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "See the export procedure in the \"Secondary Storage\" section of the &PRODUCT; Installation Guide" -msgstr "" - diff --git a/docs/pot/troublesht-mtn-mode-not-working-on-vCenter.pot b/docs/pot/troublesht-mtn-mode-not-working-on-vCenter.pot deleted file mode 100644 index e49786b6044..00000000000 --- a/docs/pot/troublesht-mtn-mode-not-working-on-vCenter.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Maintenance mode not working on vCenter" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Symptom" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Host was placed in maintenance mode, but still appears live in vCenter." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Cause" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; administrator UI was used to place the host in scheduled maintenance mode. This mode is separate from vCenter's maintenance mode." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Solution" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use vCenter to place the host in maintenance mode." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "More Information" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "See " -msgstr "" - diff --git a/docs/pot/tuning.pot b/docs/pot/tuning.pot deleted file mode 100644 index 0c4544734bd..00000000000 --- a/docs/pot/tuning.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Tuning" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This section provides tips on how to improve the performance of your cloud." -msgstr "" - diff --git a/docs/pot/ui.pot b/docs/pot/ui.pot deleted file mode 100644 index 59f89ccf5f1..00000000000 --- a/docs/pot/ui.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "User Interface" -msgstr "" - diff --git a/docs/pot/upgrade-virtual-router-with-service-offering.pot b/docs/pot/upgrade-virtual-router-with-service-offering.pot deleted file mode 100644 index 51de0692e61..00000000000 --- a/docs/pot/upgrade-virtual-router-with-service-offering.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Upgrading a Virtual Router with System Service Offerings" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When &PRODUCT; creates a virtual router, it uses default settings which are defined in a default system service offering. See . All the virtual routers in a single guest network use the same system service offering. You can upgrade the capabilities of the virtual router by creating and applying a custom system service offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Define your custom system service offering. See . In System VM Type, choose Domain Router." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Associate the system service offering with a network offering. See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Apply the network offering to the network where you want the virtual routers to use the new system service offering. If this is a new network, follow the steps in Adding an Additional Guest Network on page 66. To change the service offering for existing virtual routers, follow the steps in ." -msgstr "" - diff --git a/docs/pot/upload-existing-volume-to-vm.pot b/docs/pot/upload-existing-volume-to-vm.pot deleted file mode 100644 index 167f1e9e062..00000000000 --- a/docs/pot/upload-existing-volume-to-vm.pot +++ /dev/null @@ -1,150 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Uploading an Existing Volume to a Virtual Machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Existing data can be made accessible to a virtual machine. This is called uploading a volume to the VM. For example, this is useful to upload data from a local file system and attach it to a VM. Root administrators, domain administrators, and end users can all upload existing volumes to VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The upload is performed using HTTP. The uploaded volume is placed in the zone's secondary storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You cannot upload a volume if the preconfigured volume limit has already been reached. The default limit for the cloud is set in the global configuration parameter max.account.volumes, but administrators can also set per-domain limits that are different from the global default. See Setting Usage Limits" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To upload a volume:" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "(Optional) Create an MD5 hash (checksum) of the disk image file that you are going to upload. After uploading the data disk, &PRODUCT; will use this value to verify that no data corruption has occurred." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as an administrator or user" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Storage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Upload Volume." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name and Description. Any desired name and a brief description that can be shown in the UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Availability Zone. Choose the zone where you want to store the volume. VMs running on hosts in this zone can attach the volume." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format. Choose one of the following to indicate the disk image format of the volume." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disk Image Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "XenServer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VHD" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OVA" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "KVM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "QCOW2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL. The secure HTTP or HTTPS URL that &PRODUCT; can use to access your disk. The type of file at the URL must match the value chosen in Format. For example, if Format is VHD, the URL might look like the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "http://yourFileServerIP/userdata/myDataDisk.vhd" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "MD5 checksum. 
(Optional) Use the hash that you created in step 1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait until the status of the volume shows that the upload is complete. Click Instances - Volumes, find the name you specified in step 5, and make sure the status is Uploaded." -msgstr "" - diff --git a/docs/pot/upload-template.pot b/docs/pot/upload-template.pot deleted file mode 100644 index 9d6e56ce3de..00000000000 --- a/docs/pot/upload-template.pot +++ /dev/null @@ -1,130 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Uploading Templates" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "vSphere Templates and ISOs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are uploading a template that was created using vSphere Client, be sure the OVA file does not contain an ISO. If it does, the deployment of VMs from the template will fail." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Templates are uploaded based on a URL. HTTP is the supported access protocol. Templates are frequently large files. You can optionally gzip them to decrease upload times." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To upload a template:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Create Template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Provide the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name and Display Text. These will be shown in the UI, so choose something descriptive." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL. The Management Server will download the file from the specified URL, such as http://my.web.server/filename.vhd.gz." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Zone. Choose the zone where you want the template to be available, or All Zones to make it available throughout &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "OS Type: This helps &PRODUCT; and the hypervisor perform certain operations and make assumptions that improve the performance of the guest. Select one of the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the operating system of the stopped VM is listed, choose it." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the OS type of the stopped VM is not listed, choose Other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You should not choose an older version of the OS than the version in the image. For example, choosing CentOS 5.4 to support a CentOS 6.2 image will in general not work. In those cases you should choose Other." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hypervisor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Format. The format of the template upload file, such as VHD or OVA." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password Enabled. 
Choose Yes if your template has the &PRODUCT; password change script installed. See Adding Password Management to Your Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Extractable. Choose Yes if the template is available for extraction. If this option is selected, end users can download a full image of a template." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public. Choose Yes to make this template accessible to all users of this &PRODUCT; installation. The template will appear in the Community Templates list. See " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Featured. Choose Yes if you would like this template to be more prominent for users to select. The template will appear in the Featured Templates list. Only an administrator can make a template Featured." -msgstr "" - diff --git a/docs/pot/usage-record-format.pot b/docs/pot/usage-record-format.pot deleted file mode 100644 index 3142d0445f8..00000000000 --- a/docs/pot/usage-record-format.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Usage Record Format" -msgstr "" - diff --git a/docs/pot/usage-types.pot b/docs/pot/usage-types.pot deleted file mode 100644 index cb246352dc7..00000000000 --- a/docs/pot/usage-types.pot +++ /dev/null @@ -1,245 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Usage Types" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following table shows all usage types." -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "Type ID" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "Type Name" -msgstr "" - -#. 
Tag: entry -#, no-c-format -msgid "Description" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "1" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "RUNNING_VM" -msgstr "" - -#. Tag: entry -#, no-c-format -msgid "Tracks the total running time of a VM per usage record period. If the VM is upgraded during the usage period, you will get a separate Usage Record for the new upgraded VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ALLOCATED_VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the total time the VM has been created to the time when it has been destroyed. This usage type is also useful in determining usage for specific templates such as Windows-based templates." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "3" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP_ADDRESS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the public IP address owned by the account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK_BYTES_SENT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the total number of bytes sent by all the VMs for an account. Cloud.com does not currently track network traffic per VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "5" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK_BYTES_RECEIVED" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the total number of bytes received by all the VMs for an account. Cloud.com does not currently track network traffic per VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "6" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VOLUME" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the total time a disk volume has been created to the time when it has been destroyed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "7" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "TEMPLATE" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Tracks the total time a template (either created from a snapshot or uploaded to the cloud) has been created to the time it has been destroyed. The size of the template is also returned." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "8" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the total time an ISO has been uploaded to the time it has been removed from the cloud. The size of the ISO is also returned." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "9" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SNAPSHOT" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the total time from when a snapshot has been created to the time it have been destroyed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "11" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "LOAD_BALANCER_POLICY" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the total time a load balancer policy has been created to the time it has been removed. Cloud.com does not track whether a VM has been assigned to a policy." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "12" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "PORT_FORWARDING_RULE" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Tracks the time from when a port forwarding rule was created until the time it was removed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "13" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NETWORK_OFFERING" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The time from when a network offering was assigned to a VM until it is removed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "14" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN_USERS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The time from when a VPN user is created until it is removed." 
-msgstr "" - diff --git a/docs/pot/use-project-view.pot b/docs/pot/use-project-view.pot deleted file mode 100644 index e89b390b07c..00000000000 --- a/docs/pot/use-project-view.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using the Project View" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are a member of a project, you can use &PRODUCT;’s project view to see project members, resources consumed, and more. The project view shows only information related to one project. It is a useful way to filter out other information so you can concentrate on a project status and resources." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Project View." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "The project dashboard appears, showing the project’s VMs, volumes, users, events, network settings, and more. From the dashboard, you can:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Accounts tab to view and manage project members. If you are the project administrator, you can add new members, remove members, or change the role of a member from user to admin. Only one member at a time can have the admin role, so if you set another user’s role to admin, your role will change to regular user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(If invitations are enabled) Click the Invitations tab to view and manage invitations that have been sent to new project members but not yet accepted. Pending invitations will remain in this list until the new member accepts, the invitation timeout is reached, or you cancel the invitation." -msgstr "" - diff --git a/docs/pot/user-data-and-meta-data.pot b/docs/pot/user-data-and-meta-data.pot deleted file mode 100644 index 239cd72a20e..00000000000 --- a/docs/pot/user-data-and-meta-data.pot +++ /dev/null @@ -1,100 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:11:59\n" -"PO-Revision-Date: 2013-02-02T20:11:59\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "User Data and Meta Data" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; provides API access to attach user data to a deployed VM. Deployed VMs also have access to instance metadata via the virtual router." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "User data can be accessed once the IP address of the virtual router is known. Once the IP address is known, use the following steps to access the user data:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following command to find the virtual router." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# cat /var/lib/dhclient/dhclient-eth0.leases | grep dhcp-server-identifier | tail -1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Access user data by running the following command using the result of the above command" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# curl http://10.1.1.1/latest/user-data" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Meta Data can be accessed similarly, using a URL of the form http://10.1.1.1/latest/meta-data/{metadata type}. (For backwards compatibility, the previous URL http://10.1.1.1/latest/{metadata type} is also supported.) For metadata type, use one of the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "service-offering. A description of the VMs service offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "availability-zone. The Zone name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "local-ipv4. The guest IP of the VM" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "local-hostname. The hostname of the VM" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "public-ipv4. The first public IP for the router. (E.g. the first IP of eth2)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "public-hostname. This is the same as public-ipv4" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "instance-id. The instance name of the VM" -msgstr "" - diff --git a/docs/pot/user-services-overview.pot b/docs/pot/user-services-overview.pot deleted file mode 100644 index 7fc57020ed7..00000000000 --- a/docs/pot/user-services-overview.pot +++ /dev/null @@ -1,70 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "User Services Overview" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition to the physical and logical infrastructure of your cloud, and the &PRODUCT; software and servers, you also need a layer of user services so that people can actually make use of the cloud. 
This means not just a user UI, but a set of options and resources that users can choose from, such as templates for creating virtual machines, disk storage, and more. If you are running a commercial service, you will be keeping track of what services and resources users are consuming and charging them for that usage. Even if you do not charge anything for people to use your cloud – say, if the users are strictly internal to your organization, or just friends who are sharing your cloud – you can still keep track of what services they use and how much of them." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Service Offerings, Disk Offerings, Network Offerings, and Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A user creating a new instance can make a variety of choices about its characteristics and capabilities. &PRODUCT; provides several ways to present users with choices when creating a new instance:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Service Offerings, defined by the &PRODUCT; administrator, provide a choice of CPU speed, number of CPUs, RAM size, tags on the root disk, and other choices. See Creating a New Compute Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disk Offerings, defined by the &PRODUCT; administrator, provide a choice of disk size for primary data storage. See Creating a New Disk Offering." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Offerings, defined by the &PRODUCT; administrator, describe the feature set that is available to end users from the virtual router or external networking devices on a given guest network. See Network Offerings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Templates, defined by the &PRODUCT; administrator or by any &PRODUCT; user, are the base OS images that the user can choose from when creating a new instance. For example, &PRODUCT; includes CentOS as a template. See Working with Templates." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "In addition to these choices that are provided for users, there is another type of service offering which is available only to the &PRODUCT; root administrator, and is used for configuring virtual infrastructure resources. For more information, see Upgrading a Virtual Router with System Service Offerings." -msgstr "" - diff --git a/docs/pot/using-multiple-guest-networks.pot b/docs/pot/using-multiple-guest-networks.pot deleted file mode 100644 index c3a6c564332..00000000000 --- a/docs/pot/using-multiple-guest-networks.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using Multiple Guest Networks" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In zones that use advanced networking, additional networks for guest traffic may be added at any time after the initial installation. 
You can also customize the domain name associated with the network by specifying a DNS suffix for each network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A VM's networks are defined at VM creation time. A VM cannot add or remove networks after it has been created, although the user can go into the guest and remove the IP address from the NIC on a particular network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each VM has just one default network. The virtual router's DHCP reply will set the guest's default gateway as that for the default network. Multiple non-default networks may be added to a guest in addition to the single, required default network. The administrator can control which networks are available as the default network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Additional networks can either be available to all accounts or be assigned to a specific account. Networks that are available to all accounts are zone-wide. Any user with access to the zone can create a VM with access to that network. These zone-wide networks provide little or no isolation between guests.Networks that are assigned to a specific account provide strong isolation." -msgstr "" - diff --git a/docs/pot/using-netscaler-load-balancers.pot b/docs/pot/using-netscaler-load-balancers.pot deleted file mode 100644 index 289490169e5..00000000000 --- a/docs/pot/using-netscaler-load-balancers.pot +++ /dev/null @@ -1,110 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Using a NetScaler Load Balancer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Citrix NetScaler is supported as an external network element for load balancing in zones that use advanced networking (also called advanced zones). Set up an external load balancer when you want to provide load balancing through means other than &PRODUCT;’s provided virtual router." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The NetScaler can be set up in direct (outside the firewall) mode. It must be added before any load balancing rules are deployed on guest VMs in the zone." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The functional behavior of the NetScaler with &PRODUCT; is the same as described in the &PRODUCT; documentation for using an F5 external load balancer. The only exception is that the F5 supports routing domains, and NetScaler does not. NetScaler can not yet be used as a firewall." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Citrix NetScaler comes in three varieties. The following table summarizes how these variants are treated in &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NetScaler ADC Type" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Description of Capabilities" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; Supported Features" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "MPX" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Physical appliance. Capable of deep packet inspection. Can act as application firewall and load balancer" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In advanced zones, load balancer functionality fully supported without limitation. In basic zones, static NAT, elastic IP (EIP), and elastic load balancing (ELB) are also provided" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPX" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Virtual appliance. Can run as VM on XenServer, ESXi, and Hyper-V hypervisors. Same functionality as MPX" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported only on ESXi. Same functional support as for MPX. &PRODUCT; will treat VPX and MPX as the same device type" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "SDX" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Physical appliance. Can create multiple fully isolated VPX instances on a single appliance to support multi-tenant usage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; will dynamically provision, configure, and manage the lifecycle of VPX instances on the SDX. Provisioned instances are added into &PRODUCT; automatically – no manual configuration by the administrator is required. Once a VPX instance is added into &PRODUCT;, it is treated the same as a VPX on an ESXi host." -msgstr "" - diff --git a/docs/pot/using-sshkeys.pot b/docs/pot/using-sshkeys.pot deleted file mode 100644 index d594e8a1238..00000000000 --- a/docs/pot/using-sshkeys.pot +++ /dev/null @@ -1,238 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using SSH Keys for Authentication" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition to the username and password authentication, &PRODUCT; supports using SSH keys to log in to the cloud infrastructure for additional security. You can use the createSSHKeyPair API to generate the SSH keys." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Because each cloud user has their own SSH key, one cloud user cannot log in to another cloud user's instances unless they share their SSH key files. Using a single SSH key pair, you can manage multiple instances." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Creating an Instance Template that Supports SSH Keys" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a instance template that supports SSH Keys." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Create a new instance by using the template provided by cloudstack." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information on creating a new instance, see" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Download the cloudstack script from The SSH Key Gen Script to the instance you have created." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "wget http://downloads.sourceforge.net/project/cloudstack/SSH%20Key%20Gen%20Script/cloud-set-guest-sshkey.in?r=http%3A%2F%2Fsourceforge.net%2Fprojects%2Fcloudstack%2Ffiles%2FSSH%2520Key%2520Gen%2520Script%2F&ts=1331225219&use_mirror=iweb" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy the file to /etc/init.d." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "cp cloud-set-guest-sshkey.in /etc/init.d/" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Give the necessary permissions on the script:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "chmod +x /etc/init.d/cloud-set-guest-sshkey.in" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the script while starting up the operating system:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "chkconfig --add cloud-set-guest-sshkey.in" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Stop the instance." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Creating the SSH Keypair" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You must make a call to the createSSHKeyPair api method. You can either use the &PRODUCT; Python API library or the curl commands to make the call to the cloudstack api." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example, make a call from the cloudstack server to create a SSH keypair called \"keypair-doc\" for the admin account in the root domain:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that you adjust these values to meet your needs. If you are making the API call from a different server, your URL/PORT will be different, and you will need to use the API keys." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Run the following curl command:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "curl --globoff \"http://localhost:8096/?command=createSSHKeyPair&name=keypair-doc&account=admin&domainid=5163440e-c44b-42b5-9109-ad75cae8e8a2\"" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The output is something similar to what is given below:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><createsshkeypairresponse cloud-stack-version=\"3.0.0.20120228045507\"><keypair><name>keypair-doc</name><fingerprint>f6:77:39:d5:5e:77:02:22:6a:d8:7f:ce:ab:cd:b3:56</fingerprint><privatekey>-----BEGIN RSA PRIVATE KEY-----\n" -"MIICXQIBAAKBgQCSydmnQ67jP6lNoXdX3noZjQdrMAWNQZ7y5SrEu4wDxplvhYci\n" -"dXYBeZVwakDVsU2MLGl/K+wefwefwefwefwefJyKJaogMKn7BperPD6n1wIDAQAB\n" -"AoGAdXaJ7uyZKeRDoy6wA0UmF0kSPbMZCR+UTIHNkS/E0/4U+6lhMokmFSHtu\n" -"mfDZ1kGGDYhMsdytjDBztljawfawfeawefawfawfawQQDCjEsoRdgkduTy\n" -"QpbSGDIa11Jsc+XNDx2fgRinDsxXI/zJYXTKRhSl/LIPHBw/brW8vzxhOlSOrwm7\n" -"VvemkkgpAkEAwSeEw394LYZiEVv395ar9MLRVTVLwpo54jC4tsOxQCBlloocK\n" -"lYaocpk0yBqqOUSBawfIiDCuLXSdvBo1Xz5ICTM19vgvEp/+kMuECQBzm\n" -"nVo8b2Gvyagqt/KEQo8wzH2THghZ1qQ1QRhIeJG2aissEacF6bGB2oZ7Igim5L14\n" -"4KR7OeEToyCLC2k+02UCQQCrniSnWKtDVoVqeK/zbB32JhW3Wullv5p5zUEcd\n" -"KfEEuzcCUIxtJYTahJ1pvlFkQ8anpuxjSEDp8x/18bq3\n" -"-----END RSA PRIVATE KEY-----\n" -"</privatekey></keypair></createsshkeypairresponse>" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Copy the key data into a file. The file looks like this:" -msgstr "" - -#. 
Tag: programlisting -#, no-c-format -msgid "-----BEGIN RSA PRIVATE KEY-----\n" -"MIICXQIBAAKBgQCSydmnQ67jP6lNoXdX3noZjQdrMAWNQZ7y5SrEu4wDxplvhYci\n" -"dXYBeZVwakDVsU2MLGl/K+wefwefwefwefwefJyKJaogMKn7BperPD6n1wIDAQAB\n" -"AoGAdXaJ7uyZKeRDoy6wA0UmF0kSPbMZCR+UTIHNkS/E0/4U+6lhMokmFSHtu\n" -"mfDZ1kGGDYhMsdytjDBztljawfawfeawefawfawfawQQDCjEsoRdgkduTy\n" -"QpbSGDIa11Jsc+XNDx2fgRinDsxXI/zJYXTKRhSl/LIPHBw/brW8vzxhOlSOrwm7\n" -"VvemkkgpAkEAwSeEw394LYZiEVv395ar9MLRVTVLwpo54jC4tsOxQCBlloocK\n" -"lYaocpk0yBqqOUSBawfIiDCuLXSdvBo1Xz5ICTM19vgvEp/+kMuECQBzm\n" -"nVo8b2Gvyagqt/KEQo8wzH2THghZ1qQ1QRhIeJG2aissEacF6bGB2oZ7Igim5L14\n" -"4KR7OeEToyCLC2k+02UCQQCrniSnWKtDVoVqeK/zbB32JhW3Wullv5p5zUEcd\n" -"KfEEuzcCUIxtJYTahJ1pvlFkQ8anpuxjSEDp8x/18bq3\n" -"-----END RSA PRIVATE KEY-----" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Save the file." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Creating an Instance" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "After you save the SSH keypair file, you must create an instance by using the template that you created at . Ensure that you use the same SSH key name that you created at ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You cannot create the instance by using the GUI at this time and associate the instance with the newly created SSH keypair." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A sample curl command to create a new instance is:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "curl --globoff http://localhost:<port number>/?command=deployVirtualMachine\\&zoneId=1\\&serviceOfferingId=18727021-7556-4110-9322-d625b52e0813\\&templateId=e899c18a-ce13-4bbf-98a9-625c5026e0b5\\&securitygroupids=ff03f02f-9e3b-48f8-834d-91b822da40c5\\&account=admin\\&domainid=1\\&keypair=keypair-doc" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Substitute the template, service offering and security group IDs (if you are using the security group feature) that are in your cloud environment." -msgstr "" - -#. 
Tag: title -#, no-c-format -msgid "Logging In Using the SSH Keypair" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To test your SSH key generation is successful, check whether you can log in to the cloud setup." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example, from a Linux OS, run:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "ssh -i ~/.ssh/keypair-doc <ip address>" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The -i parameter tells the ssh client to use a ssh key found at ~/.ssh/keypair-doc." -msgstr "" - diff --git a/docs/pot/using-swift-for-secondary-storage.pot b/docs/pot/using-swift-for-secondary-storage.pot deleted file mode 100644 index 72863d80bd1..00000000000 --- a/docs/pot/using-swift-for-secondary-storage.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using Swift for Secondary Storage" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "&PRODUCT; supports OpenStack Object Storage ( Swift) for secondary storage. When using Swift, you configure Swift storage for the entire &PRODUCT;, then set up NFS secondary storage for each zone as usual. The NFS storage in each zone acts as a staging area through which all templates and other secondary storage data pass before being forwarded to Swift. The Swift storage acts as a cloud-wide resource, making templates and other data available to any zone in the cloud. There is no hierarchy in the Swift storage, just one Swift container per storage object. Any secondary storage in the whole cloud can pull a container from Swift at need. It is not necessary to copy templates and snapshots from one zone to another, as would be required when using zone NFS alone. Everything is available everywhere" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Swift storage must be set up before you add NFS secondary storage to zones. This is accomplished through some additional configuration steps on a fresh Management Server installation, before you add the first zone. The procedure is described in Adding a Zone in the Advanced Installation Guide." -msgstr "" - diff --git a/docs/pot/using-vpn-with-mac.pot b/docs/pot/using-vpn-with-mac.pot deleted file mode 100644 index d54cfb378df..00000000000 --- a/docs/pot/using-vpn-with-mac.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using VPN with Mac OS X" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Mac OS X, in Network Preferences - Advanced, make sure Send all traffic over VPN connection is not checked." -msgstr "" - diff --git a/docs/pot/using-vpn-with-windows.pot b/docs/pot/using-vpn-with-windows.pot deleted file mode 100644 index ccde5a2111e..00000000000 --- a/docs/pot/using-vpn-with-windows.pot +++ /dev/null @@ -1,95 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using VPN with Windows" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The procedure to use VPN varies by Windows version. Generally, the user must edit the VPN properties and make sure that the default route is not the VPN. The following steps are for Windows L2TP clients on Windows Vista. The commands should be similar for other Windows versions." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI and click on the source NAT IP for the account. The VPN tab should display the IPsec preshared key. Make a note of this and the source NAT IP. The UI also lists one or more users and their passwords. Choose one of these users, or, if none exists, add a user and password." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the Windows box, go to Control Panel, then select Network and Sharing center. Click Setup a connection or network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the next dialog, select No, create a new connection." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the next dialog, select Use my Internet Connection (VPN)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the next dialog, enter the source NAT IP from step 1 and give the connection a name. Check Don't connect now." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the next dialog, enter the user name and password selected in step 1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Create." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Go back to the Control Panel and click Network Connections to see the new connection. The connection is not active yet." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "Right-click the new connection and select Properties. In the Properties dialog, select the Networking tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In Type of VPN, choose L2TP IPsec VPN, then click IPsec settings. Select Use preshared key. Enter the preshared key from Step 1." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The connection is ready for activation. Go back to Control Panel -> Network Connections and double-click the created connection." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Enter the user name and password from Step 1." -msgstr "" - diff --git a/docs/pot/vcenter-maintenance-mode.pot b/docs/pot/vcenter-maintenance-mode.pot deleted file mode 100644 index 71536afea33..00000000000 --- a/docs/pot/vcenter-maintenance-mode.pot +++ /dev/null @@ -1,80 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. 
Tag: title -#, no-c-format -msgid "vCenter and Maintenance Mode" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To enter maintenance mode on a vCenter host, both vCenter and &PRODUCT; must be used in concert. &PRODUCT; and vCenter have separate maintenance modes that work closely together." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Place the host into &PRODUCT;'s \"scheduled maintenance\" mode. This does not invoke the vCenter maintenance mode, but only causes VMs to be migrated off the host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the &PRODUCT; maintenance mode is requested, the host first moves into the Prepare for Maintenance state. In this state it cannot be the target of new guest VM starts. Then all VMs will be migrated off the server. Live migration will be used to move VMs off the host. This allows the guests to be migrated to other hosts with no disruption to the guests. After this migration is completed, the host will enter the Ready for Maintenance mode." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Wait for the \"Ready for Maintenance\" indicator to appear in the UI." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Now use vCenter to perform whatever actions are necessary to maintain the host. During this time, the host cannot be the target of new VM allocations." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When the maintenance tasks are complete, take the host out of maintenance mode as follows:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "First use vCenter to exit the vCenter maintenance mode." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This makes the host ready for &PRODUCT; to reactivate it." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Then use &PRODUCT;'s administrator UI to cancel the &PRODUCT; maintenance mode" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When the host comes back online, the VMs that were migrated off of it may be migrated back to it manually and new VMs can be added." -msgstr "" - diff --git a/docs/pot/verifying-source.pot b/docs/pot/verifying-source.pot deleted file mode 100644 index 9b2d586aacf..00000000000 --- a/docs/pot/verifying-source.pot +++ /dev/null @@ -1,115 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Verifying the downloaded release" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There are a number of mechanisms to check the authenticity and validity of a downloaded release." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Getting the KEYS" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To enable you to verify the GPG signature, you will need to download the KEYS file." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "You next need to import those keys, which you can do by running the following command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# gpg --import KEYS" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "GPG" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; project provides a detached GPG signature of the release. To check the signature, run the following command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ gpg --verify apache-cloudstack-4.0.0-incubating-src.tar.bz2.asc" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the signature is valid you will see a line of output that contains 'Good signature'." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "MD5" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition to the cryptographic signature, &PRODUCT; has an MD5 checksum that you can use to verify the download matches the release. You can verify this hash by executing the following command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ gpg --print-md MD5 apache-cloudstack-4.0.0-incubating-src.tar.bz2 | diff - apache-cloudstack-4.0.0-incubating-src.tar.bz2.md5" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If this successfully completes you should see no output. If there is any output from them, then there is a difference between the hash you generated locally and the hash that has been pulled from the server." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "SHA512" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In addition to the MD5 hash, the &PRODUCT; project provides a SHA512 cryptographic hash to aid in assurance of the validity of the downloaded release. You can verify this hash by executing the following command:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "$ gpg --print-md SHA512 apache-cloudstack-4.0.0-incubating-src.tar.bz2 | diff - apache-cloudstack-4.0.0-incubating-src.tar.bz2.sha" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If this command successfully completes you should see no output. If there is any output from them, then there is a difference between the hash you generated locally and the hash that has been pulled from the server." -msgstr "" - diff --git a/docs/pot/virtual-machine-usage-record-format.pot b/docs/pot/virtual-machine-usage-record-format.pot deleted file mode 100644 index 5a8763a481a..00000000000 --- a/docs/pot/virtual-machine-usage-record-format.pot +++ /dev/null @@ -1,110 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Virtual Machine Usage Record Format" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For running and allocated virtual machine usage, the following fields exist in a usage record:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "account – name of the account" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "accountid – ID of the account" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "domainid – ID of the domain in which this account resides" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "zoneid – Zone where the usage occurred" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "description – A string describing what the usage record is tracking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usage – String representation of the usage, including the units of usage (e.g. 'Hrs' for VM running time)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usagetype – A number representing the usage type (see Usage Types)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "rawusage – A number representing the actual usage in hours" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "virtualMachineId – The ID of the virtual machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "name – The name of the virtual machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "offeringid – The ID of the service offering" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "templateid – The ID of the template or the ID of the parent template. The parent template value is present when the current template was created from a volume." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "usageid – Virtual machine" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "type – Hypervisor" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "startdate, enddate – The range of time for which the usage is aggregated; see Dates in the Usage Record" -msgstr "" - diff --git a/docs/pot/virtual-machines.pot b/docs/pot/virtual-machines.pot deleted file mode 100644 index 9991bc8ff61..00000000000 --- a/docs/pot/virtual-machines.pot +++ /dev/null @@ -1,30 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working With Virtual Machines" -msgstr "" - diff --git a/docs/pot/virtual-router.pot b/docs/pot/virtual-router.pot deleted file mode 100644 index d5748aef125..00000000000 --- a/docs/pot/virtual-router.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. 
See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Virtual Router" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The virtual router is a type of System Virtual Machine. The virtual router is one of the most frequently used service providers in &PRODUCT;. The end user has no direct access to the virtual router. Users can ping the virtual router and take actions that affect it (such as setting up port forwarding), but users do not have SSH access into the virtual router." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "There is no mechanism for the administrator to log in to the virtual router. Virtual routers can be restarted by administrators, but this will interrupt public network access and other services for end users. A basic test in debugging networking issues is to attempt to ping the virtual router from a guest VM. Some of the characteristics of the virtual router are determined by its associated system service offering." -msgstr "" - diff --git a/docs/pot/vlan-allocation-eg.pot b/docs/pot/vlan-allocation-eg.pot deleted file mode 100644 index 52608ca0353..00000000000 --- a/docs/pot/vlan-allocation-eg.pot +++ /dev/null @@ -1,130 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "VLAN Allocation Example" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLANs are required for public and guest traffic. The following is an example of a VLAN allocation scheme:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN IDs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Traffic type" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Scope" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "less than 500" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Management traffic. Reserved for administrative purposes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; software can access this, hypervisors, system VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "500-599" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN carrying public traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; accounts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "600-799" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLANs carrying guest traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; accounts. Account-specific VLAN is chosen from this pool." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "800-899" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "&PRODUCT; accounts. Account-specific VLAN chosen by &PRODUCT; admin to assign to that account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "900-999" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN carrying guest traffic" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; accounts. Can be scoped by project, domain, or all accounts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "greater than 1000" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reserved for future use" -msgstr "" - diff --git a/docs/pot/vlan-provisioning.pot b/docs/pot/vlan-provisioning.pot deleted file mode 100644 index 406027d3248..00000000000 --- a/docs/pot/vlan-provisioning.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "VLAN Provisioning" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "&PRODUCT; automatically creates and destroys interfaces bridged to VLANs on the hosts. In general the administrator does not need to manage this process." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; manages VLANs differently based on hypervisor type. For XenServer or KVM, the VLANs are created on only the hosts where they will be used and then they are destroyed when all guests that require them have been terminated or moved to another host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For vSphere the VLANs are provisioned on all hosts in the cluster even if there is no guest running on a particular Host that requires the VLAN. This allows the administrator to perform live migration and other functions in vCenter without having to create the VLAN on the destination Host. Additionally, the VLANs are not removed from the Hosts when they are no longer needed." -msgstr "" - diff --git a/docs/pot/vm-lifecycle.pot b/docs/pot/vm-lifecycle.pot deleted file mode 100644 index 0430f66714c..00000000000 --- a/docs/pot/vm-lifecycle.pot +++ /dev/null @@ -1,75 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "VM Lifecycle" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Virtual machines can be in the following states:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Once a virtual machine is destroyed, it cannot be recovered. All the resources used by the virtual machine will be reclaimed by the system. This includes the virtual machine’s IP address." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A stop will attempt to gracefully shut down the operating system, which typically involves terminating all the running applications. If the operation system cannot be stopped, it will be forcefully terminated. This has the same effect as pulling the power cord to a physical machine." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A reboot is a stop followed by a start." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; preserves the state of the virtual machine hard disk until the machine is destroyed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A running virtual machine may fail because of hardware or network issues. A failed virtual machine is in the down state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The system places the virtual machine into the down state if it does not receive the heartbeat from the hypervisor for three minutes." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The user can manually restart the virtual machine from the down state." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The system will start the virtual machine from the down state automatically if the virtual machine is marked as HA-enabled." 
-msgstr "" - diff --git a/docs/pot/vm-storage-migration.pot b/docs/pot/vm-storage-migration.pot deleted file mode 100644 index 4e0ca1f5f1a..00000000000 --- a/docs/pot/vm-storage-migration.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "VM Storage Migration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Supported in XenServer, KVM, and VMware." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This procedure is different from moving disk volumes from one VM to another. See Detaching and Moving Volumes ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can migrate a virtual machine’s root disk volume or any additional data disk volume from one storage pool to another in the same zone." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "You can use the storage migration feature to achieve some commonly desired administration goals, such as balancing the load on storage pools and increasing the reliability of virtual machines by moving them away from any storage pool that is experiencing issues." -msgstr "" - diff --git a/docs/pot/vmware-install.pot b/docs/pot/vmware-install.pot deleted file mode 100644 index c5c97349e5c..00000000000 --- a/docs/pot/vmware-install.pot +++ /dev/null @@ -1,618 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "VMware vSphere Installation and Configuration" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you want to use the VMware vSphere hypervisor to run guest virtual machines, install vSphere on the host(s) in your cloud." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Preparation Checklist for VMware" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "For a smoother installation, gather the following information before you start:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Information listed in " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Information listed in " -msgstr "" - -#. Tag: title -#, no-c-format -msgid "vCenter Checklist" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You will need the following information about vCenter." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Requirement" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Value" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Notes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter User" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This user must have admin privileges." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter User Password" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Password for the above user." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Datacenter Name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name of the datacenter." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Cluster Name" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Name of the cluster." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Networking Checklist for VMware" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You will need the following information about VLAN." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN Information" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ESXi VLAN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN on which all your ESXi hypervisors reside." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ESXI VLAN IP Address" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "IP Address Range in the ESXi VLAN. One address per Virtual Router is used from this range." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ESXi VLAN IP Gateway" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "ESXi VLAN Netmask" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Management Server VLAN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN on which the &PRODUCT; Management server is installed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public VLAN" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN for the Public Network." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public VLAN Gateway" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public VLAN Netmask" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public VLAN IP Address Range" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Range of Public IP Addresses available for &PRODUCT; use. These addresses will be used for virtual router on &PRODUCT; to route private traffic to external networks." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN Range for Customer use" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A contiguous range of non-routable VLANs. One VLAN will be assigned for each customer." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "vSphere Installation Steps" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you haven't already, you'll need to download and purchase vSphere from the VMware Website (https://www.vmware.com/tryvmware/index.php?p=vmware-vsphere&lp=1) and install it by following the VMware vSphere Installation Guide." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Following installation, perform the following configuration, which are described in the next few sections:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Required" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Optional" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ESXi host setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NIC bonding" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure host physical networking, virtual switch, vCenter Management Network, and extended port range" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Multipath storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Prepare storage for iSCSI" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Configure clusters in vCenter and add hosts to them, or add hosts without clusters to vCenter" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "ESXi Host setup" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All ESXi hosts should enable CPU hardware virtualization support in BIOS. Please note hardware virtualization support is not enabled by default on most servers." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Physical Host Networking" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You should have a plan for cabling the vSphere hosts. Proper network configuration is required before adding a vSphere host to &PRODUCT;. To configure an ESXi host, you can use vClient to add it as standalone host to vCenter first. Once you see the host appearing in the vCenter inventory tree, click the host node in the inventory tree, and navigate to the Configuration tab." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the host configuration tab, click the \"Hardware/Networking\" link to bring up the networking configuration page as above." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configure Virtual Switch" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A default virtual switch vSwitch0 is created. &PRODUCT; requires all ESXi hosts in the cloud to use the same set of virtual switch names. If you change the default virtual switch name, you will need to configure one or more &PRODUCT; configuration variables as well." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Separating Traffic" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; allows you to use vCenter to configure three separate networks per ESXi host. These networks are identified by the name of the vSwitch they are connected to. 
The allowed networks for configuration are public (for traffic to/from the public internet), guest (for guest-guest traffic), and private (for management and usually storage traffic). You can use the default virtual switch for all three, or create one or two other vSwitches for those traffic types." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you want to separate traffic in this way you should first create and configure vSwitches in vCenter according to the vCenter instructions. Take note of the vSwitch names you have used for each traffic type. You will configure &PRODUCT; to use these vSwitches." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Increasing Ports" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "By default a virtual switch on ESXi hosts is created with 56 ports. We recommend setting it to 4088, the maximum number of ports allowed. To do that, click the \"Properties...\" link for virtual switch (note this is not the Properties link for Networking)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In vSwitch properties dialog, select the vSwitch and click Edit. You should see the following dialog:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In this dialog, you can change the number of switch ports. After you've done that, ESXi hosts are required to reboot in order for the setting to take effect." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configure vCenter Management Network" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the vSwitch properties dialog box, you may see a vCenter management network. This same network will also be used as the &PRODUCT; management network. &PRODUCT; requires the vCenter management network to be configured properly. Select the management network item in the dialog, then click Edit." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure the following values are set:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VLAN ID set to the desired ID" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "vMotion enabled." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Management traffic enabled." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If the ESXi hosts have multiple VMKernel ports, and ESXi is not using the default value \"Management Network\" as the management network name, you must follow these guidelines to configure the management network port group so that &PRODUCT; can find it:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use one label for the management network port across all ESXi hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the &PRODUCT; UI, go to Configuration - Global Settings and set vmware.management.portgroup to the management network label from the ESXi hosts." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Extend Port Range for &PRODUCT; Console Proxy" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Applies only to VMware vSphere version 4.x)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You need to extend the range of firewall ports that the console proxy works with on the hosts. This is to enable the console proxy to work with VMware-based VMs. The default additional port range is 59000-60000. To extend the port range, log in to the VMware ESX service console on each host and run the following commands:" -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "\n" -"esxcfg-firewall -o 59000-60000,tcp,in,vncextras\n" -"esxcfg-firewall -o 59000-60000,tcp,out,vncextras\n" -" " -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Configure NIC Bonding for vSphere" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NIC bonding on vSphere hosts may be done according to the vSphere installation guide." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Storage Preparation for vSphere (iSCSI only)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use of iSCSI requires preparatory work in vCenter. You must add an iSCSI target and create an iSCSI datastore." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "If you are using NFS, skip this section." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Enable iSCSI initiator for ESXi hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In vCenter, go to hosts and Clusters/Configuration, and click Storage Adapters link. You will see:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select iSCSI software adapter and click Properties." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the Configure... button." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Check Enabled to enable the initiator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click OK to save." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Add iSCSI target" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Under the properties dialog, add the iSCSI target info:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Repeat these steps for all ESXi hosts in the cluster." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Create an iSCSI datastore" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You should now create a VMFS datastore. Follow these steps to do so:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Select Home/Inventory/Datastores." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Right click on the datacenter node." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose Add Datastore... command." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Follow the wizard to create a iSCSI datastore." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This procedure should be done on one host in the cluster. It is not necessary to do this on all hosts." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Multipathing for vSphere (Optional)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Storage multipathing on vSphere nodes may be done according to the vSphere installation guide." -msgstr "" - -#. 
Tag: title -#, no-c-format -msgid "Add Hosts or Configure Clusters (vSphere)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Use vCenter to create a vCenter cluster and add your desired hosts to the cluster. You will later add the entire cluster to &PRODUCT;. (see )." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Applying Hotfixes to a VMware vSphere Host" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disconnect the VMware vSphere cluster from &PRODUCT;. It should remain disconnected long enough to apply the hotfix on the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as root." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Navigate to the VMware cluster, click Actions, and select Unmanage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Watch the cluster status until it shows Unmanaged." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Perform the following on each of the ESXi hosts in the cluster:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Move each of the ESXi hosts in the cluster to maintenance mode." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Ensure that all the VMs are migrated to other hosts in that cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If there is only one host in that cluster, shutdown all the VMs and move the host into maintenance mode." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Apply the patch on the ESXi host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the host if prompted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Cancel the maintenance mode on the host." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Reconnect the cluster to &PRODUCT;:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Navigate to the VMware cluster, click Actions, and select Manage." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Watch the status to see that all the hosts come up. 
It might take several minutes for the hosts to come up." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Alternatively, verify the host state is properly synchronized and updated in the &PRODUCT; database." -msgstr "" - diff --git a/docs/pot/vmware-requirements.pot b/docs/pot/vmware-requirements.pot deleted file mode 100644 index 38aab24fb82..00000000000 --- a/docs/pot/vmware-requirements.pot +++ /dev/null @@ -1,210 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "System Requirements for vSphere Hosts" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Software requirements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vSphere and vCenter, both version 4.1 or 5.0." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vSphere Standard is recommended. Note however that customers need to consider the CPU constraints in place with vSphere licensing. 
See http://www.vmware.com/files/pdf/vsphere_pricing.pdf and discuss with your VMware sales representative." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter Server Standard is recommended." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Be sure all the hotfixes provided by the hypervisor vendor are applied. Track the release of hypervisor patches through your hypervisor vendor's support channel, and apply patches as soon as possible after they are released. &PRODUCT; will not track or notify you of required hypervisor patches. It is essential that your hosts are completely up to date with the provided hypervisor patches. The hypervisor vendor is likely to refuse to support any system that is not up to date with patches." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Apply All Necessary Hotfixes" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The lack of up-do-date hotfixes can lead to data corruption and lost VMs." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Hardware requirements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The host must be certified as compatible with vSphere. See the VMware Hardware Compatibility Guide at http://www.vmware.com/resources/compatibility/search.php." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All hosts must be 64-bit and must support HVM (Intel-VT or AMD-V enabled)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All hosts within a cluster must be homogenous. That means the CPUs must be of the same type, count, and feature flags." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "64-bit x86 CPU (more cores results in better performance)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Hardware virtualization support required" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "4 GB of memory" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "36 GB of local disk" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "At least 1 NIC" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Statically allocated IP Address" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "vCenter Server requirements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Processor - 2 CPUs 2.0GHz or higher Intel or AMD x86 processors. Processor requirements may be higher if the database runs on the same machine." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Memory - 3GB RAM. RAM requirements may be higher if your database runs on the same machine." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Disk storage - 2GB. Disk requirements may be higher if your database runs on the same machine." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Microsoft SQL Server 2005 Express disk requirements. The bundled database requires up to 2GB free disk space to decompress the installation archive." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Networking - 1Gbit or 10Gbit." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information, see \"vCenter Server and the vSphere Client Hardware Requirements\" at http://pubs.vmware.com/vsp40/wwhelp/wwhimpl/js/html/wwhelp.htm#href=install/c_vc_hw.html." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Other requirements:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VMware vCenter Standard Edition 4.1 or 5.0 must be installed and available to manage the vSphere hosts." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "vCenter must be configured to use the standard port 443 so that it can communicate with the &PRODUCT; Management Server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You must re-install VMware ESXi if you are going to re-use a host from a previous install." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; requires VMware vSphere 4.1 or 5.0. VMware vSphere 4.0 is not supported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All hosts must be 64-bit and must support HVM (Intel-VT or AMD-V enabled). 
All hosts within a cluster must be homogenous. That means the CPUs must be of the same type, count, and feature flags." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The &PRODUCT; management network must not be configured as a separate virtual network. The &PRODUCT; management network is the same as the vCenter management network, and will inherit its configuration. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; requires ESXi. ESX is not supported." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All resources used for &PRODUCT; must be used for &PRODUCT; only. &PRODUCT; cannot share instance of ESXi or storage with other management consoles. Do not share the same storage volumes that will be used by &PRODUCT; with a different set of ESXi servers that are not managed by &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Put all target ESXi hypervisors in a cluster in a separate Datacenter in vCenter." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The cluster that will be managed by &PRODUCT; should not contain any VMs. Do not run the management server, vCenter or any other VMs on the cluster that is designated for &PRODUCT; use. Create a separate cluster for use of &PRODUCT; and make sure that they are no VMs in this cluster." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All the required VLANS must be trunked into all network switches that are connected to the ESXi hypervisor hosts. These would include the VLANS for Management, Storage, vMotion, and guest VLANs. The guest VLAN (used in Advanced Networking; see Network Setup) is a contiguous range of VLANs that will be managed by &PRODUCT;." -msgstr "" - diff --git a/docs/pot/vmware-topology-req.pot b/docs/pot/vmware-topology-req.pot deleted file mode 100644 index 05e0e55487b..00000000000 --- a/docs/pot/vmware-topology-req.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "VMware Topology Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Server and secondary storage VMs must be able to access vCenter and all ESXi hosts in the zone. To allow the necessary access through the firewall, keep port 443 open." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Servers communicate with VMware vCenter servers on port 443 (HTTPs)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Servers communicate with the System VMs on port 3922 (ssh) on the management traffic network." -msgstr "" - diff --git a/docs/pot/volume-deletion-garbage-collection.pot b/docs/pot/volume-deletion-garbage-collection.pot deleted file mode 100644 index 16ca680565e..00000000000 --- a/docs/pot/volume-deletion-garbage-collection.pot +++ /dev/null @@ -1,60 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Volume Deletion and Garbage Collection" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The deletion of a volume does not delete the snapshots that have been created from the volume" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a VM is destroyed, data disk volumes that are attached to the VM are not deleted." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Volumes are permanently destroyed using a garbage collection process. The global configuration variables expunge.delay and expunge.interval determine when the physical deletion of volumes will occur." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "expunge.delay: determines how old the volume must be before it is destroyed, in seconds" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "expunge.interval: determines how often to run the garbage collection check" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Administrators should adjust these values depending on site policies around data retention." -msgstr "" - diff --git a/docs/pot/volume-status.pot b/docs/pot/volume-status.pot deleted file mode 100644 index 08640053661..00000000000 --- a/docs/pot/volume-status.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Volume Status" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a snapshot operation is triggered by means of a recurring snapshot policy, a snapshot is skipped if a volume has remained inactive since its last snapshot was taken. A volume is considered to be inactive if it is either detached or attached to a VM that is not running. &PRODUCT; ensures that at least one snapshot is taken since the volume last became inactive." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "When a snapshot is taken manually, a snapshot is always created regardless of whether a volume has been active or not." -msgstr "" - diff --git a/docs/pot/vpc.pot b/docs/pot/vpc.pot deleted file mode 100644 index 9841ebca29e..00000000000 --- a/docs/pot/vpc.pot +++ /dev/null @@ -1,240 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "About Virtual Private Clouds" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; Virtual Private Cloud is a private, isolated part of &PRODUCT;. A VPC can have its own virtual network topology that resembles a traditional physical network. You can launch VMs in the virtual network that can have private addresses in the range of your choice, for example: 10.0.0.0/16. You can define network tiers within your VPC network range, which in turn enables you to group similar kinds of instances based on IP address range." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "For example, if a VPC has the private range 10.0.0.0/16, its guest networks can have the network ranges 10.0.1.0/24, 10.0.2.0/24, 10.0.3.0/24, and so on." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Major Components of a VPC:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A VPC is comprised of the following network components:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPC: A VPC acts as a container for multiple isolated networks that can communicate with each other via its virtual router." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Network Tiers: Each tier acts as an isolated network with its own VLANs and CIDR list, where you can place groups of resources, such as VMs. The tiers are segmented by means of VLANs. The NIC of each tier acts as its gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Virtual Router: A virtual router is automatically created and started when you create a VPC. The virtual router connect the tiers and direct traffic among the public gateway, the VPN gateways, and the NAT instances. For each tier, a corresponding NIC and IP exist in the virtual router. The virtual router provides DNS and DHCP services through its IP." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Public Gateway: The traffic to and from the Internet routed to the VPC through the public gateway. In a VPC, the public gateway is not exposed to the end user; therefore, static routes are not support for the public gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Private Gateway: All the traffic to and from a private network routed to the VPC through the private gateway. For more information, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPN Gateway: The VPC side of a VPN connection." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Site-to-Site VPN Connection: A hardware-based VPN connection between your VPC and your datacenter, home network, or co-location facility. For more information, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Customer Gateway: The customer side of a VPN Connection. For more information, see ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "NAT Instance: An instance that provides Port Address Translation for instances to access the Internet via the public gateway. For more information, see ." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Network Architecture in a VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In a VPC, the following four basic options of network architectures are present:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPC with a public gateway only" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPC with public and private gateways" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPC with public and private gateways and site-to-site VPN access" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "VPC with a private gateway only and site-to-site VPN access" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Connectivity Options for a VPC" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "You can connect your VPC to:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Internet through the public gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The corporate datacenter by using a site-to-site VPN connection through the VPN gateway." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Both the Internet and your corporate datacenter by using both the public gateway and a VPN gateway." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "VPC Network Considerations" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Consider the following before you create a VPC:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A VPC, by default, is created in the enabled state." 
-msgstr "" - -#. Tag: para -#, no-c-format -msgid "A VPC can be created in Advance zone only, and can't belong to more than one zone at a time." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The default number of VPCs an account can create is 20. However, you can change it by using the max.account.vpcs global parameter, which controls the maximum number of VPCs an account is allowed to create." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The default number of tiers an account can create within a VPC is 3. You can configure this number by using the vpc.max.networks parameter." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Each tier should have an unique CIDR in the VPC. Ensure that the tier's CIDR should be within the VPC CIDR range." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A tier belongs to only one VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "All network tiers inside the VPC should belong to the same account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "When a VPC is created, by default, a SourceNAT IP is allocated to it. The Source NAT IP is released only when the VPC is removed." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A public IP can be used for only one purpose at a time. If the IP is a sourceNAT, it cannot be used for StaticNAT or port forwarding." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The instances only have a private IP address that you provision. To communicate with the Internet, enable NAT to an instance that you launch in your VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Only new networks can be added to a VPC. The maximum number of networks per VPC is limited by the value you specify in the vpc.max.networks parameter. The default value is three." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The load balancing service can be supported by only one tier inside the VPC." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If an IP address is assigned to a tier:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "That IP can't be used by more than one tier at a time in the VPC. For example, if you have tiers A and B, and a public IP1, you can create a port forwarding rule by using the IP either for A or B, but not for both." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "That IP can't be used for StaticNAT, load balancing, or port forwarding rules for another guest network inside the VPC." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Remote access VPN is not supported in VPC networks." -msgstr "" - diff --git a/docs/pot/vpn.pot b/docs/pot/vpn.pot deleted file mode 100644 index d8c1dafdf7d..00000000000 --- a/docs/pot/vpn.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "VPN" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "&PRODUCT; account owners can create virtual private networks (VPN) to access their virtual machines. If the guest network is instantiated from a network offering that offers the Remote Access VPN service, the virtual router (based on the System VM) is used to provide the service. &PRODUCT; provides a L2TP-over-IPsec-based remote access VPN service to guest virtual networks. Since each network gets its own virtual router, VPNs are not shared across the networks. VPN clients native to Windows, Mac OS X and iOS can be used to connect to the guest networks. The account owner can create and manage users for their VPN. &PRODUCT; does not use its account database for this purpose but uses a separate table. The VPN user database is shared across all the VPNs created by the account owner. All VPN users get access to all VPNs created by the account owner." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Make sure that not all traffic goes through the VPN. That is, the route installed by the VPN should be only for the guest network and not for all traffic." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Road Warrior / Remote Access. Users want to be able to connect securely from a home or office to a private network in the cloud. Typically, the IP address of the connecting client is dynamic and cannot be preconfigured on the VPN server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Site to Site. In this scenario, two private subnets are connected over the public Internet with a secure VPN tunnel. The cloud user’s subnet (for example, an office network) is connected through a gateway to the network in the cloud. The address of the user’s gateway must be preconfigured on the VPN server in the cloud. Note that although L2TP-over-IPsec can be used to set up Site-to-Site VPNs, this is not the primary intent of this feature." 
-msgstr "" - diff --git a/docs/pot/whatis.pot b/docs/pot/whatis.pot deleted file mode 100644 index 2c3216d27e3..00000000000 --- a/docs/pot/whatis.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "What Is &PRODUCT;?" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; is an open source software platform that pools computing resources to build public, private, and hybrid Infrastructure as a Service (IaaS) clouds. &PRODUCT; manages the network, storage, and compute nodes that make up a cloud infrastructure. Use &PRODUCT; to deploy, manage, and configure cloud computing environments." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Typical users are service providers and enterprises. With &PRODUCT;, you can:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up an on-demand, elastic cloud computing service. 
Service providers can sell self service virtual machine instances, storage volumes, and networking configurations over the Internet." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Set up an on-premise private cloud for use by employees. Rather than managing virtual machines in the same way as physical machines, with &PRODUCT; an enterprise can offer self-service virtual machines to users without involving IT departments." -msgstr "" - diff --git a/docs/pot/whats-in-this-adminguide.pot b/docs/pot/whats-in-this-adminguide.pot deleted file mode 100644 index ba033be316f..00000000000 --- a/docs/pot/whats-in-this-adminguide.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Who Should Read This" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "If you have already installed &PRODUCT; or you want to learn more about the ongoing operation and maintenance of a &PRODUCT;-powered cloud, read this documentation. It will help you start using, configuring, and managing the ongoing operation of your cloud." -msgstr "" - diff --git a/docs/pot/whats-new.pot b/docs/pot/whats-new.pot deleted file mode 100644 index 50d7f5af404..00000000000 --- a/docs/pot/whats-new.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "What's New in the API?" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The following describes any new major features of each &PRODUCT; version as it applies to API usage." -msgstr "" - -#. Tag: title -#, no-c-format -msgid "What's New in the API for 4.0" -msgstr "" - -#. 
Tag: title -#, no-c-format -msgid "What's New in the API for 3.0" -msgstr "" - diff --git a/docs/pot/who-should-read-installation.pot b/docs/pot/who-should-read-installation.pot deleted file mode 100644 index cb90df98e01..00000000000 --- a/docs/pot/who-should-read-installation.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Who Should Read This" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For those who have already gone through a design phase and planned a more sophisticated deployment, or those who are ready to start scaling up a trial installation. With the following procedures, you can start using the more powerful features of &PRODUCT;, such as advanced VLAN networking, high availability, additional network elements such as load balancers and firewalls, and support for multiple hypervisors including Citrix XenServer, KVM, and VMware vSphere." 
-msgstr "" - diff --git a/docs/pot/windows-installation.pot b/docs/pot/windows-installation.pot deleted file mode 100644 index 23fddc97e32..00000000000 --- a/docs/pot/windows-installation.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Windows OS Installation" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Download the installer, CloudInstanceManager.msi, from Download page and run the installer in the newly created Windows VM." -msgstr "" - diff --git a/docs/pot/work-with-usage.pot b/docs/pot/work-with-usage.pot deleted file mode 100644 index 996b6a0dd3c..00000000000 --- a/docs/pot/work-with-usage.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. 
The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working with Usage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Usage Server is an optional, separately-installed part of &PRODUCT; that provides aggregated usage records which you can use to create billing integration for &PRODUCT;. The Usage Server works by taking data from the events log and creating summary usage records that you can access using the listUsageRecords API call." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The usage records show the amount of resources, such as VM run time or template storage space, consumed by guest instances." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Usage Server runs at least once per day. It can be configured to run multiple times per day." -msgstr "" - diff --git a/docs/pot/working-with-hosts.pot b/docs/pot/working-with-hosts.pot deleted file mode 100644 index 0f12cec59e8..00000000000 --- a/docs/pot/working-with-hosts.pot +++ /dev/null @@ -1,40 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working With Hosts" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Adding Hosts" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Additional hosts can be added at any time to provide more capacity for guest VMs. For requirements and instructions, see ." -msgstr "" - diff --git a/docs/pot/working-with-iso.pot b/docs/pot/working-with-iso.pot deleted file mode 100644 index bf6af43cbc7..00000000000 --- a/docs/pot/working-with-iso.pot +++ /dev/null @@ -1,50 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working with ISOs" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; supports ISOs and their attachment to guest VMs. An ISO is a read-only file that has an ISO/CD-ROM style file system. Users can upload their own ISOs and mount them on their guest VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISOs are uploaded based on a URL. HTTP is the supported protocol. Once the ISO is available via HTTP specify an upload URL such as http://my.web.server/filename.iso." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISOs may be public or private, like templates.ISOs are not hypervisor-specific. That is, a guest on vSphere can mount the exact same image that a guest on KVM can mount." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "ISO images may be stored in the system and made available with a privacy level similar to templates. ISO images are classified as either bootable or not bootable. A bootable ISO image is one that contains an OS image. &PRODUCT; allows a user to boot a guest VM off of an ISO image. Users can also attach ISO images to guest VMs. For example, this enables installing PV drivers into Windows. ISO images are not hypervisor-specific." 
-msgstr "" - diff --git a/docs/pot/working-with-snapshots.pot b/docs/pot/working-with-snapshots.pot deleted file mode 100644 index 8f051bd468e..00000000000 --- a/docs/pot/working-with-snapshots.pot +++ /dev/null @@ -1,55 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working with Snapshots" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Supported for the following hypervisors: XenServer, VMware vSphere, and KVM)" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; supports snapshots of disk volumes. Snapshots are a point-in-time capture of virtual machine disks. Memory and CPU states are not captured." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Snapshots may be taken for volumes, including both root and data disks. The administrator places a limit on the number of stored snapshots per user. 
Users can create new volumes from the snapshot for recovery of particular files and they can create templates from snapshots to boot from a restored disk." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Users can create snapshots manually or by setting up automatic recurring snapshot policies. Users can also create disk volumes from snapshots, which may be attached to a VM like any other disk volume. Snapshots of both root disks and data disks are supported. However, &PRODUCT; does not currently support booting a VM from a recovered root disk. A disk recovered from snapshot of a root disk is treated as a regular data disk; the data on recovered disk can be accessed by attaching the disk to a VM." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A completed snapshot is copied from primary storage to secondary storage, where it is stored until deleted or purged by newer snapshot." -msgstr "" - diff --git a/docs/pot/working-with-system-vm.pot b/docs/pot/working-with-system-vm.pot deleted file mode 100644 index 8ef8d8148a2..00000000000 --- a/docs/pot/working-with-system-vm.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working with System Virtual Machines" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; uses several types of system virtual machines to perform tasks in the cloud. In general &PRODUCT; manages these system VMs and creates, starts, and stops them as needed based on scale and immediate needs. However, the administrator should be aware of them and their roles to assist in debugging issues." -msgstr "" - diff --git a/docs/pot/working-with-templates.pot b/docs/pot/working-with-templates.pot deleted file mode 100644 index fc687a376ef..00000000000 --- a/docs/pot/working-with-templates.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working with Templates" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A template is a reusable configuration for virtual machines. When users launch VMs, they can choose from a list of templates in &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Specifically, a template is a virtual disk image that includes one of a variety of operating systems, optional additional software such as office applications, and settings such as access control to determine who can use the template. Each template is associated with a particular type of hypervisor, which is specified when the template is added to &PRODUCT;." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "&PRODUCT; ships with a default template. In order to present more choices to users, &PRODUCT; administrators and users can create templates and add them to &PRODUCT;." -msgstr "" - diff --git a/docs/pot/working-with-usage-data.pot b/docs/pot/working-with-usage-data.pot deleted file mode 100644 index 8fdafd8eeaf..00000000000 --- a/docs/pot/working-with-usage-data.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. 
You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Working With Usage Data" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Usage Server provides aggregated usage records which you can use to create billing integration for the &PRODUCT; platform. The Usage Server works by taking data from the events log and creating summary usage records that you can access using the listUsageRecords API call." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The usage records show the amount of resources, such as VM run time or template storage space, consumed by guest instances. In the special case of bare metal instances, no template storage resources are consumed, but records showing zero usage are still included in the Usage Server's output." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Usage Server runs at least once per day. It can be configured to run multiple times per day. Its behavior is controlled by configuration settings as described in the &PRODUCT; Administration Guide." 
-msgstr "" - diff --git a/docs/pot/working-with-volumes.pot b/docs/pot/working-with-volumes.pot deleted file mode 100644 index fedfc58ba47..00000000000 --- a/docs/pot/working-with-volumes.pot +++ /dev/null @@ -1,45 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Using Swift for Secondary Storage" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "A volume provides storage to a guest VM. The volume can provide for a root disk or an additional data disk. &PRODUCT; supports additional volumes for guest VMs." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Volumes are created for a specific hypervisor type. A volume that has been attached to guest using one hypervisor type (e.g, XenServer) may not be attached to a guest that is using another hypervisor type (e.g. vSphere, KVM). This is because the different hypervisors use different disk image formats." -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "&PRODUCT; defines a volume as a unit of storage available to a guest VM. Volumes are either root disks or data disks. The root disk has \"/\" in the file system and is usually the boot device. Data disks provide for additional storage (e.g. As \"/opt\" or \"D:\"). Every guest VM has a root disk, and VMs can also optionally have a data disk. End users can mount multiple data disks to guest VMs. Users choose data disks from the disk offerings created by administrators. The user can create a template from a volume as well; this is the standard procedure for private template creation. Volumes are hypervisor-specific: a volume from one hypervisor type may not be used on a guest of another hypervisor type." -msgstr "" - diff --git a/docs/pot/xenserver-maintenance-mode.pot b/docs/pot/xenserver-maintenance-mode.pot deleted file mode 100644 index a5d4aabbd09..00000000000 --- a/docs/pot/xenserver-maintenance-mode.pot +++ /dev/null @@ -1,85 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "XenServer and Maintenance Mode" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For XenServer, you can take a server offline temporarily by using the Maintenance Mode feature in XenCenter. When you place a server into Maintenance Mode, all running VMs are automatically migrated from it to another host in the same pool. If the server is the pool master, a new master will also be selected for the pool. While a server is Maintenance Mode, you cannot create or start any VMs on it." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To place a server in Maintenance Mode:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the Resources pane, select the server, then do one of the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Right-click, then click Enter Maintenance Mode on the shortcut menu." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the Server menu, click Enter Maintenance Mode" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Enter Maintenance Mode" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The server's status in the Resources pane shows when all running VMs have been successfully migrated off the server." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "To take a server out of Maintenance Mode:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Right-click, then click Exit Maintenance Mode on the shortcut menu." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On the Server menu, click Exit Maintenance Mode" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "Click Exit Maintenance Mode" -msgstr "" - diff --git a/docs/pot/xenserver-topology-req.pot b/docs/pot/xenserver-topology-req.pot deleted file mode 100644 index 765427bb680..00000000000 --- a/docs/pot/xenserver-topology-req.pot +++ /dev/null @@ -1,35 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "XenServer Topology Requirements" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The Management Servers communicate with XenServer hosts on ports 22 (ssh), 80 (HTTP), and 443 (HTTPs)." -msgstr "" - diff --git a/docs/pot/zone-add.pot b/docs/pot/zone-add.pot deleted file mode 100644 index f9944a306ba..00000000000 --- a/docs/pot/zone-add.pot +++ /dev/null @@ -1,155 +0,0 @@ -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2013-02-02T20:12:00\n" -"PO-Revision-Date: 2013-02-02T20:12:00\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Adding a Zone" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "These steps assume you have already logged in to the &PRODUCT; UI. See ." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) If you are going to use Swift for cloud-wide secondary storage, you need to add it before you add zones." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Log in to the &PRODUCT; UI as administrator." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "If this is your first time visiting the UI, you will see the guided tour splash screen. Choose “Experienced user.†The Dashboard appears." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation bar, click Global Settings." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the search box, type swift.enable and click the search button." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click the edit button and set swift.enable to true. 
edit-icon.png: button to modify data " -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Restart the Management Server." -msgstr "" - -#. Tag: programlisting -#, no-c-format -msgid "# service cloud-management restart" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Refresh the &PRODUCT; UI browser tab and log back in." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "In the left navigation, choose Infrastructure." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "On Zones, click View More." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "(Optional) If you are using Swift storage, click Enable Swift. Provide the following:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "URL. The Swift URL." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Account. The Swift account." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Username. The Swift account’s username." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Key. The Swift key." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Click Add Zone. The zone creation wizard will appear." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Choose one of the following network types:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Basic. For AWS-style networking. Provides a single network where each VM instance is assigned an IP directly from the network. Guest isolation can be provided through layer-3 means such as security groups (IP address source filtering)." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "Advanced. For more sophisticated network topologies. This network model provides the most flexibility in defining guest networks and providing custom network offerings such as firewall, VPN, or load balancer support." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "For more information about the network types, see Network Setup." -msgstr "" - -#. Tag: para -#, no-c-format -msgid "The rest of the steps differ depending on whether you chose Basic or Advanced. 
Continue with the steps that apply to you:" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - diff --git a/docs/publican-adminguide.cfg b/docs/publican-adminguide.cfg deleted file mode 100644 index 7a8a203a0f6..00000000000 --- a/docs/publican-adminguide.cfg +++ /dev/null @@ -1,30 +0,0 @@ -# Publican configuration file for CloudStack Developer's Guide -# Config::Simple 4.58 -# Tue May 29 00:57:27 2012 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -xml_lang: en-US -type: Book -docname: Admin_Guide -brand: cloudstack -chunk_first: 1 -chunk_section_depth: 1 -condition: admin - - - diff --git a/docs/publican-all.cfg b/docs/publican-all.cfg deleted file mode 100644 index 897f92b4caa..00000000000 --- a/docs/publican-all.cfg +++ /dev/null @@ -1,28 +0,0 @@ -# Publican configuration file for CloudStack Complete Documentation Set -# Contains all technical docs except release notes -# Config::Simple 4.58 -# Tue May 29 00:57:27 2012 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -xml_lang: en-US -type: Book -docname: cloudstack -brand: cloudstack -chunk_first: 1 -chunk_section_depth: 1 -condition: install diff --git a/docs/publican-cloudstack/LICENSE b/docs/publican-cloudstack/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/docs/publican-cloudstack/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/docs/publican-cloudstack/NOTICE b/docs/publican-cloudstack/NOTICE deleted file mode 100644 index c7720bf1819..00000000000 --- a/docs/publican-cloudstack/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Apache CloudStack -Copyright 2012 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). diff --git a/docs/publican-cloudstack/defaults.cfg b/docs/publican-cloudstack/defaults.cfg deleted file mode 100644 index b288b33af47..00000000000 --- a/docs/publican-cloudstack/defaults.cfg +++ /dev/null @@ -1,21 +0,0 @@ -# Config::Simple 4.59 -# Thu Aug 11 14:07:41 2011 -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. - -doc_url: "http://cloudstack.apache.org/docs" -prod_url: "http://cloudstack.apache.org" - diff --git a/docs/publican-cloudstack/en-US/Feedback.xml b/docs/publican-cloudstack/en-US/Feedback.xml deleted file mode 100644 index 8aa5f67dc2b..00000000000 --- a/docs/publican-cloudstack/en-US/Feedback.xml +++ /dev/null @@ -1,44 +0,0 @@ - - - - -
- Submitting Feedback and Getting Help - - feedback1 - contact information for this brand - - - If you find a typographical error in this manual, or if you have thought of a way to make this manual better, we would love to hear from you! Please submit a bug: https://issues.apache.org/jira/browse/CLOUDSTACK against the component Doc. - If you have a suggestion for improving the documentation, try to be as specific as possible when describing it. If you have found an error, please include the section number and some of the surrounding text so we can find it easily. - Better yet, feel free to submit a patch if you would like to enhance the documentation. Our documentation is, along with the rest of the &PRODUCT; source code, kept in the project's git repository. - The most efficient way to get help with &PRODUCT; is to ask on the mailing lists. - The Apache CloudStack project has mailing lists for users and developers. These are the - official channels of communication for the project and are the best way to get answers about - using and contributing to CloudStack. It's a good idea to subscribe to the - users@cloudstack.apache.org mailing list if you've deployed or are deploying - CloudStack into production, and even for test deployments. - The CloudStack developer's mailing list (dev@cloudstack.apache.org) is for discussions - about CloudStack development, and is the best list for discussing possible bugs in CloudStack. - Anyone contributing to CloudStack should be on this mailing list. - To posts to the lists, you'll need to be subscribed. See the - CloudStack Web site - for instructions. -
diff --git a/docs/publican-cloudstack/en-US/Legal_Notice.xml b/docs/publican-cloudstack/en-US/Legal_Notice.xml deleted file mode 100644 index 5e30efb07c0..00000000000 --- a/docs/publican-cloudstack/en-US/Legal_Notice.xml +++ /dev/null @@ -1,67 +0,0 @@ - - - - - - - - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - - http://www.apache.org/licenses/LICENSE-2.0 - - - Unless required by applicable law or agreed to in writing, - software distributed under the License is distributed on an - "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - KIND, either express or implied. See the License for the - specific language governing permissions and limitations - under the License. - - - - Apache CloudStack is an effort undergoing incubation at The Apache Software Foundation (ASF). - - - Incubation is required of all newly accepted projects until a further review - indicates that the infrastructure, communications, and decision making - process have stabilized in a manner consistent with other successful ASF - projects. While incubation status is not necessarily a reflection of the - completeness or stability of the code, it does indicate that the project - has yet to be fully endorsed by the ASF. - - - - CloudStack® is a registered trademark of the Apache Software Foundation. - - - - Apache CloudStack, the CloudStack word design, the Apache CloudStack word design, and the cloud monkey logo are trademarks of the - Apache Software Foundation. 
- - - - diff --git a/docs/publican-cloudstack/en-US/images/1.png b/docs/publican-cloudstack/en-US/images/1.png deleted file mode 100644 index 098e7dfd698..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/1.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/10.png b/docs/publican-cloudstack/en-US/images/10.png deleted file mode 100644 index 1d2ab13127b..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/10.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/11.png b/docs/publican-cloudstack/en-US/images/11.png deleted file mode 100644 index 897afb30257..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/11.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/12.png b/docs/publican-cloudstack/en-US/images/12.png deleted file mode 100644 index b2aa1bddc4f..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/12.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/13.png b/docs/publican-cloudstack/en-US/images/13.png deleted file mode 100644 index c6e0022be06..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/13.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/14.png b/docs/publican-cloudstack/en-US/images/14.png deleted file mode 100644 index 93833719cff..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/14.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/15.png b/docs/publican-cloudstack/en-US/images/15.png deleted file mode 100644 index e50bcb95031..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/15.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/16.png b/docs/publican-cloudstack/en-US/images/16.png deleted file mode 100644 index ff3705d120a..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/16.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/17.png 
b/docs/publican-cloudstack/en-US/images/17.png deleted file mode 100644 index 594964530b0..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/17.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/18.png b/docs/publican-cloudstack/en-US/images/18.png deleted file mode 100644 index 7e8dbb464e3..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/18.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/19.png b/docs/publican-cloudstack/en-US/images/19.png deleted file mode 100644 index eb43c6fdca1..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/19.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/2.png b/docs/publican-cloudstack/en-US/images/2.png deleted file mode 100644 index e550c21cb11..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/2.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/20.png b/docs/publican-cloudstack/en-US/images/20.png deleted file mode 100644 index 692badc3cd1..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/20.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/21.png b/docs/publican-cloudstack/en-US/images/21.png deleted file mode 100644 index 231735e5aa9..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/21.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/22.png b/docs/publican-cloudstack/en-US/images/22.png deleted file mode 100644 index a77ea0faa5b..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/22.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/23.png b/docs/publican-cloudstack/en-US/images/23.png deleted file mode 100644 index 1802579ef90..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/23.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/24.png b/docs/publican-cloudstack/en-US/images/24.png deleted file mode 
100644 index 16e96e7e68b..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/24.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/25.png b/docs/publican-cloudstack/en-US/images/25.png deleted file mode 100644 index 7bfac576852..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/25.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/26.png b/docs/publican-cloudstack/en-US/images/26.png deleted file mode 100644 index 08de0655857..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/26.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/27.png b/docs/publican-cloudstack/en-US/images/27.png deleted file mode 100644 index ac73b8eebd5..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/27.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/28.png b/docs/publican-cloudstack/en-US/images/28.png deleted file mode 100644 index dddaea9e4f8..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/28.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/29.png b/docs/publican-cloudstack/en-US/images/29.png deleted file mode 100644 index f901971df43..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/29.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/3.png b/docs/publican-cloudstack/en-US/images/3.png deleted file mode 100644 index e78d18cb51d..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/3.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/4.png b/docs/publican-cloudstack/en-US/images/4.png deleted file mode 100644 index 525915d6690..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/4.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/5.png b/docs/publican-cloudstack/en-US/images/5.png deleted file mode 100644 index 10ec8807650..00000000000 Binary files 
a/docs/publican-cloudstack/en-US/images/5.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/6.png b/docs/publican-cloudstack/en-US/images/6.png deleted file mode 100644 index 60c626b3eda..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/6.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/7.png b/docs/publican-cloudstack/en-US/images/7.png deleted file mode 100644 index 7184e2c0d4c..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/7.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/8.png b/docs/publican-cloudstack/en-US/images/8.png deleted file mode 100644 index d951846e9fb..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/8.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/9.png b/docs/publican-cloudstack/en-US/images/9.png deleted file mode 100644 index 5aef6797585..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/9.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/dot.png b/docs/publican-cloudstack/en-US/images/dot.png deleted file mode 100644 index 079add95ded..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/dot.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/dot2.png b/docs/publican-cloudstack/en-US/images/dot2.png deleted file mode 100644 index d9262efbf62..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/dot2.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/h1-bg.png b/docs/publican-cloudstack/en-US/images/h1-bg.png deleted file mode 100644 index a2aad24b329..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/h1-bg.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/image_left.png b/docs/publican-cloudstack/en-US/images/image_left.png deleted file mode 100644 index 007f7b3578c..00000000000 Binary files 
a/docs/publican-cloudstack/en-US/images/image_left.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/image_right.png b/docs/publican-cloudstack/en-US/images/image_right.png deleted file mode 100644 index 5b67443c2cc..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/image_right.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/important.png b/docs/publican-cloudstack/en-US/images/important.png deleted file mode 100644 index 969562b7bc7..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/important.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/note.png b/docs/publican-cloudstack/en-US/images/note.png deleted file mode 100644 index d04775d9905..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/note.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/stock-go-back.png b/docs/publican-cloudstack/en-US/images/stock-go-back.png deleted file mode 100644 index 00850b21b23..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/stock-go-back.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/stock-go-forward.png b/docs/publican-cloudstack/en-US/images/stock-go-forward.png deleted file mode 100644 index cc2797a4686..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/stock-go-forward.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/stock-go-up.png b/docs/publican-cloudstack/en-US/images/stock-go-up.png deleted file mode 100644 index 1ebf2799c35..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/stock-go-up.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/stock-home.png b/docs/publican-cloudstack/en-US/images/stock-home.png deleted file mode 100644 index 3f0c1906343..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/stock-home.png and /dev/null differ diff --git 
a/docs/publican-cloudstack/en-US/images/title_logo.png b/docs/publican-cloudstack/en-US/images/title_logo.png deleted file mode 100644 index f0b1d20c677..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/title_logo.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/title_logo.svg b/docs/publican-cloudstack/en-US/images/title_logo.svg deleted file mode 100644 index 1d2913bf50c..00000000000 --- a/docs/publican-cloudstack/en-US/images/title_logo.svg +++ /dev/null @@ -1,370 +0,0 @@ - - - - - -image/svg+xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -open source cloud computing - - - - - - - -™ - - - \ No newline at end of file diff --git a/docs/publican-cloudstack/en-US/images/warning.png b/docs/publican-cloudstack/en-US/images/warning.png deleted file mode 100644 index 94b69d1ff1f..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/warning.png and /dev/null differ diff --git a/docs/publican-cloudstack/en-US/images/watermark-draft.png b/docs/publican-cloudstack/en-US/images/watermark-draft.png deleted file mode 100644 index 0ead5af8bb8..00000000000 Binary files a/docs/publican-cloudstack/en-US/images/watermark-draft.png and /dev/null differ diff --git a/docs/publican-cloudstack/gen_rpm.sh b/docs/publican-cloudstack/gen_rpm.sh deleted file mode 100755 index 6c87141dbf4..00000000000 --- a/docs/publican-cloudstack/gen_rpm.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -cd ../ -tar -czf ~/rpmbuild/SOURCES/publican-cloudstack.tgz publican-cloudstack -cp -a publican-cloudstack/publican-cloudstack.spec ~/rpmbuild/SPECS/ -rpmbuild -ba ~/rpmbuild/SPECS/publican-cloudstack.spec diff --git a/docs/publican-cloudstack/publican-cloudstack.spec b/docs/publican-cloudstack/publican-cloudstack.spec deleted file mode 100644 index 9ec15d0254f..00000000000 --- a/docs/publican-cloudstack/publican-cloudstack.spec +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -%define brand cloudstack - -Name: publican-cloudstack -Summary: Common documentation files for Apache %{brand} -Version: 0.5 -Release: 1%{?dist} -License: ASLv2 -Group: Applications/Text -Buildroot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) -Buildarch: noarch -Source: %{name}.tgz -Requires: publican >= 1.99 -BuildRequires: publican >= 1.99 -URL: http://cloudstack.org - -%description -This package provides common files and templates needed to build documentation -for Apache %{brand} with publican. - -%prep -%setup -qn %{name} - -%build -publican build --formats=xml --langs=en-US --publish - -%install -rm -rf $RPM_BUILD_ROOT -mkdir -p -m755 $RPM_BUILD_ROOT%{_datadir}/publican/Common_Content -publican install_brand --path=$RPM_BUILD_ROOT%{_datadir}/publican/Common_Content - -%clean -rm -rf $RPM_BUILD_ROOT - -%files -%defattr(-,root,root,-) -%doc LICENSE -%doc NOTICE -%{_datadir}/publican/Common_Content/%{brand} - -%changelog -* Tue Feb 29 2013 David Nalley 0.5-1 -- adding trademark information - -* Sun Sep 22 2012 David Nalley 0.4-1 -- added 'Apache' to the description -- moved the issue tracker url to the ASF jira instance - -* Tue Aug 14 2012 Joe Brockmeier 0.3-1 -- Removed unneeded common files distributed with Publican -* Tue Jun 26 2012 David Nalley 0.2-1 -- updated for ASF move -* Sat Aug 11 2011 David Nalley 0.1-1 -- Created Brand diff --git a/docs/publican-cloudstack/publican.cfg b/docs/publican-cloudstack/publican.cfg deleted file mode 100644 index a87c8a78d43..00000000000 --- a/docs/publican-cloudstack/publican.cfg +++ /dev/null @@ -1,24 +0,0 @@ -# Config::Simple 4.59 -# Thu Aug 11 14:07:41 2011 -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -version: "0.1" -xml_lang: "en-US" -release: 0 -type: brand -brand: cloudstack - diff --git a/docs/publican-devguide.cfg b/docs/publican-devguide.cfg deleted file mode 100644 index a8e28aea37b..00000000000 --- a/docs/publican-devguide.cfg +++ /dev/null @@ -1,29 +0,0 @@ -# Publican configuration file for CloudStack Developer's Guide -# Config::Simple 4.58 -# Tue May 29 00:57:27 2012 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -xml_lang: en-US -type: Book -docname: Developers_Guide -brand: cloudstack -chunk_first: 1 -chunk_section_depth: 1 - - - diff --git a/docs/publican-gsoc-2013.cfg b/docs/publican-gsoc-2013.cfg deleted file mode 100644 index 35dc517be12..00000000000 --- a/docs/publican-gsoc-2013.cfg +++ /dev/null @@ -1,27 +0,0 @@ -# Publican configuration file for CloudStack Complete Documentation Set -# Contains all technical docs except release notes -# Config::Simple 4.58 -# Tue May 29 00:57:27 2012 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -xml_lang: en-US -type: Book -docname: CloudStack_GSoC_Guide -brand: cloudstack -chunk_first: 1 -chunk_section_depth: 1 diff --git a/docs/publican-installation.cfg b/docs/publican-installation.cfg deleted file mode 100644 index e94044d4b60..00000000000 --- a/docs/publican-installation.cfg +++ /dev/null @@ -1,30 +0,0 @@ -# Publican configuration file for CloudStack Installation Guide -# Config::Simple 4.58 -# Tue May 29 00:57:27 2012 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -xml_lang: en-US -type: Book -docname: Installation_Guide -brand: cloudstack -chunk_first: 1 -chunk_section_depth: 1 -condition: install - - - diff --git a/docs/publican-plugin-midonet.cfg b/docs/publican-plugin-midonet.cfg deleted file mode 100644 index 6558d99e897..00000000000 --- a/docs/publican-plugin-midonet.cfg +++ /dev/null @@ -1,28 +0,0 @@ -# Publican configuration file for CloudStack Complete Documentation Set -# Contains all technical docs except release notes -# Config::Simple 4.58 -# Tue May 29 00:57:27 2012 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -xml_lang: en-US -type: Book -docname: MidoNet_Plugin_Guide -brand: cloudstack -chunk_first: 1 -chunk_section_depth: 1 -condition: install diff --git a/docs/publican-plugin-niciranvp.cfg b/docs/publican-plugin-niciranvp.cfg deleted file mode 100644 index 2e3696dc49e..00000000000 --- a/docs/publican-plugin-niciranvp.cfg +++ /dev/null @@ -1,28 +0,0 @@ -# Publican configuration file for CloudStack Complete Documentation Set -# Contains all technical docs except release notes -# Config::Simple 4.58 -# Tue May 29 00:57:27 2012 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -xml_lang: en-US -type: Book -docname: CloudStack_Nicira_NVP_Guide -brand: cloudstack -chunk_first: 1 -chunk_section_depth: 1 -condition: install diff --git a/docs/publican-release-notes.cfg b/docs/publican-release-notes.cfg deleted file mode 100644 index b6af40663bc..00000000000 --- a/docs/publican-release-notes.cfg +++ /dev/null @@ -1,27 +0,0 @@ -# Publican configuration file for CloudStack 4.0 Release Notes -# Config::Simple 4.58 -# Tue May 29 00:57:27 2012 -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information# -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -xml_lang: en-US -type: Book -docname: Release_Notes -brand: cloudstack -chunk_first: 0 -chunk_section_depth: 0 -toc_section_depth: 1 diff --git a/docs/qig/en-US/Author_Group.xml b/docs/qig/en-US/Author_Group.xml deleted file mode 100644 index 432ef6fd3ac..00000000000 --- a/docs/qig/en-US/Author_Group.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - - Apache - CloudStack - - - diff --git a/docs/qig/en-US/Book_Info.xml b/docs/qig/en-US/Book_Info.xml deleted file mode 100644 index 98cbcb49327..00000000000 --- a/docs/qig/en-US/Book_Info.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Quick Install Guide - Prescriptive instructions for deploying Apache CloudStack - Apache CloudStack - 4.2.0 - 0 - 0 - - - This guide is designed to provide a strict environment to guarantee - a higher degree of success in initial deployments of Apache CloudStack. - All of the elements of the environment will be provided to you. - Apache CloudStack is capable of much more complex configurations, - but they are beyond the scope of this document. 
- - - - - - - - - - - - - diff --git a/docs/qig/en-US/Chapter.xml b/docs/qig/en-US/Chapter.xml deleted file mode 100644 index 4adf63c207a..00000000000 --- a/docs/qig/en-US/Chapter.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Test Chapter - - This is a test paragraph - -
- Test Section 1 - - This is a test paragraph in a section - -
- -
- Test Section 2 - - This is a test paragraph in Section 2 - - - - listitem text - - - - -
- -
- diff --git a/docs/qig/en-US/Environment.xml b/docs/qig/en-US/Environment.xml deleted file mode 100644 index e48b4051bc3..00000000000 --- a/docs/qig/en-US/Environment.xml +++ /dev/null @@ -1,258 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Environment - - Before you begin, you need to prepare the environment before you install CloudStack. - We will go over the steps to prepare now. - -
- Operating System - - Using the CentOS 6.4 x86_64 minimal install ISO, you'll need to install CentOS - on your hardware. The defaults will generally be acceptable for this installation. - - - Once this installation is complete, you'll want to connect to your freshly - installed machine via SSH as the root user. Note that you should not allow root - logins in a production environment, so be sure to turn off remote logins once you - have finished the installation and configuration. - -
- Configuring the network - - By default the network will not come up on your hardware and you - will need to configure it to work in your environment. Since we - specified that there will be no DHCP server in this environment - we will be manually configuring your network interface. We will - assume, for the purposes of this exercise, that eth0 is the only network - interface that will be connected and used. - - - Connecting via the console you should login as root. Check the - file /etc/sysconfig/network-scripts/ifcfg-eth0, - it will look like this by default: - -DEVICE="eth0" -HWADDR="52:54:00:B9:A6:C0" -NM_CONTROLLED="yes" -ONBOOT="no" - - - - Unfortunately, this configuration will not permit you to connect to the network, - and is also unsuitable for our purposes with CloudStack. We want to - configure that file so that it specifies the IP address, netmask, etc., as shown - in the following example: - - - Hardware Addresses - You should not use the hardware address (aka MAC address) from our example - for your configuration. It is network interface specific, so you should keep the - address already provided in the HWADDR directive. - - - -DEVICE=eth0 -HWADDR=52:54:00:B9:A6:C0 -NM_CONTROLLED=no -ONBOOT=yes -BOOTPROTO=none -IPADDR=172.16.10.2 -NETMASK=255.255.255.0 -GATEWAY=172.16.10.1 -DNS1=8.8.8.8 -DNS2=8.8.4.4 - - - IP Addressing - Throughout this document we are assuming that you will - have a /24 network for your CloudStack implementation. This can be any - RFC 1918 network. However, we are assuming that you will match the - machine address that we are using. Thus we may use - 172.16.10.2 and because - you might be using the 192.168.55.0/24 network you would use - 192.168.55.2 - - - Now that we have the configuration files properly set up, we need to run a - few commands to start up the network - # chkconfig network on - # service network start -
-
- Hostname - - Cloudstack requires that the hostname be properly set. If you used the default - options in the installation, then your hostname is currently set to - localhost.localdomain. To test this we will run: - # hostname --fqdn - At this point it will likely return: - localhost - To rectify this situation - we'll set the hostname by editing the - /etc/hosts file so that it follows a similar format to this example: -127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 -::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 -172.16.10.2 srvr1.cloud.priv - - - After you've modified that file, go ahead and restart the network using: -# service network restart - Now recheck with the hostname --fqdn command and ensure that it returns - a FQDN response -
-
- SELinux - At the moment, for CloudStack to work properly SELinux must be - set to permissive. We want to both configure this for future boots and modify it - in the current running system. - - To configure SELinux to be permissive in the running system we need to run - the following command: - # setenforce 0 - - To ensure that it remains in that state we need to configure the file - /etc/selinux/config to reflect the permissive state, - as shown in this example: - - -# This file controls the state of SELinux on the system. -# SELINUX= can take one of these three values: -# enforcing - SELinux security policy is enforced. -# permissive - SELinux prints warnings instead of enforcing. -# disabled - No SELinux policy is loaded. -SELINUX=permissive -# SELINUXTYPE= can take one of these two values: -# targeted - Targeted processes are protected, -# mls - Multi Level Security protection. -SELINUXTYPE=targeted - - -
-
- NTP - NTP configuration is a necessity for keeping all of the clocks in your cloud - servers in sync. However, NTP is not installed by default. So we'll install - and configure NTP at this stage. Installation is accomplished as follows: - - # yum -y install ntp - The actual default configuration is fine for our purposes, so we merely need to - enable it and set it to start on boot as follows: - # chkconfig ntpd on - # service ntpd start -
-
- Configuring the CloudStack Package Repository - - We need to configure the machine to use a CloudStack package repository. - - The below repository is not an official Apache CloudStack project repository - - The Apache CloudStack official releases are source code. As such there are no - 'official' binaries available. The full installation guide describes how to take - the source release and generate RPMs and a yum repository. This guide attempts - to keep things as simple as possible, and thus we are using one of the - community-provided yum repositories. - - - - To add the CloudStack repository, create /etc/yum.repos.d/cloudstack.repo and insert the following information. - -[cloudstack] -name=cloudstack -baseurl=http://cloudstack.apt-get.eu/rhel/4.1/ -enabled=1 -gpgcheck=0 - -
-
-
- NFS - - Our configuration is going to use NFS for both primary and secondary - storage. We are going to go ahead and setup two NFS shares for those - purposes. We'll start out by installing - nfs-utils. - - # yum install nfs-utils - - We now need to configure NFS to serve up two different shares. This is handled comparatively easily - in the /etc/exports file. You should ensure that it has the following content: - - -/secondary *(rw,async,no_root_squash) -/primary *(rw,async,no_root_squash) - - - You will note that we specified two directories that don't exist (yet) on the system. - We'll go ahead and create those directories and set permissions appropriately on them with the following commands: - - -# mkdir /primary -# mkdir /secondary - - CentOS 6.x releases use NFSv4 by default. NFSv4 requires that domain setting matches on all clients. - In our case, the domain is cloud.priv, so ensure that the domain setting in /etc/idmapd.conf - is uncommented and set as follows: - Domain = cloud.priv - Now you'll need uncomment the configuration values in the file /etc/sysconfig/nfs - -LOCKD_TCPPORT=32803 -LOCKD_UDPPORT=32769 -MOUNTD_PORT=892 -RQUOTAD_PORT=875 -STATD_PORT=662 -STATD_OUTGOING_PORT=2020 - - Now we need to configure the firewall to permit incoming NFS connections. 
- Edit the file /etc/sysconfig/iptables - - --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 111 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 111 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 2049 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 32803 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 32769 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 892 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 892 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 875 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 875 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 662 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 662 -j ACCEPT - - Now you can restart the iptables service with the following command: - - # service iptables restart - We now need to configure nfs service to start on boot and actually start it on the host by - executing the following commands: - -# service rpcbind start -# service nfs start -# chkconfig rpcbind on -# chkconfig nfs on - -
-
diff --git a/docs/qig/en-US/Management.xml b/docs/qig/en-US/Management.xml deleted file mode 100644 index 8c6040ffa2b..00000000000 --- a/docs/qig/en-US/Management.xml +++ /dev/null @@ -1,99 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Installation of the management server - - Now it is time to start installing CloudStack's management server - and some of the related components. - -
- Database Installation and Configuration - - We'll start out by installing MySQL and configuring - some options to ensure CloudStack runs well. - - - To install MySQL run the following command: - # yum -y install mysql-server - - With MySQL installed we need to make - a few configuration changes to /etc/my.cnf. - Specifically we need to add the following options to the [mysqld] section: - -innodb_rollback_on_timeout=1 -innodb_lock_wait_timeout=600 -max_connections=350 -log-bin=mysql-bin -binlog-format = 'ROW' - - - - Now that MySQL is properly configured we can - start it and configure it to start on boot as follows: - -# service mysqld start -# chkconfig mysqld on - - - -
- -
- Installation - We are now going to install the management server. We do that by executing the following command: - # yum -y install cloud-client - - With the application itself installed we can now setup the database, we'll do that with the following command - and options: - - # cloudstack-setup-databases cloud:password@localhost --deploy-as=root - When this process is finished, you should see a message like "CloudStack has successfully initialized the database." - - Now that the database has been created, we can take the final step in setting up the management server by issuing the following command: - # cloudstack-setup-management -
-
- System Template Setup - CloudStack uses a number of system VMs to provide functionality for - accessing the console of virtual machines, providing various networking - services, and managing various aspects of storage. This step will - acquire those system images ready for deployment when we bootstrap - your cloud. - - - Now we need to download the system VM template and deploy that to the - share we just mounted. The management server includes a script to properly - manipulate the system VMs images. - - # /usr/share/cloudstack-common/scripts/storage/secondary/cloud-install-sys-tmplt -m /secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -F - - - That concludes our setup of the management server. We still need to - configure CloudStack, but we will do that after we get our hypervisor - set up. - -
-
diff --git a/docs/qig/en-US/Overview.xml b/docs/qig/en-US/Overview.xml deleted file mode 100644 index 31915f54475..00000000000 --- a/docs/qig/en-US/Overview.xml +++ /dev/null @@ -1,93 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Overview - - Infrastructure-as-a-Service (IaaS) clouds can be a complex thing to build, - and by definition they have a plethora of options, which often lead to confusion - for even experienced admins who are newcomers to building cloud platforms. - The goal for this runbook is to provide a straightforward set of instructions - to get you up and running with CloudStack with a minimum amount of trouble. - -
- What exactly are we building? - - This runbook will focus on building a CloudStack cloud using KVM with - CentOS 6.4 with NFS storage on a flat layer-2 network utilizing - layer-3 network isolation (aka Security Groups), and doing it all - on a single piece of hardware. - - - KVM, or Kernel-based Virtual Machine is a virtualization technology - for the Linux kernel. KVM supports native virtualization atop - processors with hardware virtualization extensions. - - - Security Groups act as distributed firewalls that control access - to a group of virtual machines. - -
-
- High level overview of the process - - Before we actually get to installing CloudStack, we'll start with - installing our base operating system, and then configuring that to act - as an NFS server for several types of storage. We'll install the - management server, download the systemVMs, and finally install the agent - software. Finally we'll spend a good deal of time configuring the entire - cloud in the CloudStack web interface. - -
-
- Prerequisites - - To complete this runbook you'll need the following items: - - - - At least one computer which supports hardware virtualization. - - - - - The - - CentOS 6.4 x86_64 minimal install CD - - - - - - A /24 network with the gateway being at xxx.xxx.xxx.1, no DHCP should be on this network and - none of the computers running CloudStack will have a dynamic address. Again this is done for - the sake of simplicity. - - - - -
- -
diff --git a/docs/qig/en-US/Preface.xml b/docs/qig/en-US/Preface.xml deleted file mode 100644 index d6ba80edb6d..00000000000 --- a/docs/qig/en-US/Preface.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Preface - - - - - - diff --git a/docs/qig/en-US/Revision_History.xml b/docs/qig/en-US/Revision_History.xml deleted file mode 100644 index 1ff4d772adc..00000000000 --- a/docs/qig/en-US/Revision_History.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Revision History - - - - 0-0 - Fri Jun 28 2013 - - - Initial creation of book by publican - - - - - - - diff --git a/docs/qig/en-US/config.xml b/docs/qig/en-US/config.xml deleted file mode 100644 index 7ff7a72f613..00000000000 --- a/docs/qig/en-US/config.xml +++ /dev/null @@ -1,177 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Configuration - - As we noted before we will be using security groups to provide isolation - and by default that implies that we'll be using a flat layer-2 network. - It also means that the simplicity of our setup means that we can use the - quick installer. - -
- UI Access - - To get access to CloudStack's web interface, merely point your - browser to http://172.16.10.2:8080/client - The default username is 'admin', and the default password is 'password'. - You should see a splash screen that allows you to choose several options - for setting up CloudStack. You should choose the - option. - - - You should now see a prompt requiring you to change the password for - the admin user. Please do so. - -
-
- Setting up a Zone - - A zone is the largest organization entity in CloudStack - and we'll be creating one, this - should be the screen that you see in front of you now. And for us there are 5 pieces of - information that we need. - - - Name - we will set this to the ever-descriptive 'Zone1' for our cloud. - - - Public DNS 1 - we will set this to '8.8.8.8' for our cloud. - - - Public DNS 2 - we will set this to '8.8.4.4' for our cloud. - - - Internal DNS1 - we will also set this to '8.8.8.8' for our cloud. - - - Internal DNS2 - we will also set this to '8.8.4.4' for our cloud. - - - - - Notes about DNS settings - - CloudStack distinguishes between internal and public DNS. Internal - DNS is assumed to be capable of resolving internal-only - hostnames, such as your NFS server’s DNS name. Public DNS is - provided to the guest VMs to resolve public IP addresses. You can - enter the same DNS server for both types, but if you do so, you - must make sure that both internal and public IP addresses can - route to the DNS server. In our specific case we will not use any - names for resources internally, and we have indeed set them to look - to the same external resource so as to not add a nameserver setup - to our list of requirements. - - -
-
- Pod Configuration - Now that we've added a Zone, the next step that comes up is a prompt - for information regarding a pod, which is looking for several items. - - - Name - We'll use Pod1 for our cloud. - - - Gateway - We'll use 172.16.10.1 as our gateway - - - Netmask - We'll use 255.255.255.0 - - - Start/end reserved system IPs - we will use 172.16.10.10-172.16.10.20 - - - Guest gateway - We'll use 172.16.10.1 - - - Guest netmask - We'll use 255.255.255.0 - - - Guest start/end IP - We'll use 172.16.10.30-172.16.10.200 - - - -
-
- Cluster - Now that we've added a Zone, we need only add a few more items for configuring the cluster. - - - Name - We'll use Cluster1 - - - Hypervisor - Choose KVM - - - - You should be prompted to add the first host to your cluster at this point. Only a few bits of information are needed. - - - Hostname - we'll use the IP address 172.16.10.2 since we didn't set up a DNS server. - - - Username - we'll use 'root' - - - Password - enter the operating system password for the root user - - - -
- Primary Storage - With your cluster now set up - you should be prompted for primary storage information. Choose NFS as the storage type and then enter the following values in the fields: - - - Name - We'll use 'Primary1' - - - Server - We'll be using the IP address 172.16.10.2 - - - Path - We'll define /primary as the path we are using - - - -
-
- Secondary Storage - If this is a new zone, you'll be prompted for secondary storage information - populate it as follows: - - - NFS server - We'll use the IP address 172.16.10.2 - - - Path - We'll use /secondary - - - - Now, click Launch and your cloud should begin setup - it may take several minutes depending on your internet connection speed for setup to finalize. -
-
- - -
- diff --git a/docs/qig/en-US/kvm.xml b/docs/qig/en-US/kvm.xml deleted file mode 100644 index 91ed9d5cee9..00000000000 --- a/docs/qig/en-US/kvm.xml +++ /dev/null @@ -1,142 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - KVM Setup and installation - - KVM is the hypervisor we'll be using - we will recover the initial setup - which has already been done on the hypervisor host and cover installation - of the agent software, you can use the same steps to add additional KVM - nodes to your CloudStack environment. - -
- Prerequisites - - We explicitly are using the management server as a compute node as well, - which means that we have already performed many of the prerequisite steps - when setting up the management server, but we will list them here for - clarity. Those steps are: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - You shouldn't need to do that for the management server, of course, but - any additional hosts will need for you to complete the above steps. - -
- -
- Installation - Installation of the KVM agent is trivial with just a single command, but afterwards we'll need to configure a few things. - # yum -y install cloud-agent -
- KVM Configuration - We have two different parts of KVM to configure, libvirt, and QEMU. -
- QEMU Configuration - - KVM configuration is relatively simple at only a single item. We need to - edit the QEMU VNC configuration. This is done by editing - /etc/libvirt/qemu.conf and ensuring the following - line is present and uncommented. - vnc_listen=0.0.0.0 - -
-
- Libvirt Configuration - - CloudStack uses libvirt for managing virtual machines. Therefore it - is vital that libvirt is configured correctly. Libvirt is a dependency - of cloud-agent and should already be installed. - - - - In order to have live migration working libvirt has to listen - for unsecured TCP connections. We also need to turn off libvirt's - attempt to use Multicast DNS advertising. Both of these settings - are in /etc/libvirt/libvirtd.conf - - Set the following parameters: - listen_tls = 0 - listen_tcp = 1 - tcp_port = "16059" - auth_tcp = "none" - mdns_adv = 0 - - - Turning on "listen_tcp" in libvirtd.conf is not enough, we have to change the parameters as well: - On RHEL or CentOS modify /etc/sysconfig/libvirtd: - Uncomment the following line: - #LIBVIRTD_ARGS="--listen" - On Ubuntu: modify /etc/init/libvirt-bin.conf - Change the following line (at the end of the file): - exec /usr/sbin/libvirtd -d - to (just add -l) - exec /usr/sbin/libvirtd -d -l - - - Restart libvirt - In RHEL or CentOS: - $ service libvirtd restart - In Ubuntu: - $ service libvirt-bin restart - -
-
- KVM configuration complete - - That concludes our installation and configuration of KVM, and we'll now move to using the CloudStack UI - for the actual configuration of our cloud. - -
-
-
-
diff --git a/docs/qig/en-US/qig.ent b/docs/qig/en-US/qig.ent deleted file mode 100644 index 3b1649a2ba9..00000000000 --- a/docs/qig/en-US/qig.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/qig/en-US/qig.xml b/docs/qig/en-US/qig.xml deleted file mode 100644 index 00dd2e4a1f0..00000000000 --- a/docs/qig/en-US/qig.xml +++ /dev/null @@ -1,36 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - - - - - - - - - - diff --git a/docs/runbook/en-US/Author_Group.xml b/docs/runbook/en-US/Author_Group.xml deleted file mode 100644 index ba9e651f876..00000000000 --- a/docs/runbook/en-US/Author_Group.xml +++ /dev/null @@ -1,32 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - - Apache - CloudStack - - - diff --git a/docs/runbook/en-US/Book_Info.xml b/docs/runbook/en-US/Book_Info.xml deleted file mode 100644 index ec591446856..00000000000 --- a/docs/runbook/en-US/Book_Info.xml +++ /dev/null @@ -1,52 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Runbook - Prescriptive instructions for deploying Apache CloudStack - Apache CloudStack - 3.0.2 - 0 - 0 - - - These runbooks are designed to provide a strict environment to guarantee - a higher degree of success in initial deployments of Apache CloudStack. - All of the elements of the environment will be provided to you. - Apache CloudStack is capable of much more complex configurations, - but they are beyond the scope of this document. - - - - - - - - - - - - - diff --git a/docs/runbook/en-US/Chapter.xml b/docs/runbook/en-US/Chapter.xml deleted file mode 100644 index 4adf63c207a..00000000000 --- a/docs/runbook/en-US/Chapter.xml +++ /dev/null @@ -1,53 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Test Chapter - - This is a test paragraph - -
- Test Section 1 - - This is a test paragraph in a section - -
- -
- Test Section 2 - - This is a test paragraph in Section 2 - - - - listitem text - - - - -
- -
- diff --git a/docs/runbook/en-US/Environment.xml b/docs/runbook/en-US/Environment.xml deleted file mode 100644 index 781e05de182..00000000000 --- a/docs/runbook/en-US/Environment.xml +++ /dev/null @@ -1,235 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Environment - - Before you begin, you need to prepare the environment before you install CloudStack. - We will go over the steps to prepare now. - -
- Operating System - - Using the CentOS 6.2 x86_64 minimal install ISO, you'll need to install CentOS - on your hardware. The defaults will generally be acceptable for this installation. - - - Once this installation is complete, you'll want to connect to your freshly - installed machine via SSH as the root user. Note that you should not allow root - logins in a production environment, so be sure to turn off remote logins once you - have finished the installation and configuration. - -
- Configuring the network - - By default the network will not come up on your hardware and you - will need to configure it to work in your environment. Since we - specified that there will be no DHCP server in this environment - we will be manually configuring your network interface. We will - assume, for the purposes of this exercise, that eth0 is the only network - interface that will be connected and used. - - - Connecting via the console you should login as root. Check the - file /etc/sysconfig/network-scripts/ifcfg-eth0, - it will look like this by default: - -DEVICE="eth0" -HWADDR="52:54:00:B9:A6:C0" -NM_CONTROLLED="yes" -ONBOOT="no" - - - - Unfortunately, this configuration will not permit you to connect to the network, - and is also unsuitable for our purposes with CloudStack. We want to - configure that file so that it specifies the IP address, netmask, etc., as shown - in the following example: - - - Hardware Addresses - You should not use the hardware address (aka MAC address) from our example - for your configuration. It is network interface specific, so you should keep the - address already provided in the HWADDR directive. - - - -DEVICE=eth0 -HWADDR=52:54:00:B9:A6:C0 -NM_CONTROLLED=no -ONBOOT=yes -BOOTPROTO=none -IPADDR=172.16.10.2 -NETMASK=255.255.255.0 -GATEWAY=172.16.10.1 -DNS1=8.8.8.8 -DNS2=8.8.4.4 - - - IP Addressing - Throughout this document we are assuming that you will - have a /24 network for your CloudStack implementation. This can be any - RFC 1918 network. However, we are assuming that you will match the - machine address that we are using. Thus we may use - 172.16.10.2 and because - you might be using the 192.168.55.0/24 network you would use - 192.168.55.2 - - - Now that we have the configuration files properly set up, we need to run a - few commands to start up the network - # chkconfig network on - # service network start -
-
- Hostname - - CloudStack requires that the hostname be properly set. If you used the default - options in the installation, then your hostname is currently set to - localhost.localdomain. To test this we will run: - # hostname --fqdn - At this point it will likely return: - localhost - To rectify this situation - we'll set the hostname by editing the - /etc/hosts file so that it follows a similar format to this example: -127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 -172.16.10.2 srvr1.cloud.priv - - - After you've modified that file, go ahead and restart the network using: -# service network restart - Now recheck with the hostname --fqdn command and ensure that it returns - a FQDN response -
-
- SELinux - At the moment, for CloudStack to work properly SELinux must be - set to permissive. We want to both configure this for future boots and modify it - in the current running system. - - To configure SELinux to be permissive in the running system we need to run - the following command: - # setenforce 0 - - To ensure that it remains in that state we need to configure the file - /etc/selinux/config to reflect the permissive state, - as shown in this example: - - -# This file controls the state of SELinux on the system. -# SELINUX= can take one of these three values: -# enforcing - SELinux security policy is enforced. -# permissive - SELinux prints warnings instead of enforcing. -# disabled - No SELinux policy is loaded. -SELINUX=permissive -# SELINUXTYPE= can take one of these two values: -# targeted - Targeted processes are protected, -# mls - Multi Level Security protection. -SELINUXTYPE=targeted - - -
-
- NTP - NTP configuration is a necessity for keeping all of the clocks in your cloud - servers in sync. However, NTP is not installed by default. So we'll install - and configure NTP at this stage. Installation is accomplished as follows: - - # yum install ntp - The actual default configuration is fine for our purposes, so we merely need to - enable it and set it to start on boot as follows: - # chkconfig ntpd on - # service ntpd start -
-
-
- NFS - - Our configuration is going to use NFS for both primary and secondary - storage. We are going to go ahead and setup two NFS shares for those - purposes. We'll start out by installing - nfs-utils. - - # yum install nfs-utils - - We now need to configure NFS to serve up two different shares. This is handled comparatively easily - in the /etc/exports file. You should ensure that it has the following content: - - -/secondary *(rw,async,no_root_squash) -/primary *(rw,async,no_root_squash) - - - You will note that we specified two directories that don't exist (yet) on the system. - We'll go ahead and create those directories and set permissions appropriately on them with the following commands: - - -# mkdir /primary -# mkdir /secondary - - CentOS 6.x releases use NFSv4 by default. NFSv4 requires that domain setting matches on all clients. - In our case, the domain is cloud.priv, so ensure that the domain setting in /etc/idmapd.conf - is uncommented and set as follows: - Domain = cloud.priv - Now you'll need uncomment the configuration values in the file /etc/sysconfig/nfs - -LOCKD_TCPPORT=32803 -LOCKD_UDPPORT=32769 -MOUNTD_PORT=892 -RQUOTAD_PORT=875 -STATD_PORT=662 -STATD_OUTGOING_PORT=2020 - - Now we need to configure the firewall to permit incoming NFS connections. 
- Edit the file /etc/sysconfig/iptables - - --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 111 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 111 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 2049 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 32803 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 32769 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 892 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 892 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 875 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 875 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p tcp --dport 662 -j ACCEPT --A INPUT -s 172.16.10.0/24 -m state --state NEW -p udp --dport 662 -j ACCEPT - - Now you can restart the iptables service with the following command: - - # service iptables restart - We now need to configure nfs service to start on boot and actually start it on the host by - executing the following commands: - - # service rpcbind start - # service nfs start - # chkconfig rpcbind on - # chkconfig nfs on - -
- - -
diff --git a/docs/runbook/en-US/Management.xml b/docs/runbook/en-US/Management.xml deleted file mode 100644 index 0df2bf118f9..00000000000 --- a/docs/runbook/en-US/Management.xml +++ /dev/null @@ -1,127 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Installation of the management server - - Now it is time to start installing CloudStack's management server - and some of the related components. - -
- Database Installation and Configuration - - We'll start out by installing MySQL and configuring - some options to ensure CloudStack runs well. - - - To install MySQL run the following command: - # yum -y install mysql-server - - With MySQL installed we need to make - a few configuration changes to /etc/my.cnf. - Specifically we need to add the following options to the [mysqld] section: - -innodb_rollback_on_timeout=1 -innodb_lock_wait_timeout=600 -max_connections=350 -log-bin=mysql-bin -binlog-format = 'ROW' - - - - Now that MySQL is properly configured we can - start it and configure it to start on boot as follows: - - # service mysqld start - # chkconfig mysqld on - - - -
- -
- Extraction - - The next step is to extract the contents of the CloudStack tarball - (mentioned in ) you - downloaded previously. To extract the contents of this tarball use - the following command: - - - # tar -xzvf CloudStack-oss-3.0.2-1-rhel6.2.tar.gz - - For the next few sections you'll need to cd into the first level that was just created. - -
-
- Installation - Now that you are in the directory created by extracting the tarball, it's now time to install. We'll run - ./install.sh and choose option . This will install the management server - and necessary dependencies. - - With the application itself installed we can now setup the database, we'll do that with the following command - and options: - - - # cloud-setup-databases cloud:password@localhost --deploy-as=root - - When this process is finished, you should see a message like "CloudStack has successfully initialized the database." - - Now that the database has been created, we can take the final step in setting up the management server by issuing the following command: - - # cloud-setup-management - -
-
- System Template Setup - CloudStack uses a number of system VMs to provide functionality for - accessing the console of virtual machines, providing various networking - services, and managing various aspects of storage. This step will - acquire those system images ready for deployment when we bootstrap - your cloud. - - - The place we are going to download these images to is the secondary - storage share that we setup earlier, so we'll need to mount that share - with the mount command run on the management server: - - - # mount -t nfs 172.16.10.2:/secondary /mnt/secondary - - - Now we need to download the system VM template and deploy that to the - share we just mounted. The management server includes a script to properly - manipulate the system VMs images. - - - # /usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -F - - - That concludes our setup of the management server. We still need to - configure CloudStack, but we will do that after we get our hypervisor - set up. - -
-
diff --git a/docs/runbook/en-US/Overview.xml b/docs/runbook/en-US/Overview.xml deleted file mode 100644 index f69798fdb67..00000000000 --- a/docs/runbook/en-US/Overview.xml +++ /dev/null @@ -1,100 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Overview - - Infrastructure-as-a-Service (IaaS) clouds can be a complex thing to build, - and by definition they have a plethora of options, which often lead to confusion - for even experienced admins who are newcomers to building cloud platforms. - The goal for this runbook is to provide a straightforward set of instructions - to get you up and running with CloudStack with a minimum amount of trouble. - -
- What exactly are we building? - - This runbook will focus on building a CloudStack cloud using KVM with - CentOS 6.2 with NFS storage on a flat layer-2 network utilizing - layer-3 network isolation (aka Security Groups), and doing it all - on a single piece of hardware. - - - KVM, or Kernel-based Virtual Machine is a virtualization technology - for the Linux kernel. KVM supports native virtualization atop - processors with hardware virtualization extensions. - - - Security Groups act as distributed firewalls that control access - to a group of virtual machines. - -
-
- High level overview of the process - - Before we actually get to installing CloudStack, we'll start with - installing our base operating system, and then configuring that to act - as an NFS server for several types of storage. We'll install the - management server, download the systemVMs, and finally install the agent - software. Finally we'll spend a good deal of time configuring the entire - cloud in the CloudStack web interface. - -
-
- Prerequisites - - To complete this runbook you'll need the following items: - - - - At least one computer which supports hardware virtualization. - - - - - The - - CentOS 6.2 x86_64 minimal install CD - - - - - - A /24 network with the gateway being at xxx.xxx.xxx.1, no DHCP should be on this network and - none of the computers running CloudStack may have a dynamic address. - - - - - Copy of - - CloudStack 3.0.2 for RHEL and CentOS 6.2 - - - - - -
- -
diff --git a/docs/runbook/en-US/Preface.xml b/docs/runbook/en-US/Preface.xml deleted file mode 100644 index 7a622674443..00000000000 --- a/docs/runbook/en-US/Preface.xml +++ /dev/null @@ -1,33 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Preface - - - - - - diff --git a/docs/runbook/en-US/Revision_History.xml b/docs/runbook/en-US/Revision_History.xml deleted file mode 100644 index 4aecafd1d66..00000000000 --- a/docs/runbook/en-US/Revision_History.xml +++ /dev/null @@ -1,42 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Revision History - - - - 0-0 - Mon Jun 25 2012 - - - Initial creation of book by publican - - - - - - - diff --git a/docs/runbook/en-US/Runbook.ent b/docs/runbook/en-US/Runbook.ent deleted file mode 100644 index cdc00b05f62..00000000000 --- a/docs/runbook/en-US/Runbook.ent +++ /dev/null @@ -1,22 +0,0 @@ - - - - - - diff --git a/docs/runbook/en-US/Runbook.xml b/docs/runbook/en-US/Runbook.xml deleted file mode 100644 index 11f195ca828..00000000000 --- a/docs/runbook/en-US/Runbook.xml +++ /dev/null @@ -1,37 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - - - - - - - - - - - diff --git a/docs/runbook/en-US/config.xml b/docs/runbook/en-US/config.xml deleted file mode 100644 index 90c34957fb1..00000000000 --- a/docs/runbook/en-US/config.xml +++ /dev/null @@ -1,177 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - Configuration - - As we noted before we will be using security groups to provide isolation - and by default that implies that we'll be using a flat layer-2 network. - It also means that the simplicity of our setup means that we can use the - quick installer. - -
- UI Access - - To get access to CloudStack's web interface, merely point your - browser to http://172.16.10.2:8080/client - The default username is 'admin', and the default password is 'password'. - You should see a splash screen that allows you to choose several options - for setting up CloudStack. You should choose the - option. - - - You should now see a prompt requiring you to change the password for - the admin user. Please do so. - -
-
- Setting up a Zone - - A zone is the largest organization entity in CloudStack - and we'll be creating one, this - should be the screen that you see in front of you now. And for us there are 5 pieces of - information that we need. - - - Name - we will set this to the ever-descriptive 'Zone1' for our cloud. - - - Public DNS 1 - we will set this to '8.8.8.8' for our cloud. - - - Public DNS 2 - we will set this to '8.8.4.4' for our cloud. - - - Internal DNS1 - we will also set this to '8.8.8.8' for our cloud. - - - Internal DNS2 - we will also set this to '8.8.8.4' for our cloud. - - - - - Notes about DNS settings - - CloudStack distinguishes between internal and public DNS. Internal - DNS is assumed to be capable of resolving internal-only - hostnames, such as your NFS server’s DNS name. Public DNS is - provided to the guest VMs to resolve public IP addresses. You can - enter the same DNS server for both types, but if you do so, you - must make sure that both internal and public IP addresses can - route to the DNS server. In our specific case we will not use any - names for resources internally, and we have indeed them set to look - to the same external resource so as to not add a namerserver setup - to our list of requirements. - - -
-
- Pod Configuration - Now that we've added a Zone, the next step that comes up is a prompt - for information regading a pod. Which is looking for 4 items. - - - Name - We'll use Pod1 for our cloud. - - - Gateway - We'll use 172.16.10.1 as our gateway - - - Netmask - We'll use 255.255.255.0 - - - Start/end reserved system IPs - we will use 172.16.10.10-172.16.10.20 - - - Guest gateway - We'll use 172.16.10.1 - - - Guest netmask - We'll use 255.255.255.0 - - - Guest start/end IP - We'll use 172.16.10.30-172.16.10.200 - - - -
-
- Cluster - Now that we've added a Zone, we need only add a few more items for configuring the cluster. - - - Name - We'll use Cluster1 - - - Hypervisor - Choose KVM - - - - You should be prompted to add the first host to your cluster at this point. Only a few bits of information are needed. - - - Hostname - we'll use the IP address 172.16.10.2 since we didn't set up a DNS server. - - - Username - we'll use 'root' - - - Password - enter the operating system password for the root user - - - -
- Primary Storage - With you cluster now setup - you should be prompted for primary storage information. Choose NFS as the storage type and then enter the following values in the fields: - - - Name - We'll use 'Primary1' - - - Server - We'll be using the IP address 172.16.10.2 - - - Path - Well define /primary as the path we are using - - - -
-
- Secondary Storage - If this is a new zone, you'll be prompted for secondary storage information - populate it as follows: - - - NFS server - We'll use the IP address 172.16.10.2 - - - Path - We'll use /secondary - - - - Now, click Launch and your cloud should begin setup - it may take several minutes depending on your internet connection speed for setup to finalize. -
-
- - -
- diff --git a/docs/runbook/en-US/kvm.xml b/docs/runbook/en-US/kvm.xml deleted file mode 100644 index 03119496a2c..00000000000 --- a/docs/runbook/en-US/kvm.xml +++ /dev/null @@ -1,103 +0,0 @@ - - -%BOOK_ENTITIES; -]> - - - - - - KVM Setup and installation - - KVM is the hypervisor we'll be using - we will recover the initial setup - which has already been done on the hypervisor host and cover installation - of the agent software, you can use the same steps to add additional KVM - nodes to your CloudStack environment. - -
- Prerequisites - - We explicitly are using the management server as a compute node as well, - which means that we have already performed many of the prerequisite steps - when setting up the management server, but we will list them here for - clarity. Those steps are: - - - - - - - - - - - - - - - - - - - - - - - - - You shouldn't need to do that for the management server, of course, but - any additional hosts will need for you to complete the above steps. - -
- -
- Installation - - You'll need to ensure that you are in the directory that was created when - we extracted the the tarball. - - - - You'll be running ./install.sh again and this time - choosing which will install the software necessary for - managing a KVM node. - -
-
- KVM Configuration - - KVM configuration is relatively simple at only a single item. We need to - edit the QEMU VNC configuration. This is done by editing - /etc/libvirt/qemu.conf and ensuring the following - line is present and uncommented. - vnc_listen=0.0.0.0 - - - You can now just restart the libvirt daemon by issuing the following command: - # service libvirt restart - - - That concludes our installation and configuration of KVM, and we'll now move to using the CloudStack UI - for the actual configuration of our cloud. - -
- -
- diff --git a/docs/runbook/zh-CN/Author_Group.po b/docs/runbook/zh-CN/Author_Group.po deleted file mode 100644 index 9643c110d67..00000000000 --- a/docs/runbook/zh-CN/Author_Group.po +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2012-08-04T04:05:40\n" -"PO-Revision-Date: 2012-08-04T04:05:40\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: firstname -#, no-c-format -msgid "Apache" -msgstr "" - -#. Tag: surname -#, no-c-format -msgid "CloudStack" -msgstr "" - diff --git a/docs/runbook/zh-CN/Book_Info.po b/docs/runbook/zh-CN/Book_Info.po deleted file mode 100644 index 66481967b13..00000000000 --- a/docs/runbook/zh-CN/Book_Info.po +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -msgid "" -msgstr "" -"Project-Id-Version: Apache CloudStack Runbook\n" -"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" -"POT-Creation-Date: 2012-07-10T15:13:22\n" -"PO-Revision-Date: 2012-08-03 06:28+0000\n" -"Last-Translator: micexia \n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Language: zh_CN\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#. Tag: title -#, no-c-format -msgid "Runbook" -msgstr "è¿è¡Œæ‰‹å†Œ" - -#. Tag: subtitle -#, no-c-format -msgid "Prescriptive instructions for deploying Apache CloudStack" -msgstr "部署Apache CloudStack规范指导" - -#. Tag: productname -#, no-c-format -msgid "Apache CloudStack" -msgstr "Apache CloudStack" - -#. Tag: para -#, no-c-format -msgid "" -"These runbooks are designed to provide a strict environment to guarantee a " -"higher degree of success in initial deployments of Apache CloudStack. All of" -" the elements of the environment will be provided to you. Apache CloudStack " -"is capable of much more complex configurations, but they are beyond the " -"scope of this document." 
-msgstr "本安装手册用于æä¾›åœ¨ä¸¥æ ¼æŒ‡å®šçš„环境中安装Apache CloudStack,以ä¿è¯åˆå§‹éƒ¨ç½²çš„高æˆåŠŸçŽ‡ï¼Œæ‰€æœ‰ç›¸å…³çš„èµ„æºå’ŒçŽ¯å¢ƒéƒ½ä¼šæä¾›ç»™æ‚¨ã€‚Apache CloudStackçš„é…ç½®å¯ä»¥éžå¸¸å¤æ‚,但这超出了本文档的范围。" diff --git a/docs/runbook/zh-CN/Chapter.po b/docs/runbook/zh-CN/Chapter.po deleted file mode 100644 index 2d9cfae3cb6..00000000000 --- a/docs/runbook/zh-CN/Chapter.po +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# -# AUTHOR , YEAR. -# -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2012-08-04T04:05:40\n" -"PO-Revision-Date: 2012-08-04T04:05:40\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" - -#. Tag: title -#, no-c-format -msgid "Test Chapter" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This is a test paragraph" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Test Section 1" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This is a test paragraph in a section" -msgstr "" - -#. Tag: title -#, no-c-format -msgid "Test Section 2" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "This is a test paragraph in Section 2" -msgstr "" - -#. 
Tag: para -#, no-c-format -msgid "listitem text" -msgstr "" - diff --git a/docs/runbook/zh-CN/Environment.po b/docs/runbook/zh-CN/Environment.po deleted file mode 100644 index 63bb53549ae..00000000000 --- a/docs/runbook/zh-CN/Environment.po +++ /dev/null @@ -1,494 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -msgid "" -msgstr "" -"Project-Id-Version: Apache CloudStack Runbook\n" -"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" -"POT-Creation-Date: 2012-07-10T15:13:22\n" -"PO-Revision-Date: 2012-08-03 05:51+0000\n" -"Last-Translator: micexia \n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Language: zh_CN\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#. Tag: title -#, no-c-format -msgid "Environment" -msgstr "环境准备" - -#. Tag: para -#, no-c-format -msgid "" -"Before you begin, you need to prepare the environment before you install " -"CloudStack. We will go over the steps to prepare now." -msgstr "在开始安装Cloudstack之å‰ï¼Œéœ€è¦å‡†å¤‡çŽ¯å¢ƒï¼Œä»¥ä¸‹å°†è¯¦ç»†åˆ†æ­¥éª¤æè¿°å„准备环节。" - -#. Tag: title -#, no-c-format -msgid "Operating System" -msgstr "æ“作系统" - -#. 
Tag: para -#, no-c-format -msgid "" -"Using the CentOS 6.2 x86_64 minimal install ISO, you'll need to install " -"CentOS on your hardware. The defaults will generally be acceptable for this " -"installation." -msgstr "使用 CentOS 6.2 x86_64 minimal install 镜åƒï¼Œåœ¨ç‰©ç†ä¸»æœºä¸Šå®‰è£…CentOS,安装过程中接å—默认选项。" - -#. Tag: para -#, no-c-format -msgid "" -"Once this installation is complete, you'll want to connect to your freshly " -"installed machine via SSH as the root user. Note that you should not allow " -"root logins in a production environment, so be sure to turn off remote " -"logins once you have finished the installation and configuration." -msgstr "当安装完æˆåŽï¼Œéœ€è¦ä»¥root身份通过SSH连接新安装的主机,注æ„ä¸è¦ä»¥root账户登录生产环境,请在完æˆå®‰è£…å’Œé…ç½®åŽå…³é—­è¿œç¨‹ç™»å½•。" - -#. Tag: title -#, no-c-format -msgid "Configuring the network" -msgstr "é…置网络" - -#. Tag: para -#, no-c-format -msgid "" -"By default the network will not come up on your hardware and you will need " -"to configure it to work in your environment. Since we specified that there " -"will be no DHCP server in this environment we will be manually configuring " -"your network interface. We will assume, for the purposes of this exercise, " -"that eth0 is the only network interface that will be connected and used." -msgstr "一般情况下网络ä¸ä¼šåœ¨æ–°å®‰è£…的主机上å¯ç”¨ï¼Œæ‚¨éœ€è¦æ ¹æ®çŽ¯å¢ƒè¿›è¡Œé…置。由于网络中ä¸èƒ½å­˜åœ¨ä»»ä½•DHCPæœåŠ¡å™¨ï¼Œæ‚¨éœ€è¦æ‰‹å·¥é…置网络接å£ã€‚为了快速简化安装的目的,这里å‡å®šä¸»æœºä¸Šåªæœ‰eth0一个网络接å£ã€‚" - -#. Tag: para -#, no-c-format -msgid "" -"Connecting via the console you should login as root. Check the file " -"/etc/sysconfig/network-scripts/ifcfg-eth0, it will look" -" like this by default:" -msgstr "以root身份连接主机控制å°ï¼Œæ£€æŸ¥æ–‡ä»¶ /etc/sysconfig/network-scripts/ifcfg-eth0,默认情况,其内容如下所示:" - -#. 
Tag: screen -#, no-c-format -msgid "" -"\n" -"DEVICE=\"eth0\"\n" -"HWADDR=\"52:54:00:B9:A6:C0\"\n" -"NM_CONTROLLED=\"yes\"\n" -"ONBOOT=\"no\"\n" -" " -msgstr "\nDEVICE=\"eth0\"\nHWADDR=\"52:54:00:B9:A6:C0\"\nNM_CONTROLLED=\"yes\"\nONBOOT=\"no\"\n " - -#. Tag: para -#, no-c-format -msgid "" -"Unfortunately, this configuration will not permit you to connect to the " -"network, and is also unsuitable for our purposes with CloudStack. We want to" -" configure that file so that it specifies the IP address, netmask, etc., as " -"shown in the following example:" -msgstr "但是根æ®ä»¥ä¸Šé…置您无法连接到网络,对于Cloudstackä¹ŸåŒæ ·ä¸é€‚åˆï¼›æ‚¨éœ€ä¿®æ”¹é…置文件,指定IP地å€ï¼Œç½‘络掩ç ç­‰ä¿¡æ¯ï¼Œå¦‚下例所示:" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -"DEVICE=eth0\n" -"HWADDR=52:54:00:B9:A6:C0\n" -"NM_CONTROLLED=no\n" -"ONBOOT=yes\n" -"BOOTPROTO=none\n" -"IPADDR=172.16.10.2\n" -"NETMASK=255.255.255.0\n" -"GATEWAY=172.16.10.1\n" -" " -msgstr "\nDEVICE=eth0\nHWADDR=52:54:00:B9:A6:C0\nNM_CONTROLLED=no\nONBOOT=yes\nBOOTPROTO=none\nIPADDR=172.16.10.2\nNETMASK=255.255.255.0\nGATEWAY=172.16.10.1\n " - -#. Tag: title -#, no-c-format -msgid "IP Addressing" -msgstr "IP地å€" - -#. Tag: para -#, no-c-format -msgid "" -"Throughout this document we are assuming that you will have a /24 network " -"for your CloudStack implementation. This can be any RFC 1918 network. " -"However, we are assuming that you will match the machine address that we are" -" using. Thus we may use " -"172.16.10.2 and because " -"you might be using the 192.168.55.0/24 network you would use " -"192.168.55.2" -msgstr "本文档å‡å®šæ‚¨æä¾›ä¸€ä¸ªC类网络供Cloudstack使用;该网络å¯ä»¥æ˜¯ä»»ä½•RFC 1918兼容的网络,但这里å‡å®šæ‚¨ä½¿ç”¨çš„IP地å€çš„æœ€åŽä¸€æ®µä¸Žæ‰‹å†Œä¸­ä½¿ç”¨çš„匹é…,例如手册中使用172.16.10.2,如果您使用192.168.55.0/24,请使用192.168.55.2" - -#. Tag: title -#, no-c-format -msgid "Hardware Addresses" -msgstr "物ç†åœ°å€" - -#. Tag: para -#, no-c-format -msgid "" -"You should not use the hardware address (aka MAC address) from our example " -"for your configuration. 
It is network interface specific, so you should keep" -" the address already provided in the HWADDR directive." -msgstr "请ä¸è¦ä½¿ç”¨ç½‘络é…置例å­ä¸­çš„MAC地å€ï¼Œè¯¥åœ°å€æ¯ä¸ªç½‘å¡å”¯ä¸€ï¼Œè¯·ä¿ç•™æ‚¨é…置文件中HWADDR段已æä¾›çš„内容。" - -#. Tag: para -#, no-c-format -msgid "" -"Now that we have the configuration files properly set up, we need to run a " -"few commands to start up the network" -msgstr "é…置文件准备完毕åŽï¼Œéœ€è¦è¿è¡Œå‘½ä»¤å¯åŠ¨ç½‘ç»œã€‚" - -#. Tag: screen -#, no-c-format -msgid "" -"# chkconfig network " -"on" -msgstr "# chkconfig network on" - -#. Tag: screen -#, no-c-format -msgid "" -"# service network " -"start" -msgstr "# service network start" - -#. Tag: para -#, no-c-format -msgid "" -"This should bring the network up successfully, but we now need to enable " -"name resolution. To do that we will edit " -"/etc/resolv.conf. These instructions will add one of " -"the nameservers from Google, though you are free to add a local nameserver " -"if you wish. Your /etc/resolv.conf should modified to " -"look like:" -msgstr "以上命令应该会æˆåŠŸå¯ç”¨ç½‘络,接下æ¥éœ€è¦å¯ç”¨åŸŸåè§£æžï¼Œç¼–辑文件/etc/resolv.conf,以下指令将添加Googleçš„DNSæœåŠ¡å™¨ï¼Œå½“ç„¶æ‚¨ä¹Ÿå¯ä»¥æ ¹æ®éœ€è¦æ·»åŠ æœ¬åœ°çš„åŸŸåæœåŠ¡å™¨ï¼Œ/etc/resolv.conf 应更改为如下内容:" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -"nameserver 8.8.8.8\n" -" " -msgstr "\nnameserver 8.8.8.8\n " - -#. Tag: title -#, no-c-format -msgid "Hostname" -msgstr "主机å" - -#. Tag: para -#, no-c-format -msgid "" -"Cloudstack requires that the hostname be properly set. If you used the " -"default options in the installation, then your hostname is currently set to " -"localhost.localdomain. To test this we will run:" -msgstr "Cloudstackè¦æ±‚正确设置主机å,如果按照时您接å—了默认选项,主机å为localhost.localdomain,输入如下命令å¯ä»¥è¿›è¡ŒéªŒè¯" - -#. Tag: screen -#, no-c-format -msgid "# hostname --fqdn" -msgstr "# hostname --fqdn" - -#. Tag: para -#, no-c-format -msgid "At this point it will likely return:" -msgstr "此时应会返回:" - -#. 
Tag: screen -#, no-c-format -msgid "localhost" -msgstr "localhost" - -#. Tag: para -#, no-c-format -msgid "" -"To rectify this situation - we'll set the hostname by editing the " -"/etc/hosts file so that it follows a similar format to " -"this example:" -msgstr "为了纠正这个问题,需设置主机å,通过编辑/etc/hosts 文件,将其更改为类似如下内容:" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -"127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4\n" -"172.16.10.2 srvr1.cloud.priv\n" -msgstr "\n127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4\n172.16.10.2 srvr1.cloud.priv\n" - -#. Tag: para -#, no-c-format -msgid "" -"After you've modified that file, go ahead and restart the network using:" -msgstr "更改é…置文件åŽï¼Œé‡å¯ç½‘络æœåŠ¡ï¼š" - -#. Tag: screen -#, no-c-format -msgid "" -"# service network " -"restart" -msgstr "# service network restart" - -#. Tag: para -#, no-c-format -msgid "" -"Now recheck with the hostname --fqdn command and ensure " -"that it returns a FQDN response" -msgstr "通过命令hostname --fqdn釿–°æ£€æŸ¥ä¸»æœºå,此时应返回一个FQDNæ ¼å¼ç»“果。" - -#. Tag: title -#, no-c-format -msgid "SELinux" -msgstr "SELinux" - -#. Tag: para -#, no-c-format -msgid "" -"At the moment, for CloudStack to work properly SELinux must be set to " -"permissive. We want to both configure this for future boots and modify it in" -" the current running system." -msgstr "Cloudstack当å‰ç‰ˆæœ¬éœ€è¦SELinux设置为permissiveæ‰èƒ½æ­£å¸¸å·¥ä½œï¼Œä½ éœ€è¦æ”¹å˜å½“å‰é…ç½®ï¼ŒåŒæ—¶å°†è¯¥é…ç½®æŒä¹…化,使其在主机é‡å¯åŽä»ç„¶ç”Ÿæ•ˆã€‚" - -#. Tag: para -#, no-c-format -msgid "" -"To configure SELinux to be permissive in the running system we need to run " -"the following command:" -msgstr "å°†SELinuxé…置为permissive需执行如下命令:" - -#. Tag: screen -#, no-c-format -msgid "" -"# setenforce 0" -msgstr "# setenforce 0" - -#. 
Tag: para -#, no-c-format -msgid "" -"To ensure that it remains in that state we need to configure the file " -"/etc/selinux/config to reflect the permissive state, as" -" shown in this example:" -msgstr "为确ä¿å…¶æŒä¹…生效需更改é…置文件/etc/selinux/config,设置为permissive,如下例所示:" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -"\n" -"# This file controls the state of SELinux on the system.\n" -"# SELINUX= can take one of these three values:\n" -"# enforcing - SELinux security policy is enforced.\n" -"# permissive - SELinux prints warnings instead of enforcing.\n" -"# disabled - No SELinux policy is loaded.\n" -"SELINUX=permissive\n" -"# SELINUXTYPE= can take one of these two values:\n" -"# targeted - Targeted processes are protected,\n" -"# mls - Multi Level Security protection.\n" -"SELINUXTYPE=targeted\n" -" " -msgstr "\n\n# This file controls the state of SELinux on the system.\n# SELINUX= can take one of these three values:\n# enforcing - SELinux security policy is enforced.\n# permissive - SELinux prints warnings instead of enforcing.\n# disabled - No SELinux policy is loaded.\nSELINUX=permissive\n# SELINUXTYPE= can take one of these two values:\n# targeted - Targeted processes are protected,\n# mls - Multi Level Security protection.\nSELINUXTYPE=targeted\n " - -#. Tag: title -#, no-c-format -msgid "NTP" -msgstr "NTP" - -#. Tag: para -#, no-c-format -msgid "" -"NTP configuration is a necessity for keeping all of the clocks in your cloud" -" servers in sync. However, NTP is not installed by default. So we'll install" -" and and configure NTP at this stage. Installation is accomplished as " -"follows:" -msgstr "ä¸ºäº†åŒæ­¥äº‘å¹³å°ä¸­ä¸»æœºçš„æ—¶é—´ï¼Œéœ€è¦é…ç½®NTP,但NTP默认没有安装。因此需è¦å…ˆå®‰è£…NTP,然åŽè¿›è¡Œé…置。通过以下命令进行安装:" - -#. Tag: screen -#, no-c-format -msgid "" -"# yum install ntp" -msgstr "# yum install ntp" - -#. 
Tag: para -#, no-c-format -msgid "" -"The actual default configuration is fine for our purposes, so we merely need" -" to enable it and set it to start on boot as follows:" -msgstr "使用实际的默认é…置文件å³å¯æ»¡è¶³æœ¬æ–‡æ¡£çš„è¦æ±‚,仅需å¯ç”¨NTP并设置为开机å¯åŠ¨ï¼Œå¦‚ä¸‹æ‰€ç¤ºï¼š" - -#. Tag: screen -#, no-c-format -msgid "" -"# chkconfig ntpd " -"on" -msgstr "# chkconfig ntpd on" - -#. Tag: screen -#, no-c-format -msgid "" -"# service ntpd " -"start" -msgstr "# service ntpd start" - -#. Tag: title -#, no-c-format -msgid "NFS" -msgstr "NFS" - -#. Tag: para -#, no-c-format -msgid "" -"Our configuration is going to use NFS for both primary and secondary " -"storage. We are going to go ahead and setup two NFS shares for those " -"purposes. We'll start out by installing nfs-" -"utils." -msgstr "本文档将é…置的环境使用NFSåšä¸ºä¸»å­˜å‚¨å’Œæ¬¡è¦å­˜å‚¨ï¼Œéœ€é…置两个NFS共享目挂载点,在此之å‰éœ€å…ˆå®‰è£…nfs-utils:" - -#. Tag: screen -#, no-c-format -msgid "" -"# yum install nfs-" -"utils" -msgstr "# yum install nfs-utils" - -#. Tag: para -#, no-c-format -msgid "" -"We now need to configure NFS to serve up two different shares. This is " -"handled comparatively easily in the /etc/exports file. " -"You should ensure that it has the following content:" -msgstr "接下æ¥éœ€é…ç½®NFSæä¾›ä¸¤ä¸ªä¸åŒçš„æŒ‚载点,通过编辑/etc/exports文件å³å¯ç®€å•实现,请确ä¿å…¶å†…容如下所示:" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -"/secondary *(rw,async,no_root_squash)\n" -"/primary *(rw,async,no_root_squash)\n" -" " -msgstr "\n/secondary *(rw,async,no_root_squash)\n/primary *(rw,async,no_root_squash)\n " - -#. Tag: para -#, no-c-format -msgid "" -"You will note that we specified two directories that don't exist (yet) on " -"the system. We'll go ahead and create those directories and set permissions " -"appropriately on them with the following commands:" -msgstr "注æ„é…置文件中指定了两个系统中ä¸å­˜åœ¨çš„目录,下é¢éœ€è¦åˆ›å»ºè¿™äº›ç›®å½•并且设置åˆé€‚çš„æƒé™ï¼Œå¯¹åº”的命令如下所示:" - -#. 
Tag: screen -#, no-c-format -msgid "" -"\n" -"# mkdir /primary\n" -"# mkdir /secondary\n" -"# chmod 777 /primary\n" -"# chmod 777 /secondary\n" -" " -msgstr "\n# mkdir /primary\n# mkdir /secondary\n# chmod 777 /primary\n# chmod 777 /secondary\n " - -#. Tag: para -#, no-c-format -msgid "" -"CentOS 6.x releases use NFSv4 by default. NFSv4 requires that domain setting" -" matches on all clients. In our case, the domain is cloud.priv, so ensure " -"that the domain setting in /etc/idmapd.conf is " -"uncommented and set as follows:" -msgstr "CentOS 6.x 版本默认使用NFSv4,NFSv4è¦æ±‚所有客户端的域设置匹é…,这里设置为cloud.privä¸ºä¾‹ï¼Œè¯·ç¡®ä¿æ–‡ä»¶/etc/idmapd.conf中的域设置没有被注释掉,并设置为以下内容:" - -#. Tag: screen -#, no-c-format -msgid "Domain = cloud.priv" -msgstr "Domain = cloud.priv" - -#. Tag: para -#, no-c-format -msgid "" -"Now you'll need uncomment the configuration values in the file " -"/etc/sysconfig/nfs" -msgstr "ç„¶åŽæ‚¨éœ€è¦å–消/etc/sysconfig/nfs文件中以下é…置项的注释:" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -"LOCKD_TCPPORT=32803\n" -"LOCKD_UDPPORT=32769\n" -"MOUNTD_PORT=892\n" -"RQUOTAD_PORT=875\n" -"STATD_PORT=662\n" -"STATD_OUTGOING_PORT=2020\n" -" " -msgstr "\nLOCKD_TCPPORT=32803\nLOCKD_UDPPORT=32769\nMOUNTD_PORT=892\nRQUOTAD_PORT=875\nSTATD_PORT=662\nSTATD_OUTGOING_PORT=2020\n " - -#. Tag: para -#, no-c-format -msgid "" -"Now we need to configure the firewall to permit incoming NFS connections. " -"Edit the file /etc/sysconfig/iptables" -msgstr "接下æ¥è¿˜éœ€é…置防ç«å¢™ï¼Œå…许NFS连接。编辑文件/etc/sysconfig/iptables:" - -#. 
Tag: screen -#, no-c-format -msgid "" -"\n" -"-A INPUT -m state --state NEW -p udp --dport 111 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p tcp --dport 111 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p tcp --dport 2049 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p tcp --dport 32803 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p udp --dport 32769 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p tcp --dport 892 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p udp --dport 892 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p tcp --dport 875 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p udp --dport 875 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p tcp --dport 662 -j ACCEPT\n" -"-A INPUT -m state --state NEW -p udp --dport 662 -j ACCEPT\n" -" " -msgstr "\n-A INPUT -m state --state NEW -p udp --dport 111 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 111 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 2049 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 32803 -j ACCEPT\n-A INPUT -m state --state NEW -p udp --dport 32769 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 892 -j ACCEPT\n-A INPUT -m state --state NEW -p udp --dport 892 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 875 -j ACCEPT\n-A INPUT -m state --state NEW -p udp --dport 875 -j ACCEPT\n-A INPUT -m state --state NEW -p tcp --dport 662 -j ACCEPT\n-A INPUT -m state --state NEW -p udp --dport 662 -j ACCEPT\n " - -#. Tag: para -#, no-c-format -msgid "Now you can restart the iptables service with the following command:" -msgstr "é€šè¿‡ä»¥ä¸‹å‘½ä»¤é‡æ–°å¯åЍiptablesæœåŠ¡ï¼š" - -#. Tag: screen -#, no-c-format -msgid "" -"# service iptables " -"restart" -msgstr "# service iptables restart" - -#. Tag: para -#, no-c-format -msgid "" -"We now need to configure nfs service to start on boot and actually start it " -"on the host by executing the following commands:" -msgstr "最åŽéœ€è¦é…ç½®NFSæœåŠ¡ä¸ºå¼€æœºè‡ªå¯åŠ¨ï¼š" - -#. 
Tag: screen -#, no-c-format -msgid "" -"\n" -" # service rpcbind start\n" -" # service nfs start\n" -" # chkconfig rpcbind on\n" -" # chkconfig nfs on\n" -" " -msgstr "\n # service rpcbind start\n # service nfs start\n # chkconfig rpcbind on\n # chkconfig nfs on\n " diff --git a/docs/runbook/zh-CN/Management.po b/docs/runbook/zh-CN/Management.po deleted file mode 100644 index 63fc1497104..00000000000 --- a/docs/runbook/zh-CN/Management.po +++ /dev/null @@ -1,235 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -msgid "" -msgstr "" -"Project-Id-Version: Apache CloudStack Runbook\n" -"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" -"POT-Creation-Date: 2012-07-10T15:13:22\n" -"PO-Revision-Date: 2012-08-03 06:17+0000\n" -"Last-Translator: micexia \n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Language: zh_CN\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#. Tag: title -#, no-c-format -msgid "Installation of the management server" -msgstr "å®‰è£…ç®¡ç†æœåС噍" - -#. Tag: para -#, no-c-format -msgid "" -"Now it is time to start installing CloudStack's management server and some " -"of the related components." 
-msgstr "现在需è¦å®‰è£…CloudStackç®¡ç†æœåŠ¡å™¨å’Œç›¸å…³çš„ç»„ä»¶ã€‚" - -#. Tag: title -#, no-c-format -msgid "Database Installation and Configuration" -msgstr "æ•°æ®åº“安装和é…ç½®" - -#. Tag: para -#, no-c-format -msgid "" -"We'll start out by installing MySQL and " -"configuring some options to ensure CloudStack runs well." -msgstr "首先安装MySQL,并对它进行é…置,以确ä¿CloudStackè¿è¡Œæ­£å¸¸ã€‚" - -#. Tag: para -#, no-c-format -msgid "To install MySQL run the following command:" -msgstr "è¿è¡Œä»¥ä¸‹å‘½ä»¤ï¼Œå®‰è£…MySQL :" - -#. Tag: screen -#, no-c-format -msgid "" -"# yum -y install mysql-" -"server" -msgstr "# yum -y install mysql-server" - -#. Tag: para -#, no-c-format -msgid "" -"With MySQL installed we need to make a few " -"configuration changes to /etc/my.cnf. Specifically we " -"need to add the following options to the [mysqld] section:" -msgstr "MySQL安装完æˆåŽï¼Œéœ€æ›´æ”¹å…¶é…置文件/etc/my.cnf,在[mysqld]下添加如下内容:" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -"innodb_rollback_on_timeout=1\n" -"innodb_lock_wait_timeout=600\n" -"max_connections=350\n" -"log-bin=mysql-bin\n" -"binlog-format = 'ROW' \n" -" " -msgstr "\ninnodb_rollback_on_timeout=1\ninnodb_lock_wait_timeout=600\nmax_connections=350\nlog-bin=mysql-bin\nbinlog-format = 'ROW' \n " - -#. Tag: para -#, no-c-format -msgid "" -"Now that MySQL is properly configured we can " -"start it and configure it to start on boot as follows:" -msgstr "é…ç½®MySQL完æˆåŽï¼Œå¯åŠ¨å®ƒå¹¶é…置为开机自å¯åŠ¨ï¼š" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -" # service mysqld start\n" -" # chkconfig mysqld on\n" -" " -msgstr "\n # service mysqld start\n # chkconfig mysqld on\n " - -#. Tag: title -#, no-c-format -msgid "Extraction" -msgstr "解压安装包" - -#. Tag: para -#, no-c-format -msgid "" -"The next step is to extract the contents of the CloudStack tarball " -"(mentioned in ) you " -"downloaded previously. To extract the contents of this tarball use the " -"following command:" -msgstr "下一步是解压缩之å‰ä¸‹è½½çš„CloudStack安装包(è§),通过以下命令进行解压缩:" - -#. 
Tag: screen -#, no-c-format -msgid "" -"\n" -" # tar -xzvf CloudStack-oss-3.0.2-1-rhel6.2.tar.gz\n" -" " -msgstr "\n # tar -xzvf CloudStack-oss-3.0.2-1-rhel6.2.tar.gz\n " - -#. Tag: para -#, no-c-format -msgid "" -"For the next few sections you'll need to cd into the " -"first level that was just created." -msgstr "ä»¥ä¸‹ç« èŠ‚éœ€è¦æ‚¨cd 进入刚创建的目录中" - -#. Tag: title -#, no-c-format -msgid "Installation" -msgstr "安装" - -#. Tag: para -#, no-c-format -msgid "" -"Now that you are in the directory created by extracting the tarball, it's " -"now time to install. We'll run ./install.sh and choose " -"option . This will install the management server and " -"necessary dependencies." -msgstr "当您进入解压缩åŽåˆ›å»ºçš„目录中,下一步是安装。执行./install.sh并选择选项åŽï¼Œç®¡ç†æœåŠ¡å™¨å’Œç›¸å…³ä¾èµ–会自动安装。" - -#. Tag: para -#, no-c-format -msgid "" -"With the application itself installed we can now setup the database, we'll " -"do that with the following command and options:" -msgstr "å¹³å°ç³»ç»Ÿæœ¬èº«å®‰è£…åŽï¼Œéœ€åˆå§‹åŒ–æ•°æ®åº“,通过以下命令和选项完æˆï¼š" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -" # cloud-setup-databases cloud:password@localhost --deploy-as=root\n" -" " -msgstr "\n # cloud-setup-databases cloud:password@localhost --deploy-as=root\n " - -#. Tag: para -#, no-c-format -msgid "" -"When this process is finished, you should see a message like \"CloudStack " -"has successfully initialized the database.\"" -msgstr "当该过程结æŸåŽï¼Œæ‚¨åº”该å¯ä»¥çœ‹åˆ°ç±»ä¼¼ä¿¡æ¯ï¼š\"CloudStack has successfully initialized the database.\"" - -#. Tag: para -#, no-c-format -msgid "" -"Now that the database has been created, we can take the final step in " -"setting up the management server by issuing the following command:" -msgstr "æ•°æ®åº“创建åŽï¼Œæœ€åŽä¸€æ­¥æ˜¯é…ç½®ç®¡ç†æœåŠ¡å™¨ï¼Œé€šè¿‡å¦‚ä¸‹å‘½ä»¤æ‰§è¡Œï¼š" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -" # cloud-setup-mangament\n" -" " -msgstr "\n # cloud-setup-mangament\n " - -#. 
Tag: title -#, no-c-format -msgid "System Template Setup" -msgstr "系统模æ¿é…ç½®" - -#. Tag: para -#, no-c-format -msgid "" -"CloudStack uses a number of system VMs to provide functionality for " -"accessing the console of virtual machines, providing various networking " -"services, and managing various aspects of storage. This step will acquire " -"those system images ready for deployment when we bootstrap your cloud." -msgstr "CloudStack通过一系列系统虚拟机æä¾›åŠŸèƒ½ï¼Œå¦‚è®¿é—®è™šæ‹ŸæœºæŽ§åˆ¶å°ï¼Œå¦‚æä¾›å„类网络æœåŠ¡ï¼Œä»¥åŠç®¡ç†æ¬¡è¦å­˜å‚¨çš„中的å„类资æºã€‚该步骤会获å–系统虚拟机模æ¿ï¼Œç”¨äºŽäº‘å¹³å°å¼•导åŽç³»ç»Ÿè™šæ‹Ÿæœºçš„部署。" - -#. Tag: para -#, no-c-format -msgid "" -"The place we are going to download these images to is the secondary storage " -"share that we setup earlier, so we'll need to mount that share with the " -"mount command run on the management server:" -msgstr "模æ¿ä¸‹è½½åŽå­˜æ”¾çš„ä½ç½®æ˜¯ä¹‹å‰é…置的次è¦å­˜å‚¨ç›®å½•,需先使用mount å‘½ä»¤æŒ‚è½½äºŒçº§å­˜å‚¨ï¼Œåœ¨ç®¡ç†æœåŠ¡å™¨ä¸Šè¿è¡Œå¦‚下命令:" - -#. Tag: screen -#, no-c-format -msgid "" -"\n" -" # mount -t nfs 172.16.10.2:/secondary /mnt/secondary\n" -" " -msgstr "\n # mount -t nfs 172.16.10.2:/secondary /mnt/secondary\n " - -#. Tag: para -#, no-c-format -msgid "" -"Now we need to download the system VM template and deploy that to the share " -"we just mounted. The management server includes a script to properly " -"manipulate the system VMs images." -msgstr "ç„¶åŽéœ€è¦ä¸‹è½½ç³»ç»Ÿè™šæ‹Ÿæœºæ¨¡æ¿ï¼Œå¹¶æŠŠè¿™äº›æ¨¡æ¿éƒ¨ç½²äºŽåˆšæ‰åˆ›å»ºçš„æ¬¡è¦å­˜å‚¨ä¸­ï¼›ç®¡ç†æœåŠ¡å™¨åŒ…å«ä¸€ä¸ªè„šæœ¬å¯ä»¥æ­£ç¡®çš„æ“ä½œç³»ç»Ÿè™šæ‹Ÿæœºæ¨¡æ¿ï¼š" - -#. 
Tag: screen -#, no-c-format -msgid "" -"\n" -" # /usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -F\n" -" " -msgstr "\n # /usr/lib64/cloud/agent/scripts/storage/secondary/cloud-install-sys-tmplt -m /mnt/secondary -u http://download.cloud.com/templates/acton/acton-systemvm-02062012.qcow2.bz2 -h kvm -F\n " - -#. Tag: para -#, no-c-format -msgid "" -"That concludes our setup of the management server. We still need to " -"configure CloudStack, but we will do that after we get our hypervisor set " -"up." -msgstr "ä»¥ä¸Šæ˜¯ç®¡ç†æœåŠ¡å™¨çš„å®‰è£…å’Œé…置过程;在é…ç½®CloudStack之å‰ï¼Œéœ€å¯ç”¨hypervisor" diff --git a/docs/runbook/zh-CN/Overview.po b/docs/runbook/zh-CN/Overview.po deleted file mode 100644 index 1bfd88f8645..00000000000 --- a/docs/runbook/zh-CN/Overview.po +++ /dev/null @@ -1,130 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: Apache CloudStack Runbook\n" -"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" -"POT-Creation-Date: 2012-07-10T15:13:22\n" -"PO-Revision-Date: 2012-08-03 09:27+0000\n" -"Last-Translator: micexia \n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Language: zh_CN\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#. Tag: title -#, no-c-format -msgid "Overview" -msgstr "概述" - -#. Tag: para -#, no-c-format -msgid "" -"Infrastructure-as-a-Service (IaaS) clouds can be a complex thing to build, " -"and by definition they have a plethora of options, which often lead to " -"confusion for even experienced admins who are newcomers to building cloud " -"platforms. The goal for this runbook is to provide a straightforward set of " -"instructions to get you up and running with CloudStack with a minimum amount" -" of trouble." -msgstr "æ­å»ºåŸºç¡€è®¾æ–½å³æœåŠ¡ (Infrastructure-as-a-Service, IaaS)äº‘å¹³å°æ˜¯ä¸€ä»¶å¤æ‚的工作,它æä¾›äº†è¿‡äºŽå¤šçš„选项,以至于甚至是ç»éªŒä¸°å¯Œçš„管ç†å‘˜åœ¨åˆšå¼€å§‹æ­å»ºäº‘平尿—¶ä¹Ÿä¼šæ„Ÿåˆ°å›°æƒ‘。该æ“作手册的目标是æä¾›ä¸€ç³»åˆ—直观的说明以帮助您æ­å»ºå¹¶è¿è¡Œä¸€ä¸ªæœ€ç®€å•çš„Cloudstack环境。" - -#. Tag: title -#, no-c-format -msgid "What exactly are we building?" -msgstr "目标部署环境" - -#. Tag: para -#, no-c-format -msgid "" -"This runbook will focus on building a CloudStack cloud using KVM with CentOS" -" 6.2 with NFS storage on a flat layer-2 network utilizing layer-3 network " -"isolation (aka Security Groups), and doing it all on a single piece of " -"hardware." -msgstr "该æ“作手册将é‡ç‚¹ä»‹ç»å¦‚何æ­å»ºå¦‚下Cloudstack云平å°:\n使用CentOS 6.2 KVM\n部署于æ‰å¹³äºŒå±‚网络\n使用三层网络隔离(安全组)\n所有资æºé›†ä¸­äºŽä¸€å°ç‰©ç†ä¸»æœº" - -#. Tag: para -#, no-c-format -msgid "" -"KVM, or Kernel-based Virtual Machine is a virtualization technology for the " -"Linux kernel. KVM supports native virtualization atop processors with " -"hardware virtualization extensions." 
-msgstr "KVM (Kernel-based Virtual Machine) 是一ç§é’ˆå¯¹LInux内核的虚拟化技术。KVMæ”¯æŒæœ¬åœ°è™šæ‹ŸåŒ–,主机的CPU处ç†å™¨éœ€æ”¯æŒç¡¬ä»¶è™šæ‹ŸåŒ–扩展。" - -#. Tag: para -#, no-c-format -msgid "" -"Security Groups act as distributed firewalls that control access to a group " -"of virtual machines." -msgstr "安全组起到类似分布å¼é˜²ç«å¢™çš„作用,它å¯ä»¥å¯¹ä¸€ç»„虚拟机进行访问控制。" - -#. Tag: title -#, no-c-format -msgid "High level overview of the process" -msgstr "整体过程概述" - -#. Tag: para -#, no-c-format -msgid "" -"Before we actually get to installing CloudStack, we'll start with installing" -" our base operating system, and then configuring that to act as an NFS " -"server for several types of storage. We'll install the management server, " -"download the systemVMs, and finally install the agent software. Finally " -"we'll spend a good deal of time configuring the entire cloud in the " -"CloudStack web interface." -msgstr "在实际安装Cloudstack之å‰ï¼Œéœ€è¦å…ˆå®‰è£…基本的æ“作系统,并将其é…置为一å°NFSæœåŠ¡å™¨ç”¨äºŽæä¾›å„类存储资æºã€‚接下æ¥çš„æ­¥éª¤æ˜¯å®‰è£…ç®¡ç†æœåŠ¡å™¨ï¼Œä¸‹è½½ç³»ç»Ÿè™šæ‹Ÿæœºæ¨¡æ¿ï¼Œå®‰è£…agnet;最åŽå°†æè¿°å¦‚何使用Cloudstack Web界é¢é…置整个云平å°ã€‚" - -#. Tag: title -#, no-c-format -msgid "Prerequisites" -msgstr "先决æ¡ä»¶" - -#. Tag: para -#, no-c-format -msgid "To complete this runbook you'll need the following items:" -msgstr "å®Œæˆæ­¤æ“作手册您需è¦ä»¥ä¸‹èµ„æºï¼š" - -#. Tag: para -#, no-c-format -msgid "At least one computer which supports hardware virtualization." -msgstr "è‡³å°‘ä¸€å°æ”¯æŒç¡¬ä»¶è™šæ‹ŸåŒ–的主机" - -#. Tag: para -#, no-c-format -msgid "" -"The " -" CentOS 6.2 x86_64 minimal install CD " -msgstr " CentOS 6.2 x86_64 minimal install CD " - -#. Tag: para -#, no-c-format -msgid "" -"A /24 network with the gateway being at xxx.xxx.xxx.1, no DHCP should be on " -"this network and none of the computers running CloudStack may have a dynamic" -" address." -msgstr "一个C类网络,网关为 xxx.xxx.xxx.1,网络中ä¸èƒ½å­˜åœ¨DHCPæœåŠ¡å™¨ï¼Œæ‰€æœ‰è¿è¡ŒCloudstackçš„ä¸»æœºéœ€ä½¿ç”¨é™æ€IP地å€ã€‚" - -#. 
Tag: para -#, no-c-format -msgid "" -"Copy of CloudStack 3.0.2 for RHEL" -" and CentOS 6.2 " -msgstr "安装包 CloudStack 3.0.2 for RHEL and CentOS 6.2 " diff --git a/docs/runbook/zh-CN/Preface.po b/docs/runbook/zh-CN/Preface.po deleted file mode 100644 index 43f8b31f5a2..00000000000 --- a/docs/runbook/zh-CN/Preface.po +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -msgid "" -msgstr "" -"Project-Id-Version: Apache CloudStack Runbook\n" -"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" -"POT-Creation-Date: 2012-07-10T15:13:22\n" -"PO-Revision-Date: 2012-08-03 04:53+0000\n" -"Last-Translator: micexia \n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Language: zh_CN\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#. Tag: title -#, no-c-format -msgid "Preface" -msgstr "å‰è¨€" diff --git a/docs/runbook/zh-CN/Revision_History.po b/docs/runbook/zh-CN/Revision_History.po deleted file mode 100644 index 7b8b91da94b..00000000000 --- a/docs/runbook/zh-CN/Revision_History.po +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -msgid "" -msgstr "" -"Project-Id-Version: Apache CloudStack Runbook\n" -"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" -"POT-Creation-Date: 2012-07-10T15:13:22\n" -"PO-Revision-Date: 2012-08-03 04:52+0000\n" -"Last-Translator: micexia \n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Language: zh_CN\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#. Tag: title -#, no-c-format -msgid "Revision History" -msgstr "修订历å²" - -#. Tag: member -#, no-c-format -msgid "Initial creation of book by publican" -msgstr "最åˆåˆ›å»ºç‰ˆæœ¬" diff --git a/docs/runbook/zh-CN/config.po b/docs/runbook/zh-CN/config.po deleted file mode 100644 index 507b635328e..00000000000 --- a/docs/runbook/zh-CN/config.po +++ /dev/null @@ -1,280 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -msgid "" -msgstr "" -"Project-Id-Version: Apache CloudStack Runbook\n" -"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" -"POT-Creation-Date: 2012-08-06T15:22:17\n" -"PO-Revision-Date: 2012-08-07 04:40+0000\n" -"Last-Translator: micexia \n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Language: zh_CN\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#. Tag: title -#, no-c-format -msgid "Configuration" -msgstr "云平å°é…ç½®" - -#. Tag: para -#, no-c-format -msgid "" -"As we noted before we will be using security groups to provide isolation and" -" by default that implies that we'll be using a flat layer-2 network. It also" -" means that the simplicity of our setup means that we can use the quick " -"installer." -msgstr "如上文所述,该手册所æè¿°çš„环境将使用安全组æä¾›ç½‘络隔离,这æ„å‘³ç€æ‚¨çš„安装环境仅需è¦ä¸€ä¸ªæ‰å¹³çš„äºŒå±‚ç½‘ç»œï¼ŒåŒæ ·æ„味ç€è¾ƒä¸ºç®€å•çš„é…置和快速的安装。" - -#. Tag: title -#, no-c-format -msgid "UI Access" -msgstr "访问用户界é¢" - -#. Tag: para -#, no-c-format -msgid "" -"To get access to CloudStack's web interface, merely point your browser to " -"http://172.16.10.2:8080/client The default " -"username is 'admin', and the default password is 'password'. You should see " -"a splash screen that allows you to choose several options for setting up " -"CloudStack. You should choose the " -" option." 
-msgstr "访问CloudStack用户界é¢ï¼Œä»…需通过æµè§ˆå™¨è®¿é—®: http://172.16.10.2:8080/client 默认的用户å为â€adminâ€œï¼Œé»˜è®¤å¯†ç æ˜¯â€œpasswordâ€ã€‚第一次登录åŽå¯ä»¥çœ‹åˆ°æ¬¢è¿Žç•Œé¢ï¼Œè¯¥ç•Œé¢æä¾›ä¸¤ä¸ªé…ç½®CloudStack的选项,请选择“继续执行基本安装â€ã€‚" - -#. Tag: para -#, no-c-format -msgid "" -"You should now see a prompt requiring you to change the password for the " -"admin user. Please do so." -msgstr "此时您会看到æç¤ºï¼Œè¦æ±‚为admin用户更改密ç ï¼Œè¯·æ›´æ–°å¯†ç åŽç»§ç»­ã€‚" - -#. Tag: title -#, no-c-format -msgid "Setting up a Zone" -msgstr "é…置区域" - -#. Tag: para -#, no-c-format -msgid "" -"A zone is the largest organization entity in CloudStack - and we'll be " -"creating one, this should be the screen that you see in front of you now. " -"And for us there are 5 pieces of information that we need." -msgstr "区域是CloudStackå¹³å°ä¸­æœ€å¤§çš„组织å•ä½ï¼Œä¸‹é¢å°†ä¼šè®²è¿°å¦‚何创建一个区域;此时å±å¹•中显示的是区域添加页é¢ï¼Œè¿™é‡Œéœ€è¦æ‚¨æä¾›ä»¥ä¸‹5项信æ¯ï¼š" - -#. Tag: para -#, no-c-format -msgid "Name - we will set this to the ever-descriptive 'Zone1' for our cloud." -msgstr "åç§° - æä¾›æè¿°æ€§çš„å称,这里以\"Zone1\"为例" - -#. Tag: para -#, no-c-format -msgid "Public DNS 1 - we will set this to '8.8.8.8' for our cloud." -msgstr "DNS1 - 设置为 8.8.8.8" - -#. Tag: para -#, no-c-format -msgid "Public DNS 2 - we will set this to '8.8.4.4' for our cloud." -msgstr "DNS2 - 设置 为8.8.4.4" - -#. Tag: para -#, no-c-format -msgid "Internal DNS1 - we will also set this to '8.8.8.8' for our cloud." -msgstr "内部DNS1 - åŒæ ·è®¾ç½®ä¸º 8.8.8.8" - -#. Tag: para -#, no-c-format -msgid "Internal DNS2 - we will also set this to '8.8.8.4' for our cloud." -msgstr "内部DNS2 - åŒæ ·è®¾ç½®ä¸º 8.8.4.4" - -#. Tag: title -#, no-c-format -msgid "Notes about DNS settings" -msgstr "关于DNS设置" - -#. Tag: para -#, no-c-format -msgid "" -"CloudStack distinguishes between internal and public DNS. Internal DNS is " -"assumed to be capable of resolving internal-only hostnames, such as your NFS" -" server’s DNS name. 
Public DNS is provided to the guest VMs to resolve " -"public IP addresses. You can enter the same DNS server for both types, but " -"if you do so, you must make sure that both internal and public IP addresses " -"can route to the DNS server. In our specific case we will not use any names " -"for resources internally, and we have indeed them set to look to the same " -"external resource so as to not add a namerserver setup to our list of " -"requirements." -msgstr "CloudStack区分内部和外部DNS。内部DNS用于解æžä»…内部使用的主机å,例如NFSæœåŠ¡å™¨çš„ä¸»æœºå;外部DNS用于为用户虚拟机æä¾›å¤–网IP地å€è§£æžã€‚您å¯ä»¥ä¸ºä»¥ä¸Šä¸¤ç§ç±»åž‹è¾“入相åŒçš„DNSæœåŠ¡å™¨ï¼Œä½†å¿…é¡»ç¡®è®¤å†…éƒ¨å’Œå¤–ç½‘IP地å€éƒ½å­˜åœ¨åˆ°è¾¾è¯¥DNSæœåŠ¡å™¨çš„è·¯ç”±ã€‚æœ¬æ‰‹å†Œæè¿°çš„环境中,内部资æºä¸ä½¿ç”¨ä¸»æœºå,因此这里将其设置为与外部DNS一致以简化安装,从而ä¸å¿…为此å†å®‰è£…一å°DNSæœåŠ¡å™¨ã€‚" - -#. Tag: title -#, no-c-format -msgid "Pod Configuration" -msgstr "é…ç½®æä¾›ç‚¹" - -#. Tag: para -#, no-c-format -msgid "" -"Now that we've added a Zone, the next step that comes up is a prompt for " -"information regading a pod. Which is looking for 4 items." -msgstr "åˆ°è¿™é‡Œæ‚¨å·²ç»æ·»åŠ äº†ä¸€ä¸ªåŒºåŸŸï¼Œä¸‹ä¸€æ­¥åŽä¼šæ˜¾ç¤ºæä¾›ç‚¹çš„相关信æ¯ï¼Œä»¥åŠæ·»åŠ æä¾›ç‚¹æ‰€éœ€ä¿¡æ¯ï¼š" - -#. Tag: para -#, no-c-format -msgid "Name - We'll use Pod1 for our cloud." -msgstr "åç§° - 这里填写“Pod1â€ä¸ºä¾‹" - -#. Tag: para -#, no-c-format -msgid "" -"Gateway - We'll use 172.16.10.1 as our gateway" -msgstr "网关 - 输入 172.16.10.1 " - -#. Tag: para -#, no-c-format -msgid "Netmask - We'll use 255.255.255.0" -msgstr "ç½‘ç»œæŽ©ç  - 输入 255.255.255.0" - -#. Tag: para -#, no-c-format -msgid "Start/end reserved system IPs - we will use 172.16.10.10-172.16.10.20" -msgstr "IP范围 - 输入172.16.10.10-172.16.10.20为例" - -#. Tag: para -#, no-c-format -msgid "Guest gateway - We'll use 172.16.10.1" -msgstr "用户网关 - 使用 172.16.10.1 " - -#. Tag: para -#, no-c-format -msgid "Guest netmask - We'll use 255.255.255.0" -msgstr "ç”¨æˆ·æŽ©ç  - 输入 255.255.255.0" - -#. 
Tag: para -#, no-c-format -msgid "" -"Guest start/end IP - We'll use " -"172.16.10.30-172.16.10.200" -msgstr "IP范围 - 这里使用 172.16.10.30-172.16.10.200" - -#. Tag: title -#, no-c-format -msgid "Cluster" -msgstr "集群" - -#. Tag: para -#, no-c-format -msgid "" -"Now that we've added a Zone, we need only add a few more items for " -"configuring the cluster." -msgstr "添加区域和æä¾›ç‚¹ä¹‹åŽï¼Œä»…需æä¾›ä»¥ä¸‹ä¿¡æ¯ä»¥é…置集群:" - -#. Tag: para -#, no-c-format -msgid "Name - We'll use Cluster1" -msgstr "åç§° - 这里使用 Cluster1 为例" - -#. Tag: para -#, no-c-format -msgid "Hypervisor - Choose KVM" -msgstr "Hypervisor - 选择KVM" - -#. Tag: para -#, no-c-format -msgid "" -"You should be prompted to add the first host to your cluster at this point. " -"Only a few bits of information are needed." -msgstr "此时å‘导会æç¤ºæ‚¨ä¸ºé›†ç¾¤æ·»åŠ ç¬¬ä¸€å°ä¸»æœºï¼Œéœ€æä¾›å¦‚下信æ¯ï¼š" - -#. Tag: para -#, no-c-format -msgid "" -"Hostname - we'll use the IP address 172.16.10.2 " -"since we didn't set up a DNS server." -msgstr "主机åç§° - 由于没有é…置内部DNSæœåŠ¡ï¼Œè¿™é‡Œä½¿ç”¨IP地å€172.16.10.2" - -#. Tag: para -#, no-c-format -msgid "Username - we'll use 'root'" -msgstr "用户å - 输入 root" - -#. Tag: para -#, no-c-format -msgid "Password - enter the operating system password for the root user" -msgstr "å¯†ç  - 输入æ“作系统中root用户的密ç " - -#. Tag: title -#, no-c-format -msgid "Primary Storage" -msgstr "主存储" - -#. Tag: para -#, no-c-format -msgid "" -"With you cluster now setup - you should be prompted for primary storage " -"information. Choose NFS as the storage type and then enter the following " -"values in the fields:" -msgstr "集群é…置过程中需æä¾›ä¸»å­˜å‚¨ä¿¡æ¯ï¼Œå­˜å‚¨ç±»åž‹é€‰æ‹©NFS,并æä¾›ä»¥ä¸‹ä¿¡æ¯ï¼š" - -#. Tag: para -#, no-c-format -msgid "Name - We'll use 'Primary1'" -msgstr "åç§° - 输入‘Primary1'" - -#. Tag: para -#, no-c-format -msgid "" -"Server - We'll be using the IP address " -"172.16.10.2" -msgstr "åç§° - 这里输入IP地å€172.16.10.2" - -#. 
Tag: para -#, no-c-format -msgid "Path - Well define /primary as the path we are using" -msgstr "路径 - 输入/primary" - -#. Tag: title -#, no-c-format -msgid "Secondary Storage" -msgstr "次è¦å­˜å‚¨" - -#. Tag: para -#, no-c-format -msgid "" -"If this is a new zone, you'll be prompted for secondary storage information " -"- populate it as follows:" -msgstr "如果添加的区域是一个新的区域,您需æä¾›æ¬¡è¦å­˜å‚¨ç›¸å…³ä¿¡æ¯ï¼š" - -#. Tag: para -#, no-c-format -msgid "" -"NFS server - We'll use the IP address 172.16.10.2" -msgstr "NFSæœåС噍 - 输入IPåœ°å€ 172.16.10.2" - -#. Tag: para -#, no-c-format -msgid "Path - We'll use /secondary" -msgstr "路径 - 输入 /secondary " - -#. Tag: para -#, no-c-format -msgid "" -"Now, click Launch and your cloud should begin setup - it may take several " -"minutes depending on your internet connection speed for setup to finalize." -msgstr "现在,点击“å¯åЍâ€ç„¶åŽæ‚¨çš„云平å°å°†å¼€å§‹é…置,ä¾èµ–于您实际的网络速度,é…置过程å¯èƒ½è€—时几分钟" diff --git a/docs/runbook/zh-CN/kvm.po b/docs/runbook/zh-CN/kvm.po deleted file mode 100644 index 906f82eea55..00000000000 --- a/docs/runbook/zh-CN/kvm.po +++ /dev/null @@ -1,145 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-msgid "" -msgstr "" -"Project-Id-Version: Apache CloudStack Runbook\n" -"Report-Msgid-Bugs-To: http://bugs.cloudstack.org/\n" -"POT-Creation-Date: 2012-07-10T15:13:22\n" -"PO-Revision-Date: 2012-08-03 09:24+0000\n" -"Last-Translator: micexia \n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Language: zh_CN\n" -"Plural-Forms: nplurals=1; plural=0\n" - -#. Tag: title -#, no-c-format -msgid "KVM Setup and installation" -msgstr "KVMé…置和安装" - -#. Tag: para -#, no-c-format -msgid "" -"KVM is the hypervisor we'll be using - we will recover the initial setup " -"which has already been done on the hypervisor host and cover installation of" -" the agent software, you can use the same steps to add additional KVM nodes " -"to your CloudStack environment." -msgstr "本文档使用KVM作为hypervisor,下文将回顾最如何é…ç½®hypervisor主机,其中大部分é…置工作已在é…置管ç†èŠ‚ç‚¹æ—¶å®Œæˆï¼›æŽ¥ä¸‹æ¥æè¿°å¦‚何安装agent。您å¯ä»¥åº”用相åŒçš„æ­¥éª¤æ·»åŠ é¢å¤–çš„KVM节点到CloudStack环境中。" - -#. Tag: title -#, no-c-format -msgid "Prerequisites" -msgstr "先决æ¡ä»¶" - -#. Tag: para -#, no-c-format -msgid "" -"We explicitly are using the management server as a compute node as well, " -"which means that we have already performed many of the prerequisite steps " -"when setting up the management server, but we will list them here for " -"clarity. Those steps are:" -msgstr "本文档æè¿°çš„çŽ¯å¢ƒä½¿ç”¨ç®¡ç†æœåŠ¡å™¨åŒæ—¶ä½œä¸ºè®¡ç®—节点,这æ„味ç€å¾ˆå¤šå…ˆå†³æ­¥éª¤å·²ç»åœ¨æ­å»ºç®¡ç†æœåŠ¡å™¨æ—¶å®Œæˆï¼›ä½†ä¸ºäº†æ¸…æ™°èµ·è§ï¼Œä»ç„¶åˆ—出相关步骤:" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -msgstr "" - -#. Tag: para -#, no-c-format -msgid "" -"You shouldn't need to do that for the management server, of course, but any " -"additional hosts will need for you to complete the above steps." 
-msgstr "您ä¸éœ€è¦åœ¨ç®¡ç†æœåŠ¡å™¨ä¸Šæ‰§è¡Œè¿™äº›æ­¥éª¤ï¼Œå½“ç„¶ï¼Œå¦‚æžœæ‚¨éœ€è¦æ·»åŠ é¢å¤–的主机以上步骤ä»ç„¶éœ€è¦æ‰§è¡Œã€‚" - -#. Tag: title -#, no-c-format -msgid "Installation" -msgstr "安装" - -#. Tag: para -#, no-c-format -msgid "" -"You'll need to ensure that you are in the directory that was created when we" -" extracted the the tarball. " -msgstr "确认您已进入解压安装包åŽäº§ç”Ÿçš„目录。" - -#. Tag: para -#, no-c-format -msgid "" -"You'll be running ./install.sh again and this time " -"choosing which will install the software necessary for " -"managing a KVM node." -msgstr "冿¬¡è¿è¡Œ./install.sh,这一次选择安装选项,这会安装管ç†KVM所需的相关软件包。" - -#. Tag: title -#, no-c-format -msgid "KVM Configuration" -msgstr "KVMé…ç½®" - -#. Tag: para -#, no-c-format -msgid "" -"KVM configuration is relatively simple at only a single item. We need to " -"edit the QEMU VNC configuration. This is done by editing " -"/etc/libvirt/qemu.conf and ensuring the following line " -"is present and uncommented." -msgstr "KVMçš„é…置相对简å•,仅需一项é…置;编辑QEMU VNCé…置文件/etc/libvirt/qemu.conf,并确ä¿ä»¥ä¸‹å†…容存在并且没有被注释掉。" - -#. Tag: screen -#, no-c-format -msgid "vnc_listen=0.0.0.0" -msgstr "vnc_listen=0.0.0.0" - -#. Tag: para -#, no-c-format -msgid "" -"You can now just restart the libvirt daemon by issuing the following " -"command:" -msgstr "此时您å¯ä»¥é‡å¯libvirtæœåŠ¡ï¼Œé€šè¿‡ä»¥ä¸‹å‘½ä»¤ï¼š" - -#. Tag: screen -#, no-c-format -msgid "" -"# service libvirt " -"restart" -msgstr "# service libvirt restart" - -#. Tag: para -#, no-c-format -msgid "" -"That concludes our installation and configuration of KVM, and we'll now move" -" to using the CloudStack UI for the actual configuration of our cloud." -msgstr "以上内容是KVM的安装和é…置,下é¢å°†ä»‹ç»å¦‚何使用CloudStack用户界é¢é…置云平å°ã€‚" diff --git a/docs/settx.sh b/docs/settx.sh deleted file mode 100755 index 55063250cf9..00000000000 --- a/docs/settx.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -#Licensed to the Apache Software Foundation (ASF) under one -#or more contributor license agreements. 
See the NOTICE file -#distributed with this work for additional information -#regarding copyright ownership. The ASF licenses this file -#to you under the Apache License, Version 2.0 (the -#"License"); you may not use this file except in compliance -#with the License. You may obtain a copy of the License at -#http://www.apache.org/licenses/LICENSE-2.0 -#Unless required by applicable law or agreed to in writing, -#software distributed under the License is distributed on an -#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -#KIND, either express or implied. See the License for the -#specific language governing permissions and limitations -#under the License. - -for file in `ls pot | grep .pot`; do - resource=`basename $file .pot` - echo $resource - tx set -t PO --auto-local -r ACS_DOCS.$resource "/$resource.po" \ - --source-lang=en \ - --source-file "pot/$resource.pot" --execute -done diff --git a/engine/api/resources/META-INF/cloudstack/core/spring-engine-api-core-context.xml b/engine/api/resources/META-INF/cloudstack/core/spring-engine-api-core-context.xml new file mode 100644 index 00000000000..1cb8829839d --- /dev/null +++ b/engine/api/resources/META-INF/cloudstack/core/spring-engine-api-core-context.xml @@ -0,0 +1,44 @@ + + + + + + + + + + + + + + + + diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataMotionStrategy.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataMotionStrategy.java index 6deb6c1afc0..85fbf73cb3b 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataMotionStrategy.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataMotionStrategy.java @@ -26,9 +26,9 @@ import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.host.Host; public interface DataMotionStrategy { - boolean canHandle(DataObject srcData, DataObject destData); + StrategyPriority canHandle(DataObject srcData, DataObject destData); - boolean canHandle(Map 
volumeMap, Host srcHost, Host destHost); + StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost); Void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback); diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java index 1e893db6bb5..c881570ba12 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreLifeCycle.java @@ -37,4 +37,6 @@ public interface DataStoreLifeCycle { boolean cancelMaintain(DataStore store); boolean deleteDataStore(DataStore store); + + boolean migrateToObjectStore(DataStore store); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java index 08844536264..7fbec0ad35f 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java @@ -19,6 +19,7 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import java.util.List; + import com.cloud.storage.DataStoreRole; public interface DataStoreManager { @@ -37,4 +38,6 @@ public interface DataStoreManager { DataStore getImageCacheStore(long zoneId); List listImageStores(); + + List listImageCacheStores(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java index ca0cc2c970a..b812f6efd99 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java @@ 
-28,4 +28,6 @@ public interface EndPointSelector { EndPoint select(DataStore store); List selectAll(DataStore store); + + EndPoint select(Scope scope, Long storeId); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotDataFactory.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotDataFactory.java index 0b8d1f104e1..d5255f40407 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotDataFactory.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotDataFactory.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.engine.subsystem.api.storage; +import java.util.List; + import com.cloud.storage.DataStoreRole; public interface SnapshotDataFactory { @@ -26,4 +28,6 @@ public interface SnapshotDataFactory { SnapshotInfo getSnapshot(DataObject obj, DataStore store); SnapshotInfo getSnapshot(long snapshotId, DataStoreRole role); + + List listSnapshotOnCache(long snapshotId); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java index 8d6b76010fe..a0ef7dd1273 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java @@ -32,4 +32,6 @@ public interface SnapshotInfo extends DataObject, Snapshot { Long getDataCenterId(); ObjectInDataStoreStateMachine.State getStatus(); + + boolean isRevertable(); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java index d594a0728cb..e953eb6e21b 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java +++ 
b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotService.java @@ -24,5 +24,5 @@ public interface SnapshotService { boolean deleteSnapshot(SnapshotInfo snapshot); - boolean revertSnapshot(SnapshotInfo snapshot); + boolean revertSnapshot(Long snapshotId); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java index 86ae532e2dc..cf30f5912ea 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotStrategy.java @@ -11,7 +11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package org.apache.cloudstack.engine.subsystem.api.storage; @@ -19,11 +19,20 @@ package org.apache.cloudstack.engine.subsystem.api.storage; import com.cloud.storage.Snapshot; public interface SnapshotStrategy { + enum SnapshotOperation { + TAKE, + BACKUP, + DELETE, + REVERT + } + SnapshotInfo takeSnapshot(SnapshotInfo snapshot); SnapshotInfo backupSnapshot(SnapshotInfo snapshot); boolean deleteSnapshot(Long snapshotId); - boolean canHandle(Snapshot snapshot); + boolean revertSnapshot(Long snapshotId); + + StrategyPriority canHandle(Snapshot snapshot, SnapshotOperation op); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageStrategyFactory.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageStrategyFactory.java new file mode 100644 index 00000000000..91bcc1fc12f --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StorageStrategyFactory.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.engine.subsystem.api.storage; + +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; + +import com.cloud.host.Host; +import com.cloud.storage.Snapshot; +import com.cloud.vm.snapshot.VMSnapshot; + +public interface StorageStrategyFactory { + + DataMotionStrategy getDataMotionStrategy(DataObject srcData, DataObject destData); + + DataMotionStrategy getDataMotionStrategy(Map volumeMap, Host srcHost, Host destHost); + + SnapshotStrategy getSnapshotStrategy(Snapshot snapshot, SnapshotOperation op); + + VMSnapshotStrategy getVmSnapshotStrategy(VMSnapshot vmSnapshot); + +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StrategyPriority.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StrategyPriority.java new file mode 100644 index 00000000000..12f2a6a4970 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/StrategyPriority.java @@ -0,0 +1,25 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.engine.subsystem.api.storage; + +public enum StrategyPriority { + CANT_HANDLE, + DEFAULT, + HYPERVISOR, + PLUGIN, + HIGHEST +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java index 0b78da058ed..801c4427f50 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateDataFactory.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.engine.subsystem.api.storage; +import java.util.List; + import com.cloud.storage.DataStoreRole; public interface TemplateDataFactory { @@ -28,4 +30,8 @@ public interface TemplateDataFactory { TemplateInfo getTemplate(long templateId, DataStoreRole storeRole); TemplateInfo getTemplate(long templateId, DataStoreRole storeRole, Long zoneId); + + TemplateInfo getReadyTemplateOnCache(long templateId); + + List listTemplateOnCache(long templateId); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java index 4950597963d..185d2545eb7 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/TemplateService.java @@ -36,7 +36,7 @@ public interface TemplateService { } public TemplateInfo getTemplate() { - return this.template; + return template; } } @@ -54,6 +54,8 @@ public interface TemplateService { AsyncCallFuture prepareTemplateOnPrimary(TemplateInfo srcTemplate, StoragePool pool); + void syncTemplateToRegionStore(long templateId, DataStore store); + void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId); void handleTemplateSync(DataStore store); @@ -62,5 +64,7 @@ public interface 
TemplateService { void addSystemVMTemplatesToSecondary(DataStore store); + void associateTemplateToZone(long templateId, Long zoneId); + void associateCrosszoneTemplatesToZone(long dcId); } diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VMSnapshotStrategy.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VMSnapshotStrategy.java new file mode 100644 index 00000000000..c2a0dedc430 --- /dev/null +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VMSnapshotStrategy.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.engine.subsystem.api.storage; + +import com.cloud.vm.snapshot.VMSnapshot; + +public interface VMSnapshotStrategy { + VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot); + boolean deleteVMSnapshot(VMSnapshot vmSnapshot); + boolean revertVMSnapshot(VMSnapshot vmSnapshot); + StrategyPriority canHandle(VMSnapshot vmSnapshot); +} diff --git a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeDataFactory.java b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeDataFactory.java index 99e3b596071..3de0b5b4c73 100644 --- a/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeDataFactory.java +++ b/engine/api/src/org/apache/cloudstack/engine/subsystem/api/storage/VolumeDataFactory.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.engine.subsystem.api.storage; +import java.util.List; + import com.cloud.storage.DataStoreRole; public interface VolumeDataFactory { @@ -28,4 +30,6 @@ public interface VolumeDataFactory { VolumeInfo getVolume(long volumeId, DataStoreRole storeRole); VolumeInfo getVolume(long volumeId); + + List listVolumeOnCache(long volumeId); } diff --git a/engine/components-api/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml b/engine/components-api/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml new file mode 100644 index 00000000000..fd7f96752f9 --- /dev/null +++ b/engine/components-api/resources/META-INF/cloudstack/core/spring-engine-components-api-core-context.xml @@ -0,0 +1,30 @@ + + + + diff --git a/engine/components-api/src/com/cloud/configuration/ConfigurationManager.java b/engine/components-api/src/com/cloud/configuration/ConfigurationManager.java index 5e1b9b58902..03a549f1a35 100755 --- a/engine/components-api/src/com/cloud/configuration/ConfigurationManager.java +++ b/engine/components-api/src/com/cloud/configuration/ConfigurationManager.java @@ -210,7 +210,7 @@ public interface 
ConfigurationManager { NetworkOfferingVO createNetworkOffering(String name, String displayText, TrafficType trafficType, String tags, boolean specifyVlan, Availability availability, Integer networkRate, Map> serviceProviderMap, boolean isDefault, Network.GuestType type, boolean systemOnly, Long serviceOfferingId, boolean conserveMode, Map> serviceCapabilityMap, - boolean specifyIpRanges, boolean isPersistent, Map details, boolean egressDefaultPolicy, Integer maxconn); + boolean specifyIpRanges, boolean isPersistent, Map details, boolean egressDefaultPolicy, Integer maxconn, boolean enableKeepAlive); Vlan createVlanAndPublicIpRange(long zoneId, long networkId, long physicalNetworkId, boolean forVirtualNetwork, Long podId, String startIP, String endIP, String vlanGateway, String vlanNetmask, String vlanId, Account vlanOwner, String startIPv6, String endIPv6, String vlanIp6Gateway, String vlanIp6Cidr) throws InsufficientCapacityException, ConcurrentOperationException, InvalidParameterValueException; diff --git a/engine/components-api/src/org/apache/cloudstack/context/ServerContexts.java b/engine/components-api/src/org/apache/cloudstack/context/ServerContexts.java deleted file mode 100644 index b9c249ce620..00000000000 --- a/engine/components-api/src/org/apache/cloudstack/context/ServerContexts.java +++ /dev/null @@ -1,67 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. 
You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package org.apache.cloudstack.context; - -import org.apache.cloudstack.framework.jobs.AsyncJob; - -import com.cloud.utils.db.Transaction; - -/** - * ServerContextInitializer is responsible for properly setting up the - * contexts that all of the CloudStack code expects. This includes - * - CallContext - * - JobContext - * - TransactionContext - */ -public class ServerContexts { - public static void registerUserContext(long userId, long accountId) { - Transaction txn = Transaction.open(Thread.currentThread().getName()); - CallContext context = CallContext.register(userId, accountId); - context.putContextParameter("Transaction", txn); -// AsyncJobExecutionContext.registerPseudoExecutionContext(userId, accountId); - } - - public static void unregisterUserContext() { - CallContext context = CallContext.unregister(); - if (context != null) { -// AsyncJobExecutionContext.unregister(); - Transaction txn = (Transaction)context.getContextParameter("Transaction"); - txn.close(Thread.currentThread().getName()); - } - } - - /** - * Use this method to initialize the internal background threads. 
- */ - public static void registerSystemContext() { - Transaction txn = Transaction.open(Thread.currentThread().getName()); - CallContext context = CallContext.registerSystemCallContextOnceOnly(); - context.putContextParameter("Transaction", txn); -// AsyncJobExecutionContext.registerPseudoExecutionContext(Account.ACCOUNT_ID_SYSTEM, User.UID_SYSTEM); - } - - public static void unregisterSystemContext() { - CallContext context = CallContext.unregister(); -// AsyncJobExecutionContext.unregister(); - Transaction txn = (Transaction)context.getContextParameter("Transaction"); - txn.close(Thread.currentThread().getName()); - } - - public static void registerJobContext(long userId, long accountId, AsyncJob job) { - CallContext.register(userId, accountId); - } -} diff --git a/engine/orchestration/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml b/engine/orchestration/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml new file mode 100644 index 00000000000..b5c4254abaa --- /dev/null +++ b/engine/orchestration/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml @@ -0,0 +1,71 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/orchestration/src/com/cloud/agent/manager/AgentAttache.java b/engine/orchestration/src/com/cloud/agent/manager/AgentAttache.java index 67deba0d648..ff35255c7db 100755 --- a/engine/orchestration/src/com/cloud/agent/manager/AgentAttache.java +++ b/engine/orchestration/src/com/cloud/agent/manager/AgentAttache.java @@ -31,6 +31,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import com.cloud.agent.Listener; @@ -100,6 +101,7 @@ public abstract class AgentAttache { }; protected final long _id; + protected String _name = null; protected 
final ConcurrentHashMap _waitForList; protected final LinkedList _requests; protected Long _currentSequence; @@ -120,9 +122,9 @@ public abstract class AgentAttache { Arrays.sort(s_commandsNotAllowedInConnectingMode); } - - protected AgentAttache(AgentManagerImpl agentMgr, final long id, boolean maintenance) { + protected AgentAttache(AgentManagerImpl agentMgr, final long id, final String name, boolean maintenance) { _id = id; + _name = name; _waitForList = new ConcurrentHashMap(); _currentSequence = null; _maintenance = maintenance; @@ -163,7 +165,7 @@ public abstract class AgentAttache { if (_maintenance) { for (final Command cmd : cmds) { if (Arrays.binarySearch(s_commandsAllowedInMaintenanceMode, cmd.getClass().toString()) < 0) { - throw new AgentUnavailableException("Unable to send " + cmd.getClass().toString() + " because agent is in maintenance mode", _id); + throw new AgentUnavailableException("Unable to send " + cmd.getClass().toString() + " because agent " + _name + " is in maintenance mode", _id); } } } @@ -171,7 +173,7 @@ public abstract class AgentAttache { if (_status == Status.Connecting) { for (final Command cmd : cmds) { if (Arrays.binarySearch(s_commandsNotAllowedInConnectingMode, cmd.getClass().toString()) >= 0) { - throw new AgentUnavailableException("Unable to send " + cmd.getClass().toString() + " because agent is in connecting mode", _id); + throw new AgentUnavailableException("Unable to send " + cmd.getClass().toString() + " because agent " + _name + " is in connecting mode", _id); } } } @@ -241,6 +243,10 @@ public abstract class AgentAttache { return _id; } + public String getName() { + return _name; + } + public int getQueueSize() { return _requests.size(); } @@ -349,7 +355,7 @@ public abstract class AgentAttache { synchronized(this) { try { if (isClosed()) { - throw new AgentUnavailableException("The link to the agent has been closed", _id); + throw new AgentUnavailableException("The link to the agent " + _name + " has been closed", _id); 
} if (req.executeInSequence() && _currentSequence != null) { @@ -497,14 +503,14 @@ public abstract class AgentAttache { */ protected abstract boolean isClosed(); - protected class Alarm implements Runnable { + protected class Alarm extends ManagedContextRunnable { long _seq; public Alarm(long seq) { _seq = seq; } @Override - public void run() { + protected void runInContext() { try { Listener listener = unregisterListener(_seq); if (listener != null) { diff --git a/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java index 6c88f6e1279..3e684cc9fd4 100755 --- a/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/com/cloud/agent/manager/AgentManagerImpl.java @@ -38,13 +38,12 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; - -import org.apache.cloudstack.context.ServerContexts; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -97,9 +96,9 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.QueryBuilder; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import 
com.cloud.utils.exception.HypervisorVersionChangedException; @@ -357,7 +356,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @DB protected boolean noDbTxn() { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); return !txn.dbTxnStarted(); } @@ -386,7 +385,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl throw new AgentUnavailableException("agent not logged into this management server", hostId); } - Request req = new Request(hostId, _nodeId, cmds, commands.stopOnError(), true); + Request req = new Request(hostId, agent.getName(), _nodeId, cmds, commands.stopOnError(), true); req.setSequence(agent.getNextSequence()); Answer[] answers = agent.send(req, timeout); notifyAnswersToMonitors(hostId, req.getSequence(), answers); @@ -439,7 +438,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl if (cmds.length == 0) { throw new AgentUnavailableException("Empty command set for agent " + agent.getId(), agent.getId()); } - Request req = new Request(hostId, _nodeId, cmds, commands.stopOnError(), true); + Request req = new Request(hostId, agent.getName(), _nodeId, cmds, commands.stopOnError(), true); req.setSequence(agent.getNextSequence()); agent.send(req, listener); @@ -681,7 +680,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl // } s_logger.debug("create DirectAgentAttache for " + host.getId()); - DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), resource, host.isInMaintenanceStates(), this); + DirectAgentAttache attache = new DirectAgentAttache(this, host.getId(), host.getName(), resource, host.isInMaintenanceStates(), this); AgentAttache old = null; synchronized (_agents) { @@ -844,7 +843,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return true; } - protected class DisconnectTask implements Runnable { + protected class 
DisconnectTask extends ManagedContextRunnable { AgentAttache _attache; Status.Event _event; boolean _investigate; @@ -856,7 +855,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } @Override - public void run() { + protected void runInContext() { try { if (_investigate == true) { handleDisconnectWithInvestigation(_attache, _event); @@ -971,7 +970,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl protected AgentAttache createAttacheForConnect(HostVO host, Link link) throws ConnectionException { s_logger.debug("create ConnectedAgentAttache for " + host.getId()); - AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), link, host.isInMaintenanceStates()); + AgentAttache attache = new ConnectedAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; @@ -1018,7 +1017,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl return attache; } - protected class SimulateStartTask implements Runnable { + protected class SimulateStartTask extends ManagedContextRunnable { ServerResource resource; Map details; long id; @@ -1030,8 +1029,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } @Override - public void run() { - ServerContexts.registerSystemContext(); + protected void runInContext() { try { if (s_logger.isDebugEnabled()) { s_logger.debug("Simulating start for resource " + resource.getName() + " id " + id); @@ -1055,13 +1053,11 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } catch (Exception e) { s_logger.warn("Unable to simulate start on resource " + id + " name " + resource.getName(), e); - } finally { - ServerContexts.unregisterSystemContext(); } } } - protected class HandleAgentConnectTask implements Runnable { + protected class HandleAgentConnectTask extends ManagedContextRunnable { Link _link; Command[] 
_cmds; Request _request; @@ -1072,22 +1068,16 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _request = request; } - @Override - public void run() { - ServerContexts.registerSystemContext(); - try { - _request.logD("Processing the first command "); - StartupCommand[] startups = new StartupCommand[_cmds.length]; - for (int i = 0; i < _cmds.length; i++) { - startups[i] = (StartupCommand)_cmds[i]; - } + protected void runInContext() { + _request.logD("Processing the first command "); + StartupCommand[] startups = new StartupCommand[_cmds.length]; + for (int i = 0; i < _cmds.length; i++) { + startups[i] = (StartupCommand)_cmds[i]; + } - AgentAttache attache = handleConnectedAgent(_link, startups, _request); - if (attache == null) { - s_logger.warn("Unable to create attache for agent: " + _request); - } - } finally { - ServerContexts.unregisterSystemContext(); + AgentAttache attache = handleConnectedAgent(_link, startups, _request); + if (attache == null) { + s_logger.warn("Unable to create attache for agent: " + _request); } } } @@ -1263,7 +1253,7 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl @Override protected void doTask(final Task task) throws Exception { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { final Type type = task.getType(); if (type == Task.Type.DATA) { @@ -1440,16 +1430,16 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl _pingMap.put(agentId, InaccurateClock.getTimeInSeconds()); } - protected class MonitorTask implements Runnable { + protected class MonitorTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { s_logger.trace("Agent Monitor is started."); try { List behindAgents = findAgentsBehindOnPing(); for (Long agentId : behindAgents) { - SearchCriteriaService sc = 
SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getId(), Op.EQ, agentId); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getId(), Op.EQ, agentId); HostVO h = sc.find(); if (h != null) { ResourceState resourceState = h.getResourceState(); @@ -1470,8 +1460,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl } } - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getResourceState(), Op.IN, ResourceState.PrepareForMaintenance, ResourceState.ErrorInMaintenance); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getResourceState(), Op.IN, ResourceState.PrepareForMaintenance, ResourceState.ErrorInMaintenance); List hosts = sc.list(); for (HostVO host : hosts) { diff --git a/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentAttache.java b/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentAttache.java index 058a90475fd..5bcde6be712 100755 --- a/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentAttache.java +++ b/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentAttache.java @@ -50,14 +50,14 @@ public class ClusteredAgentAttache extends ConnectedAgentAttache implements Rout s_clusteredAgentMgr = agentMgr; } - public ClusteredAgentAttache(AgentManagerImpl agentMgr, long id) { - super(agentMgr, id, null, false); + public ClusteredAgentAttache(AgentManagerImpl agentMgr, long id, String name) { + super(agentMgr, id, name, null, false); _forward = true; _transferRequests = new LinkedList(); } - public ClusteredAgentAttache(AgentManagerImpl agentMgr, long id, Link link, boolean maintenance) { - super(agentMgr, id, link, maintenance); + public ClusteredAgentAttache(AgentManagerImpl agentMgr, long id, String name, Link link, boolean maintenance) { + super(agentMgr, id, name, link, maintenance); _forward = link == null; _transferRequests = new LinkedList(); } diff --git 
a/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index 61066ec7dbc..8681263347e 100755 --- a/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -32,7 +32,6 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.Timer; -import java.util.TimerTask; import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.ScheduledExecutorService; @@ -44,14 +43,13 @@ import javax.naming.ConfigurationException; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; -import org.apache.log4j.Logger; - -import com.google.gson.Gson; - import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -85,45 +83,42 @@ import com.cloud.host.Status.Event; import com.cloud.resource.ServerResource; import com.cloud.serializer.GsonHelper; import com.cloud.utils.DateUtil; -import com.cloud.utils.Profiler; import com.cloud.utils.concurrency.NamedThreadFactory; +import com.cloud.utils.db.QueryBuilder; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.nio.Link; import 
com.cloud.utils.nio.Task; +import com.google.gson.Gson; @Local(value = { AgentManager.class, ClusteredAgentRebalanceService.class }) public class ClusteredAgentManagerImpl extends AgentManagerImpl implements ClusterManagerListener, ClusteredAgentRebalanceService { final static Logger s_logger = Logger.getLogger(ClusteredAgentManagerImpl.class); - private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("Cluster-AgentTransferExecutor")); + private static final ScheduledExecutorService s_transferExecutor = Executors.newScheduledThreadPool(2, new NamedThreadFactory("Cluster-AgentRebalancingExecutor")); private final long rebalanceTimeOut = 300000; // 5 mins - after this time remove the agent from the transfer list public final static long STARTUP_DELAY = 5000; public final static long SCAN_INTERVAL = 90000; // 90 seconds, it takes 60 sec for xenserver to fail login public final static int ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION = 5; // 5 seconds protected Set _agentToTransferIds = new HashSet(); - Gson _gson; - - @Inject - protected ClusterManager _clusterMgr = null; - protected HashMap _peers; protected HashMap _sslEngines; private final Timer _timer = new Timer("ClusteredAgentManager Timer"); - + private final Timer _agentLbTimer = new Timer("ClusteredAgentManager AgentRebalancing Timer"); + boolean _agentLbHappened = false; + + @Inject + protected ClusterManager _clusterMgr = null; @Inject protected ManagementServerHostDao _mshostDao; @Inject protected HostTransferMapDao _hostTransferDao; - - // @com.cloud.utils.component.Inject(adapter = AgentLoadBalancerPlanner.class) - @Inject protected List _lbPlanners; - - @Inject ConfigurationDao _configDao; + @Inject + protected List _lbPlanners; + @Inject + ConfigurationDao _configDao; @Inject ConfigDepot _configDepot; @@ -168,9 +163,10 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust if (s_logger.isDebugEnabled()) { 
s_logger.debug("Scheduled direct agent scan task to run at an interval of " + ScanInterval.value() + " seconds"); } - - // schedule transfer scan executor - if agent LB is enabled + + // Schedule tasks for agent rebalancing if (isAgentRebalanceEnabled()) { + s_transferExecutor.scheduleAtFixedRate(getAgentRebalanceScanTask(), 60000, 60000, TimeUnit.MILLISECONDS); s_transferExecutor.scheduleAtFixedRate(getTransferScanTask(), 60000, ClusteredAgentRebalanceService.DEFAULT_TRANSFER_CHECK_INTERVAL, TimeUnit.MILLISECONDS); } @@ -232,9 +228,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } - private class DirectAgentScanTimerTask extends TimerTask { + private class DirectAgentScanTimerTask extends ManagedContextTimerTask { @Override - public void run() { + protected void runInContext() { try { runDirectAgentScanTimerTask(); } catch (Throwable e) { @@ -250,7 +246,8 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust protected AgentAttache createAttache(long id) { s_logger.debug("create forwarding ClusteredAgentAttache for " + id); - final AgentAttache attache = new ClusteredAgentAttache(this, id); + HostVO host = _hostDao.findById(id); + final AgentAttache attache = new ClusteredAgentAttache(this, id, host.getName()); AgentAttache old = null; synchronized (_agents) { old = _agents.get(id); @@ -265,7 +262,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected AgentAttache createAttacheForConnect(HostVO host, Link link) { s_logger.debug("create ClusteredAgentAttache for " + host.getId()); - final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), link, host.isInMaintenanceStates()); + final AgentAttache attache = new ClusteredAgentAttache(this, host.getId(), host.getName(), link, host.isInMaintenanceStates()); link.attach(attache); AgentAttache old = null; synchronized (_agents) { @@ -284,7 +281,7 @@ public class 
ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust // return new DummyAttache(this, host.getId(), false); // } s_logger.debug("create ClusteredDirectAgentAttache for " + host.getId()); - final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), _nodeId, resource, host.isInMaintenanceStates(), this); + final DirectAgentAttache attache = new ClusteredDirectAgentAttache(this, host.getId(), host.getName(), _nodeId, resource, host.isInMaintenanceStates(), this); AgentAttache old = null; synchronized (_agents) { old = _agents.get(host.getId()); @@ -571,6 +568,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } } _timer.cancel(); + _agentLbTimer.cancel(); //cancel all transfer tasks s_transferExecutor.shutdownNow(); @@ -593,7 +591,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust @Override protected void doTask(final Task task) throws Exception { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { if (task.getType() != Task.Type.DATA) { super.doTask(task); @@ -747,7 +745,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust _timer.schedule(new AgentLoadBalancerTask(), 30000); } - public class AgentLoadBalancerTask extends TimerTask { + public class AgentLoadBalancerTask extends ManagedContextTimerTask { protected volatile boolean cancelled = false; public AgentLoadBalancerTask() { @@ -765,7 +763,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } @Override - public synchronized void run() { + protected synchronized void runInContext() { try { if (!cancelled) { startRebalanceAgents(); @@ -783,9 +781,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust public void startRebalanceAgents() { s_logger.debug("Management server " + _nodeId + " is asking other peers to 
rebalance their agents"); List allMS = _mshostDao.listBy(ManagementServerHost.State.Up); - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getManagementServerId(), Op.NNULL); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getManagementServerId(), Op.NNULL); + sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); List allManagedAgents = sc.list(); int avLoad = 0; @@ -926,9 +924,9 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } private Runnable getTransferScanTask() { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { + protected void runInContext() { try { if (s_logger.isTraceEnabled()) { s_logger.trace("Clustered agent transfer scan check, management server id:" + _nodeId); @@ -1174,7 +1172,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } - protected class RebalanceTask implements Runnable { + protected class RebalanceTask extends ManagedContextRunnable { Long hostId = null; Long currentOwnerId = null; Long futureOwnerId = null; @@ -1187,7 +1185,7 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } @Override - public void run() { + protected void runInContext() { try { if (s_logger.isDebugEnabled()) { s_logger.debug("Rebalancing host id=" + hostId); @@ -1354,44 +1352,52 @@ public class ClusteredAgentManagerImpl extends AgentManagerImpl implements Clust } public boolean rebalanceAgent(long agentId, Event event, long currentOwnerId, long futureOwnerId) throws AgentUnavailableException, OperationTimedoutException { - return _rebalanceService.executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event); + return executeRebalanceRequest(agentId, currentOwnerId, futureOwnerId, event); } public boolean isAgentRebalanceEnabled() { return EnableLB.value(); } - - private 
ClusteredAgentRebalanceService _rebalanceService; - - boolean _agentLbHappened = false; - public void agentrebalance() { - Profiler profilerAgentLB = new Profiler(); - profilerAgentLB.start(); - //initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold - if (EnableLB.value() && !_agentLbHappened) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getManagementServerId(), Op.NNULL); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing); - List allManagedRoutingAgents = sc.list(); - - sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing); - List allAgents = sc.list(); - double allHostsCount = allAgents.size(); - double managedHostsCount = allManagedRoutingAgents.size(); - if (allHostsCount > 0.0) { - double load = managedHostsCount / allHostsCount; - if (load >= ConnectedAgentThreshold.value()) { - s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value()); - _rebalanceService.scheduleRebalanceAgents(); - _agentLbHappened = true; - } else { - s_logger.trace("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + ConnectedAgentThreshold.value()); + + + private Runnable getAgentRebalanceScanTask() { + return new ManagedContextRunnable() { + @Override + protected void runInContext() { + try { + if (s_logger.isTraceEnabled()) { + s_logger.trace("Agent rebalance task check, management server id:" + _nodeId); } + //initiate agent lb task will be scheduled and executed only once, and only when number of agents loaded exceeds _connectedAgentsThreshold + if (!_agentLbHappened) { + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getManagementServerId(), Op.NNULL); + sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); + 
List allManagedRoutingAgents = sc.list(); + + sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); + List allAgents = sc.list(); + double allHostsCount = allAgents.size(); + double managedHostsCount = allManagedRoutingAgents.size(); + if (allHostsCount > 0.0) { + double load = managedHostsCount / allHostsCount; + if (load >= ConnectedAgentThreshold.value()) { + s_logger.debug("Scheduling agent rebalancing task as the average agent load " + load + " is more than the threshold " + ConnectedAgentThreshold.value()); + scheduleRebalanceAgents(); + _agentLbHappened = true; + } else { + s_logger.debug("Not scheduling agent rebalancing task as the averages load " + load + " is less than the threshold " + ConnectedAgentThreshold.value()); + } + } + } + } catch (Throwable e) { + s_logger.error("Problem with the clustered agent transfer scan check!", e); } } - profilerAgentLB.stop(); - } + }; +} + @Override public void rescan() { diff --git a/engine/orchestration/src/com/cloud/agent/manager/ClusteredDirectAgentAttache.java b/engine/orchestration/src/com/cloud/agent/manager/ClusteredDirectAgentAttache.java index 9012433b4d7..692f6ad67de 100755 --- a/engine/orchestration/src/com/cloud/agent/manager/ClusteredDirectAgentAttache.java +++ b/engine/orchestration/src/com/cloud/agent/manager/ClusteredDirectAgentAttache.java @@ -28,8 +28,8 @@ public class ClusteredDirectAgentAttache extends DirectAgentAttache implements R private final ClusteredAgentManagerImpl _mgr; private final long _nodeId; - public ClusteredDirectAgentAttache(AgentManagerImpl agentMgr, long id, long mgmtId, ServerResource resource, boolean maintenance, ClusteredAgentManagerImpl mgr) { - super(agentMgr, id, resource, maintenance, mgr); + public ClusteredDirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, long mgmtId, ServerResource resource, boolean maintenance, ClusteredAgentManagerImpl mgr) { + super(agentMgr, id, name, resource, maintenance, mgr); 
_mgr = mgr; _nodeId = mgmtId; } diff --git a/engine/orchestration/src/com/cloud/agent/manager/ConnectedAgentAttache.java b/engine/orchestration/src/com/cloud/agent/manager/ConnectedAgentAttache.java index e5d2867b96d..8ee47d55c26 100755 --- a/engine/orchestration/src/com/cloud/agent/manager/ConnectedAgentAttache.java +++ b/engine/orchestration/src/com/cloud/agent/manager/ConnectedAgentAttache.java @@ -33,8 +33,8 @@ public class ConnectedAgentAttache extends AgentAttache { protected Link _link; - public ConnectedAgentAttache(AgentManagerImpl agentMgr, final long id, final Link link, boolean maintenance) { - super(agentMgr, id, maintenance); + public ConnectedAgentAttache(AgentManagerImpl agentMgr, final long id, final String name, final Link link, boolean maintenance) { + super(agentMgr, id, name, maintenance); _link = link; } @@ -83,7 +83,7 @@ public class ConnectedAgentAttache extends AgentAttache { assert _link == null : "Duh...Says you....Forgot to call disconnect()!"; synchronized (this) { if (_link != null) { - s_logger.warn("Lost attache " + _id); + s_logger.warn("Lost attache " + _id + "(" + _name + ")"); disconnect(Status.Alert); } } diff --git a/engine/orchestration/src/com/cloud/agent/manager/DirectAgentAttache.java b/engine/orchestration/src/com/cloud/agent/manager/DirectAgentAttache.java index 5b5d8d21289..7d3f7659639 100755 --- a/engine/orchestration/src/com/cloud/agent/manager/DirectAgentAttache.java +++ b/engine/orchestration/src/com/cloud/agent/manager/DirectAgentAttache.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; @@ -43,8 +44,8 @@ public class DirectAgentAttache extends AgentAttache { AgentManagerImpl _mgr; long _seq = 0; - public DirectAgentAttache(AgentManagerImpl agentMgr, long id, ServerResource resource, boolean maintenance, 
AgentManagerImpl mgr) { - super(agentMgr, id, maintenance); + public DirectAgentAttache(AgentManagerImpl agentMgr, long id, String name, ServerResource resource, boolean maintenance, AgentManagerImpl mgr) { + super(agentMgr, id, name, maintenance); _resource = resource; _mgr = mgr; } @@ -52,7 +53,7 @@ public class DirectAgentAttache extends AgentAttache { @Override public void disconnect(Status state) { if (s_logger.isDebugEnabled()) { - s_logger.debug("Processing disconnect " + _id); + s_logger.debug("Processing disconnect " + _id + "(" + _name + ")"); } for (ScheduledFuture future : _futures) { @@ -118,7 +119,7 @@ public class DirectAgentAttache extends AgentAttache { assert _resource == null : "Come on now....If you're going to dabble in agent code, you better know how to close out our resources. Ever considered why there's a method called disconnect()?"; synchronized (this) { if (_resource != null) { - s_logger.warn("Lost attache for " + _id); + s_logger.warn("Lost attache for " + _id + "(" + _name + ")"); disconnect(Status.Alert); } } @@ -127,21 +128,21 @@ public class DirectAgentAttache extends AgentAttache { } } - protected class PingTask implements Runnable { + protected class PingTask extends ManagedContextRunnable { @Override - public synchronized void run() { + protected synchronized void runInContext() { try { ServerResource resource = _resource; if (resource != null) { PingCommand cmd = resource.getCurrentStatus(_id); if (cmd == null) { - s_logger.warn("Unable to get current status on " + _id); + s_logger.warn("Unable to get current status on " + _id + "(" + _name + ")"); _mgr.disconnectWithInvestigation(DirectAgentAttache.this, Event.AgentDisconnected); return; } if (s_logger.isDebugEnabled()) { - s_logger.debug("Ping from " + _id); + s_logger.debug("Ping from " + _id + "(" + _name + ")"); } long seq = _seq++; @@ -151,7 +152,7 @@ public class DirectAgentAttache extends AgentAttache { _mgr.handleCommands(DirectAgentAttache.this, seq, new 
Command[]{cmd}); } else { - s_logger.debug("Unable to send ping because agent is disconnected " + _id); + s_logger.debug("Unable to send ping because agent is disconnected " + _id + "(" + _name + ")"); } } catch (Exception e) { s_logger.warn("Unable to complete the ping task", e); @@ -160,7 +161,7 @@ public class DirectAgentAttache extends AgentAttache { } - protected class Task implements Runnable { + protected class Task extends ManagedContextRunnable { Request _req; public Task(Request req) { @@ -168,7 +169,7 @@ public class DirectAgentAttache extends AgentAttache { } @Override - public void run() { + protected void runInContext() { long seq = _req.getSequence(); try { ServerResource resource = _resource; diff --git a/engine/orchestration/src/com/cloud/agent/manager/DummyAttache.java b/engine/orchestration/src/com/cloud/agent/manager/DummyAttache.java index 182c1b85d59..2c768480ffe 100755 --- a/engine/orchestration/src/com/cloud/agent/manager/DummyAttache.java +++ b/engine/orchestration/src/com/cloud/agent/manager/DummyAttache.java @@ -23,8 +23,8 @@ import com.cloud.host.Status; public class DummyAttache extends AgentAttache { - public DummyAttache(AgentManagerImpl agentMgr, long id, boolean maintenance) { - super(agentMgr, id, maintenance); + public DummyAttache(AgentManagerImpl agentMgr, long id, String name, boolean maintenance) { + super(agentMgr, id, name, maintenance); } diff --git a/engine/orchestration/src/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java b/engine/orchestration/src/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java index 535ba07cd0b..1d744b79228 100755 --- a/engine/orchestration/src/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java +++ b/engine/orchestration/src/com/cloud/cluster/agentlb/ClusterBasedAgentLoadBalancerPlanner.java @@ -26,7 +26,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; import 
org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -36,9 +35,8 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.db.SearchCriteria2; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteriaService; @Component @Local(value=AgentLoadBalancerPlanner.class) @@ -49,9 +47,9 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements @Override public List getHostsToRebalance(long msId, int avLoad) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing); - sc.addAnd(sc.getEntity().getManagementServerId(), Op.EQ, msId); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); + sc.and(sc.entity().getManagementServerId(), Op.EQ, msId); List allHosts = sc.list(); if (allHosts.size() <= avLoad) { @@ -59,15 +57,15 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements return null; } - sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getManagementServerId(), Op.EQ, msId); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); + sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getManagementServerId(), Op.EQ, msId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); List directHosts = sc.list(); if (directHosts.isEmpty()) { s_logger.debug("No direct agents in status " + Status.Up + " exist for the management server " + msId + "; so it doesn't participate in agent rebalancing process"); return null; - } + } Map> hostToClusterMap = new HashMap>(); @@ -91,7 +89,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements int hostsLeft = directHosts.size(); List hostsToReturn = new ArrayList(); - s_logger.debug("Management 
server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() + + s_logger.debug("Management server " + msId + " can give away " + hostsToGive + " as it currently owns " + allHosts.size() + " and the average agent load in the system is " + avLoad + "; finalyzing list of hosts to give away..."); for (Long cluster : hostToClusterMap.keySet()) { List hostsInCluster = hostToClusterMap.get(cluster); @@ -105,7 +103,7 @@ public class ClusterBasedAgentLoadBalancerPlanner extends AdapterBase implements continue; } else { break; - } + } } else { s_logger.debug("Taking all " + hostsInCluster.size() + " hosts: " + hostsInCluster + " from cluster id=" + cluster); hostsToReturn.addAll(hostsInCluster); diff --git a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java index 24f0795fcc1..b74b4c55bac 100755 --- a/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/com/cloud/vm/VirtualMachineManagerImpl.java @@ -35,8 +35,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; - import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -47,10 +45,11 @@ import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.utils.identity.ManagementServerNode; import 
org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.utils.identity.ManagementServerNode; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -85,7 +84,6 @@ import com.cloud.agent.api.UnPlugNicCommand; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NicTO; import com.cloud.agent.api.to.VirtualMachineTO; -import com.cloud.agent.api.to.VolumeTO; import com.cloud.agent.manager.Commands; import com.cloud.agent.manager.allocator.HostAllocator; import com.cloud.alert.AlertManager; @@ -164,6 +162,9 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.fsm.NoTransitionException; @@ -257,7 +258,6 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac this._hostAllocators = _hostAllocators; } - @Inject protected List _storagePoolAllocators; @Inject @@ -283,20 +283,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac static final ConfigKey StartRetry = new ConfigKey(Integer.class, "start.retry", "Advanced", "10", "Number of times to retry create and start commands", true); static final ConfigKey VmOpWaitInterval = new ConfigKey("Advanced", Integer.class, "vm.op.wait.interval", "120", - "Time (in seconds) to wait before checking if a previous operation has succeeded", true); + "Time (in seconds) to wait before checking if a previous operation has succeeded", true); static final ConfigKey VmOpLockStateRetry = new ConfigKey("Advanced", Integer.class, "vm.op.lock.state.retry", "5", - "Times to retry locking the state of a VM for operations, -1 means 
forever", true); + "Times to retry locking the state of a VM for operations, -1 means forever", true); static final ConfigKey VmOpCleanupInterval = new ConfigKey("Advanced", Long.class, "vm.op.cleanup.interval", "86400", - "Interval to run the thread that cleans up the vm operations (in seconds)", false); + "Interval to run the thread that cleans up the vm operations (in seconds)", false); static final ConfigKey VmOpCleanupWait = new ConfigKey("Advanced", Long.class, "vm.op.cleanup.wait", "3600", - "Time (in seconds) to wait before cleanuping up any vm work items", true); + "Time (in seconds) to wait before cleanuping up any vm work items", true); static final ConfigKey VmOpCancelInterval = new ConfigKey("Advanced", Long.class, "vm.op.cancel.interval", "3600", - "Time (in seconds) to wait before cancelling a operation", false); + "Time (in seconds) to wait before cancelling a operation", false); static final ConfigKey VmDestroyForcestop = new ConfigKey("Advanced", Boolean.class, "vm.destroy.forcestop", "false", - "On destroy, force-stop takes this value ", true); + "On destroy, force-stop takes this value ", true); static final ConfigKey ClusterDeltaSyncInterval = new ConfigKey("Advanced", Integer.class, "sync.interval", "60", "Cluster Delta sync interval in seconds", - false); + false); ScheduledExecutorService _executor = null; @@ -311,12 +311,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override @DB - public void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, Pair rootDiskOffering, - LinkedHashMap dataDiskOfferings, LinkedHashMap auxiliaryNetworks, DeploymentPlan plan, + public void allocate(String vmInstanceName, final VirtualMachineTemplate template, ServiceOffering serviceOffering, final Pair rootDiskOffering, + LinkedHashMap dataDiskOfferings, final LinkedHashMap auxiliaryNetworks, DeploymentPlan plan, HypervisorType hyperType) throws InsufficientCapacityException { 
VMInstanceVO vm = _vmDao.findVMByInstanceName(vmInstanceName); - Account owner = _entityMgr.findById(Account.class, vm.getAccountId()); + final Account owner = _entityMgr.findById(Account.class, vm.getAccountId()); if (s_logger.isDebugEnabled()) { s_logger.debug("Allocating entries for VM: " + vm); @@ -327,52 +327,52 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac vm.setPodId(plan.getPodId()); } assert (plan.getClusterId() == null && plan.getPoolId() == null) : "We currently don't support cluster and pool preset yet"; - vm = _vmDao.persist(vm); + final VMInstanceVO vmFinal = _vmDao.persist(vm); + final LinkedHashMap dataDiskOfferingsFinal = dataDiskOfferings == null ? + new LinkedHashMap() : dataDiskOfferings; - VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vm, template, serviceOffering, null, null); - Transaction txn = Transaction.currentTxn(); - txn.start(); + final VirtualMachineProfileImpl vmProfile = new VirtualMachineProfileImpl(vmFinal, template, serviceOffering, null, null); + + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws InsufficientCapacityException { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Allocating nics for " + vmFinal); + } + + try { + _networkMgr.allocate(vmProfile, auxiliaryNetworks); + } catch (ConcurrentOperationException e) { + throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e); + } + + if (s_logger.isDebugEnabled()) { + s_logger.debug("Allocating disks for " + vmFinal); + } + + if (template.getFormat() == ImageFormat.ISO) { + volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vmFinal.getId(), rootDiskOffering.first(), rootDiskOffering.second(), vmFinal, template, owner); + } else if (template.getFormat() == ImageFormat.BAREMETAL) { + // Do nothing + } else { + volumeMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + 
vmFinal.getId(), rootDiskOffering.first(), template, vmFinal, owner); + } + + for (Map.Entry offering : dataDiskOfferingsFinal.entrySet()) { + volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vmFinal.getId(), offering.getKey(), offering.getValue(), vmFinal, template, owner); + } + } + }); if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocating nics for " + vm); - } - - try { - _networkMgr.allocate(vmProfile, auxiliaryNetworks); - } catch (ConcurrentOperationException e) { - throw new CloudRuntimeException("Concurrent operation while trying to allocate resources for the VM", e); - } - - if (dataDiskOfferings == null) { - dataDiskOfferings = new LinkedHashMap(0); - } - - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocating disks for " + vm); - } - - if (template.getFormat() == ImageFormat.ISO) { - volumeMgr.allocateRawVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), rootDiskOffering.second(), vm, template, owner); - } else if (template.getFormat() == ImageFormat.BAREMETAL) { - // Do nothing - } else { - volumeMgr.allocateTemplatedVolume(Type.ROOT, "ROOT-" + vm.getId(), rootDiskOffering.first(), template, vm, owner); - } - - for (Map.Entry offering : dataDiskOfferings.entrySet()) { - volumeMgr.allocateRawVolume(Type.DATADISK, "DATA-" + vm.getId(), offering.getKey(), offering.getValue(), vm, template, owner); - } - - txn.commit(); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Allocation completed for VM: " + vm); + s_logger.debug("Allocation completed for VM: " + vmFinal); } } @Override public void allocate(String vmInstanceName, VirtualMachineTemplate template, ServiceOffering serviceOffering, LinkedHashMap networks, - DeploymentPlan plan, HypervisorType hyperType) throws InsufficientCapacityException { + DeploymentPlan plan, HypervisorType hyperType) throws InsufficientCapacityException { allocate(vmInstanceName, template, serviceOffering, new Pair(serviceOffering, null), null, networks, plan, hyperType); } @@ -436,7 +436,7 
@@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachineGuru guru = getVmGuru(vm); guru.finalizeExpunge(vm); //remove the overcommit detials from the uservm details - _uservmDetailsDao.deleteDetails(vm.getId()); + _uservmDetailsDao.removeDetails(vm.getId()); // send hypervisor-dependent commands before removing List finalizeExpungeCommands = hvGuru.finalizeExpunge(vm); @@ -551,37 +551,41 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } @DB - protected Ternary changeToStartState(VirtualMachineGuru vmGuru, VMInstanceVO vm, User caller, Account account) + protected Ternary changeToStartState(VirtualMachineGuru vmGuru, final VMInstanceVO vm, final User caller, final Account account) throws ConcurrentOperationException { long vmId = vm.getId(); ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Starting, vm.getType(), vm.getId()); int retry = VmOpLockStateRetry.value(); while (retry-- != 0) { - Transaction txn = Transaction.currentTxn(); - Ternary result = null; - txn.start(); try { - Journal journal = new Journal.LogJournal("Creating " + vm, s_logger); - work = _workDao.persist(work); - ReservationContextImpl context = new ReservationContextImpl(work.getId(), journal, caller, account); + final ItWorkVO workFinal = work; + Ternary result = + Transaction.execute(new TransactionCallbackWithException, NoTransitionException>() { + @Override + public Ternary doInTransaction(TransactionStatus status) throws NoTransitionException { + Journal journal = new Journal.LogJournal("Creating " + vm, s_logger); + ItWorkVO work = _workDao.persist(workFinal); + ReservationContextImpl context = new ReservationContextImpl(work.getId(), journal, caller, account); - if (stateTransitTo(vm, Event.StartRequested, null, work.getId())) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); + if 
(stateTransitTo(vm, Event.StartRequested, null, work.getId())) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Successfully transitioned to start state for " + vm + " reservation id = " + work.getId()); + } + return new Ternary(vm, context, work); + } + + return new Ternary(null, null, work); } - result = new Ternary(vm, context, work); - txn.commit(); + }); + + work = result.third(); + if (result.first() != null) return result; - } } catch (NoTransitionException e) { if (s_logger.isDebugEnabled()) { s_logger.debug("Unable to transition into Starting state due to " + e.getMessage()); } - } finally { - if (result == null) { - txn.rollback(); - } } VMInstanceVO instance = _vmDao.findById(vmId); @@ -645,13 +649,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public void advanceStart(String vmUuid, Map params) throws InsufficientCapacityException, ConcurrentOperationException, - ResourceUnavailableException { + ResourceUnavailableException { advanceStart(vmUuid, params, null); } @Override public void advanceStart(String vmUuid, Map params, DeploymentPlan planToDeploy) throws InsufficientCapacityException, - ConcurrentOperationException, ResourceUnavailableException { + ConcurrentOperationException, ResourceUnavailableException { CallContext cctxt = CallContext.current(); Account account = cctxt.getCallingAccount(); User caller = cctxt.getCallingUser(); @@ -680,10 +684,10 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (planToDeploy != null && planToDeploy.getDataCenterId() != 0) { if (s_logger.isDebugEnabled()) { s_logger.debug("advanceStart: DeploymentPlan is provided, using dcId:" + planToDeploy.getDataCenterId() + ", podId: " + planToDeploy.getPodId() + ", clusterId: " + - planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + planToDeploy.getPoolId()); + planToDeploy.getClusterId() + ", hostId: " + planToDeploy.getHostId() + ", poolId: " + 
planToDeploy.getPoolId()); } plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), - planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx); + planToDeploy.getPoolId(), planToDeploy.getPhysicalNetworkId(), ctx); } HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType()); @@ -741,20 +745,20 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // planner if (s_logger.isDebugEnabled()) { s_logger.debug("Cannot satisfy the deployment plan passed in since the ready Root volume is in different cluster. volume's cluster: " + - rootVolClusterId + ", cluster specified: " + clusterIdSpecified); + rootVolClusterId + ", cluster specified: " + clusterIdSpecified); } throw new ResourceUnavailableException( - "Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, - Cluster.class, clusterIdSpecified); + "Root volume is ready in different cluster, Deployment plan provided cannot be satisfied, unable to create a deployment for " + vm, + Cluster.class, clusterIdSpecified); } } plan = new DataCenterDeployment(planToDeploy.getDataCenterId(), planToDeploy.getPodId(), planToDeploy.getClusterId(), planToDeploy.getHostId(), - vol.getPoolId(), null, ctx); + vol.getPoolId(), null, ctx); } else { plan = new DataCenterDeployment(rootVolDcId, rootVolPodId, rootVolClusterId, null, vol.getPoolId(), null, ctx); if (s_logger.isDebugEnabled()) { s_logger.debug(vol + " is READY, changing deployment plan to use this pool's dcId: " + rootVolDcId + " , podId: " + rootVolPodId + - " , and clusterId: " + rootVolClusterId); + " , and clusterId: " + rootVolClusterId); } planChangedByVolume = true; } @@ -781,7 +785,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac continue; } throw new InsufficientServerCapacityException("Unable to create a deployment for 
" + vmProfile, DataCenter.class, plan.getDataCenterId(), - areAffinityGroupsAssociated(vmProfile)); + areAffinityGroupsAssociated(vmProfile)); } if (dest != null) { @@ -796,18 +800,12 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac ClusterDetailsVO cluster_detail_ram = _clusterDetailsDao.findDetail(cluster_id, "memoryOvercommitRatio"); //storing the value of overcommit in the vm_details table for doing a capacity check in case the cluster overcommit ratio is changed. if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") == null && - ((Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f))) { - UserVmDetailVO vmDetail_cpu = new UserVmDetailVO(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue()); - UserVmDetailVO vmDetail_ram = new UserVmDetailVO(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue()); - _uservmDetailsDao.persist(vmDetail_cpu); - _uservmDetailsDao.persist(vmDetail_ram); + ((Float.parseFloat(cluster_detail_cpu.getValue()) > 1f || Float.parseFloat(cluster_detail_ram.getValue()) > 1f))) { + _uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue()); + _uservmDetailsDao.addDetail(vm.getId(), "memoryOvercommitRatio", cluster_detail_ram.getValue()); } else if (_uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio") != null) { - UserVmDetailVO vmDetail_cpu = _uservmDetailsDao.findDetail(vm.getId(), "cpuOvercommitRatio"); - vmDetail_cpu.setValue(cluster_detail_cpu.getValue()); - UserVmDetailVO vmDetail_ram = _uservmDetailsDao.findDetail(vm.getId(), "memoryOvercommitRatio"); - vmDetail_ram.setValue(cluster_detail_ram.getValue()); - _uservmDetailsDao.update(vmDetail_cpu.getId(), vmDetail_cpu); - _uservmDetailsDao.update(vmDetail_ram.getId(), vmDetail_ram); + _uservmDetailsDao.addDetail(vm.getId(), "cpuOvercommitRatio", cluster_detail_cpu.getValue()); + _uservmDetailsDao.addDetail(vm.getId(), 
"memoryOvercommitRatio", cluster_detail_ram.getValue()); } vmProfile.setCpuOvercommitRatio(Float.parseFloat(cluster_detail_cpu.getValue())); vmProfile.setMemoryOvercommitRatio(Float.parseFloat(cluster_detail_ram.getValue())); @@ -839,6 +837,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachineTO vmTO = hvGuru.implement(vmProfile); + handlePath(vmTO.getDisks(), vm.getHypervisorType()); + cmds = new Commands(Command.OnError.Stop); cmds.addCommand(new StartCommand(vmTO, dest.getHost(), getExecuteInSequence())); @@ -857,6 +857,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac startAnswer = cmds.getAnswer(StartAnswer.class); if (startAnswer != null && startAnswer.getResult()) { + handlePath(vmTO.getDisks(), startAnswer.getIqnToPath()); String host_guid = startAnswer.getHost_guid(); if (host_guid != null) { HostVO finalHost = _resourceMgr.findHostByGuid(host_guid); @@ -867,7 +868,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if (vmGuru.finalizeStart(vmProfile, destHostId, cmds, ctx)) { syncDiskChainChange(startAnswer); - + if (!changeState(vm, Event.OperationSucceeded, destHostId, work, Step.Done)) { throw new ConcurrentOperationException("Unable to transition to a new state."); } @@ -884,6 +885,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac StopCommand cmd = new StopCommand(vm, getExecuteInSequence()); StopAnswer answer = (StopAnswer)_agentMgr.easySend(destHostId, cmd); + if ( answer != null ) { + String hypervisortoolsversion = answer.getHypervisorToolsVersion(); + if (hypervisortoolsversion != null) { + if (vm.getType() == VirtualMachine.Type.User) { + UserVmVO userVm = _userVmDao.findById(vm.getId()); + _userVmDao.loadDetails(userVm); + userVm.setDetail("hypervisortoolsversion", hypervisortoolsversion); + _userVmDao.saveDetails(userVm); + } + } + } + if (answer == null || !answer.getResult()) { 
s_logger.warn("Unable to stop " + vm + " due to " + (answer != null ? answer.getDetails() : "no answers")); _haMgr.scheduleStop(vm, destHostId, WorkType.ForceStop); @@ -957,17 +970,66 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac throw new CloudRuntimeException("Unable to start instance '" + vm.getHostName() + "' (" + vm.getUuid() + "), see management server log for details"); } } - + + // for managed storage on KVM, need to make sure the path field of the volume in question is populated with the IQN + private void handlePath(DiskTO[] disks, HypervisorType hypervisorType) { + if (hypervisorType != HypervisorType.KVM) { + return; + } + + if (disks != null) { + for (DiskTO disk : disks) { + Map details = disk.getDetails(); + boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + + if (isManaged && disk.getPath() == null) { + Long volumeId = disk.getData().getId(); + VolumeVO volume = _volsDao.findById(volumeId); + + disk.setPath(volume.get_iScsiName()); + volume.setPath(volume.get_iScsiName()); + + _volsDao.update(volumeId, volume); + } + } + } + } + + // for managed storage on XenServer and VMware, need to update the DB with a path if the VDI/VMDK file was newly created + private void handlePath(DiskTO[] disks, Map iqnToPath) { + if (disks != null) { + for (DiskTO disk : disks) { + Map details = disk.getDetails(); + boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + + if (isManaged && disk.getPath() == null) { + Long volumeId = disk.getData().getId(); + VolumeVO volume = _volsDao.findById(volumeId); + String iScsiName = volume.get_iScsiName(); + String path = iqnToPath.get(iScsiName); + + volume.setPath(path); + + _volsDao.update(volumeId, volume); + } + } + } + } + private void syncDiskChainChange(StartAnswer answer) { - VirtualMachineTO vmSpec = answer.getVirtualMachine(); - - for(DiskTO disk : vmSpec.getDisks()) { - if(disk.getType() != 
Volume.Type.ISO) { - VolumeObjectTO vol = (VolumeObjectTO)disk.getData(); - - volumeMgr.updateVolumeDiskChain(vol.getId(), vol.getPath(), vol.getChainInfo()); - } - } + VirtualMachineTO vmSpec = answer.getVirtualMachine(); + + for(DiskTO disk : vmSpec.getDisks()) { + if(disk.getType() != Volume.Type.ISO) { + VolumeObjectTO vol = (VolumeObjectTO)disk.getData(); + VolumeVO volume = _volsDao.findById(vol.getId()); + + // Use getPath() from VolumeVO to get a fresh copy of what's in the DB. + // Before doing this, in a certain situation, getPath() from VolumeObjectTO + // returned null instead of an actual path (because it was out of date with the DB). + volumeMgr.updateVolumeDiskChain(vol.getId(), volume.getPath(), vol.getChainInfo()); + } + } } @Override @@ -989,7 +1051,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VirtualMachine vm = profile.getVirtualMachine(); StopCommand stop = new StopCommand(vm, getExecuteInSequence()); try { - Answer answer = _agentMgr.send(vm.getHostId(), stop); + StopAnswer answer = (StopAnswer) _agentMgr.send(vm.getHostId(), stop); + if ( answer != null ) { + String hypervisortoolsversion = answer.getHypervisorToolsVersion(); + if (hypervisortoolsversion != null) { + if (vm.getType() == VirtualMachine.Type.User) { + UserVmVO userVm = _userVmDao.findById(vm.getId()); + _userVmDao.loadDetails(userVm); + userVm.setDetail("hypervisortoolsversion", hypervisortoolsversion); + _userVmDao.saveDetails(userVm); + } + } + } if (!answer.getResult()) { s_logger.debug("Unable to stop VM due to " + answer.getDetails()); return false; @@ -1185,6 +1258,18 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac StopAnswer answer = null; try { answer = (StopAnswer)_agentMgr.send(vm.getHostId(), stop); + + if ( answer != null ) { + String hypervisortoolsversion = answer.getHypervisorToolsVersion(); + if (hypervisortoolsversion != null) { + if (vm.getType() == VirtualMachine.Type.User) { + 
UserVmVO userVm = _userVmDao.findById(vm.getId()); + _userVmDao.loadDetails(userVm); + userVm.setDetail("hypervisortoolsversion", hypervisortoolsversion); + _userVmDao.saveDetails(userVm); + } + } + } stopped = answer.getResult(); if (!stopped) { throw new CloudRuntimeException("Unable to stop the virtual machine due to " + answer.getDetails()); @@ -1203,10 +1288,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } catch (AgentUnavailableException e) { s_logger.warn("Unable to stop vm, agent unavailable: " + e.toString()); - throw e; } catch (OperationTimedoutException e) { s_logger.warn("Unable to stop vm, operation timed out: " + e.toString()); - throw e; } finally { if (!stopped) { if (!cleanUpEvenIfUnableToStop) { @@ -1485,7 +1568,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac boolean migrated = false; try { boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); - MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows); + MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to); mc.setHostGuid(dest.getHost().getGuid()); try { @@ -1530,8 +1613,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac _networkMgr.rollbackNicForMigration(vmSrc, profile); _alertMgr.sendAlert(alertType, fromHost.getDataCenterId(), fromHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + - fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + - dest.getPod().getName(), "Migrate Command failed. Please check logs."); + fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + + dest.getPod().getName(), "Migrate Command failed. 
Please check logs."); try { _agentMgr.send(dstHostId, new Commands(cleanup(vm)), null); } catch (AgentUnavailableException ae) { @@ -1564,8 +1647,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (_poolHostDao.findByPoolHost(pool.getId(), host.getId()) == null || pool.isLocal() != diskOffering.getUseLocalStorage()) { // Cannot find a pool for the volume. Throw an exception. throw new CloudRuntimeException("Cannot migrate volume " + volume + " to storage pool " + pool + " while migrating vm to host " + host + - ". Either the pool is not accessible from the " + - "host or because of the offering with which the volume is created it cannot be placed on " + "the given pool."); + ". Either the pool is not accessible from the " + + "host or because of the offering with which the volume is created it cannot be placed on " + "the given pool."); } else if (pool.getId() == currentPool.getId()) { // If the pool to migrate too is the same as current pool, remove the volume from the list of // volumes to be migrated. @@ -1597,7 +1680,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (!currentPoolAvailable && !volumeToPool.containsKey(volume)) { // Cannot find a pool for the volume. Throw an exception. 
throw new CloudRuntimeException("Cannot find a storage pool which is available for volume " + volume + " while migrating virtual machine " + - profile.getVirtualMachine() + " to host " + host); + profile.getVirtualMachine() + " to host " + host); } } } @@ -1633,7 +1716,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public void migrateWithStorage(String vmUuid, long srcHostId, long destHostId, Map volumeToPool) throws ResourceUnavailableException, - ConcurrentOperationException { + ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); HostVO srcHost = _hostDao.findById(srcHostId); @@ -1653,7 +1736,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // a vm and not migrating a vm with storage. if (volumeToPool.isEmpty()) { throw new InvalidParameterValueException("Migration of the vm " + vm + "from host " + srcHost + " to destination host " + destHost + - " doesn't involve migrating the volumes."); + " doesn't involve migrating the volumes."); } short alertType = AlertManager.ALERT_TYPE_USERVM_MIGRATE; @@ -1706,8 +1789,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (!migrated) { s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); _alertMgr.sendAlert(alertType, srcHost.getDataCenterId(), srcHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + srcHost.getName() + - " in zone " + dc.getName() + " and pod " + dc.getName(), - "Migrate Command failed. Please check logs."); + " in zone " + dc.getName() + " and pod " + dc.getName(), + "Migrate Command failed. 
Please check logs."); try { _agentMgr.send(destHostId, new Commands(cleanup(vm.getInstanceName())), null); stateTransitTo(vm, Event.OperationFailed, srcHostId); @@ -1838,9 +1921,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - protected class CleanupTask implements Runnable { + protected class CleanupTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { s_logger.trace("VM Operation Thread Running"); try { _workDao.cleanup(VmOpCleanupWait.value()); @@ -1875,7 +1958,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public void advanceReboot(String vmUuid, Map params) throws InsufficientCapacityException, ConcurrentOperationException, - ResourceUnavailableException { + ResourceUnavailableException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); DataCenter dc = _entityMgr.findById(DataCenter.class, vm.getDataCenterId()); @@ -1924,7 +2007,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // sync VM Snapshots related transient states List vmSnapshotsInTrasientStates = _vmSnapshotDao.listByInstanceId(vm.getId(), VMSnapshot.State.Expunging, VMSnapshot.State.Reverting, - VMSnapshot.State.Creating); + VMSnapshot.State.Creating); if (vmSnapshotsInTrasientStates.size() > 1) { s_logger.info("Found vm " + vm.getInstanceName() + " with VM snapshots in transient states, needs to sync VM snapshot state"); if (!_vmSnapshotMgr.syncVMSnapshot(vm, hostId)) { @@ -1999,7 +2082,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac return commands; } - public void deltaSync(Map> newStates) { + + + public void deltaSync(Map> newStates) { Map states = convertToInfos(newStates); for (Map.Entry entry : states.entrySet()) { @@ -2034,8 +2119,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - public void fullSync(final long clusterId, Map> newStates) { - 
if (newStates == null) + public void fullSync(final long clusterId, Map> newStates) { + if (newStates==null) return; Map infos = convertToInfos(newStates); Set set_vms = Collections.synchronizedSet(new HashSet()); @@ -2047,7 +2132,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // sync VM Snapshots related transient states List vmSnapshotsInExpungingStates = _vmSnapshotDao.listByInstanceId(vm.getId(), VMSnapshot.State.Expunging, VMSnapshot.State.Creating, - VMSnapshot.State.Reverting); + VMSnapshot.State.Reverting); if (vmSnapshotsInExpungingStates.size() > 0) { s_logger.info("Found vm " + vm.getInstanceName() + " in state. " + vm.getState() + ", needs to sync VM snapshot state"); Long hostId = null; @@ -2065,9 +2150,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } if ((info == null && (vm.getState() == State.Running || vm.getState() == State.Starting)) || - (info != null && (info.state == State.Running && vm.getState() == State.Starting))) { + (info != null && (info.state == State.Running && vm.getState() == State.Starting))) { s_logger.info("Found vm " + vm.getInstanceName() + " in inconsistent state. " + vm.getState() + " on CS while " + (info == null ? 
"Stopped" : "Running") + - " on agent"); + " on agent"); info = new AgentVmInfo(vm.getInstanceName(), vm, State.Stopped); // Bug 13850- grab outstanding work item if any for this VM state so that we mark it as DONE after we change VM state, else it will remain pending @@ -2104,7 +2189,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac e.printStackTrace(); } } else if (info != null && - (vm.getState() == State.Stopped || vm.getState() == State.Stopping || vm.isRemoved() || vm.getState() == State.Destroyed || vm.getState() == State.Expunging)) { + (vm.getState() == State.Stopped || vm.getState() == State.Stopping || vm.isRemoved() || vm.getState() == State.Destroyed || vm.getState() == State.Expunging)) { Host host = _hostDao.findByGuid(info.getHostUuid()); if (host != null) { s_logger.warn("Stopping a VM which is stopped/stopping/destroyed/expunging " + info.name); @@ -2123,19 +2208,19 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } } else - // host id can change - if (info != null && vm.getState() == State.Running) { - // check for host id changes - Host host = _hostDao.findByGuid(info.getHostUuid()); - if (host != null && (vm.getHostId() == null || host.getId() != vm.getHostId())) { - s_logger.info("Found vm " + vm.getInstanceName() + " with inconsistent host in db, new host is " + host.getId()); - try { - stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, host.getId()); - } catch (NoTransitionException e) { - s_logger.warn(e.getMessage()); + // host id can change + if (info != null && vm.getState() == State.Running) { + // check for host id changes + Host host = _hostDao.findByGuid(info.getHostUuid()); + if (host != null && (vm.getHostId() == null || host.getId() != vm.getHostId())) { + s_logger.info("Found vm " + vm.getInstanceName() + " with inconsistent host in db, new host is " + host.getId()); + try { + stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, 
host.getId()); + } catch (NoTransitionException e) { + s_logger.warn(e.getMessage()); + } } } - } /* else if(info == null && vm.getState() == State.Stopping) { //Handling CS-13376 s_logger.warn("Marking the VM as Stopped as it was still stopping on the CS" +vm.getName()); vm.setState(State.Stopped); // Setting the VM as stopped on the DB and clearing it from the host @@ -2164,24 +2249,27 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } - protected Map convertToInfos(final Map> newStates) { + + protected Map convertToInfos(final Map> newStates) { final HashMap map = new HashMap(); if (newStates == null) { return map; } boolean is_alien_vm = true; long alien_vm_count = -1; - for (Map.Entry> entry : newStates.entrySet()) { + for (Map.Entry> entry : newStates.entrySet()) { is_alien_vm = true; String name = entry.getKey(); VMInstanceVO vm = _vmDao.findVMByInstanceName(name); if (vm != null) { - map.put(vm.getId(), new AgentVmInfo(entry.getKey(), vm, entry.getValue().second(), entry.getValue().first())); + map.put(vm.getId(), new AgentVmInfo(entry.getKey(), vm, entry.getValue().second(), + entry.getValue().first(), entry.getValue().third())); is_alien_vm = false; } // alien VMs if (is_alien_vm) { - map.put(alien_vm_count--, new AgentVmInfo(entry.getKey(), null, entry.getValue().second(), entry.getValue().first())); + map.put(alien_vm_count--, new AgentVmInfo(entry.getKey(), null, entry.getValue().second(), + entry.getValue().first(), entry.getValue().third())); s_logger.warn("Found an alien VM " + entry.getKey()); } } @@ -2259,15 +2347,33 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac String hostDesc = "name: " + hostVO.getName() + " (id:" + hostVO.getId() + "), availability zone: " + dcVO.getName() + ", pod: " + podVO.getName(); _alertMgr.sendAlert(alertType, vm.getDataCenterId(), vm.getPodIdToDeployIn(), "VM (name: " + vm.getInstanceName() + ", id: " + vm.getId() + ") stopped on host " + - 
hostDesc + " due to storage failure", - "Virtual Machine " + vm.getInstanceName() + " (id: " + vm.getId() + ") running on host [" + vm.getHostId() + "] stopped due to storage failure."); + hostDesc + " due to storage failure", + "Virtual Machine " + vm.getInstanceName() + " (id: " + vm.getId() + ") running on host [" + vm.getHostId() + "] stopped due to storage failure."); + } + // track hypervsion tools version + if( info.hvtoolsversion != null && !info.hvtoolsversion.isEmpty() ) { + if (vm.getType() == VirtualMachine.Type.User) { + UserVmVO userVm = _userVmDao.findById(vm.getId()); + _userVmDao.loadDetails(userVm); + userVm.setDetail("hypervisortoolsversion", info.hvtoolsversion); + _userVmDao.saveDetails(userVm); + } + } + // track hypervsion tools version + if( info.hvtoolsversion != null && !info.hvtoolsversion.isEmpty() ) { + if (vm.getType() == VirtualMachine.Type.User) { + UserVmVO userVm = _userVmDao.findById(vm.getId()); + _userVmDao.loadDetails(userVm); + userVm.setDetail("hypervisortoolsversion", info.hvtoolsversion); + _userVmDao.saveDetails(userVm); + } } if (trackExternalChange) { if (serverState == State.Starting) { if (vm.getHostId() != null && vm.getHostId() != hostId) { s_logger.info("CloudStack is starting VM on host " + vm.getHostId() + ", but status report comes from a different host " + hostId + - ", skip status sync for vm: " + vm.getInstanceName()); + ", skip status sync for vm: " + vm.getInstanceName()); return null; } } @@ -2292,7 +2398,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (serverState == State.Starting) { if (vm.getHostId() != null && vm.getHostId() != hostId) { s_logger.info("CloudStack is starting VM on host " + vm.getHostId() + ", but status report comes from a different host " + hostId + - ", skip status sync for vm: " + vm.getInstanceName()); + ", skip status sync for vm: " + vm.getInstanceName()); return null; } } @@ -2308,7 +2414,7 @@ public class VirtualMachineManagerImpl 
extends ManagerBase implements VirtualMac if (vm.getHostId() == null || hostId != vm.getHostId()) { if (s_logger.isDebugEnabled()) { s_logger.debug("detected host change when VM " + vm + " is at running state, VM could be live-migrated externally from host " + vm.getHostId() + - " to host " + hostId); + " to host " + hostId); } stateTransitTo(vm, VirtualMachine.Event.AgentReportMigrated, hostId); @@ -2406,7 +2512,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } private void ensureVmRunningContext(long hostId, VMInstanceVO vm, Event cause) throws OperationTimedoutException, ResourceUnavailableException, NoTransitionException, - InsufficientAddressCapacityException { + InsufficientAddressCapacityException { VirtualMachineGuru vmGuru = getVmGuru(vm); s_logger.debug("VM state is starting on full sync so updating it to running"); @@ -2435,7 +2541,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac for (NicVO nic : nics) { Network network = _networkModel.getNetwork(nic.getNetworkId()); NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), null, _networkModel.isSecurityGroupSupportedInNetwork(network), - _networkModel.getNetworkTag(profile.getHypervisorType(), network)); + _networkModel.getNetworkTag(profile.getHypervisorType(), network)); profile.addNic(nicProfile); } @@ -2546,9 +2652,9 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac long agentId = agent.getId(); if (agent.getHypervisorType() == HypervisorType.XenServer) { // only for Xen - StartupRoutingCommand startup = (StartupRoutingCommand)cmd; - HashMap> allStates = startup.getClusterVMStateChanges(); - if (allStates != null) { + StartupRoutingCommand startup = (StartupRoutingCommand) cmd; + HashMap> allStates = startup.getClusterVMStateChanges(); + if (allStates != null){ fullSync(clusterId, allStates); } @@ -2591,9 +2697,9 @@ public class 
VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } } - protected class TransitionTask implements Runnable { + protected class TransitionTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { GlobalLock lock = GlobalLock.getInternLock("TransitionChecking"); if (lock == null) { s_logger.debug("Couldn't get the global lock"); @@ -2627,22 +2733,35 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac public String name; public State state; public String hostUuid; + public String hvtoolsversion; public VMInstanceVO vm; - public AgentVmInfo(String name, VMInstanceVO vm, State state, String host) { + + @SuppressWarnings("unchecked") + public AgentVmInfo(String name, VMInstanceVO vm, State state, String host, String hvtoolsversion) { this.name = name; this.state = state; this.vm = vm; - hostUuid = host; + this.hostUuid = host; + this.hvtoolsversion= hvtoolsversion; + + } + + public AgentVmInfo(String name, VMInstanceVO vm, State state, String host) { + this(name, vm, state, host, null); } public AgentVmInfo(String name, VMInstanceVO vm, State state) { - this(name, vm, state, null); + this(name, vm, state, null, null); } public String getHostUuid() { return hostUuid; } + + public String getHvtoolsversion() { + return hvtoolsversion; + } } @Override @@ -2661,7 +2780,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac if (!(vmInstance.getState().equals(State.Stopped) || vmInstance.getState().equals(State.Running))) { s_logger.warn("Unable to upgrade virtual machine " + vmInstance.toString() + " in state " + vmInstance.getState()); throw new InvalidParameterValueException("Unable to upgrade virtual machine " + vmInstance.toString() + " " + " in state " + vmInstance.getState() + - "; make sure the virtual machine is stopped/running"); + "; make sure the virtual machine is stopped/running"); } // Check if the service offering being upgraded to is 
what the VM is already running with @@ -2671,7 +2790,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } throw new InvalidParameterValueException("Not upgrading vm " + vmInstance.toString() + " since it already " + "has the requested service offering (" + - newServiceOffering.getName() + ")"); + newServiceOffering.getName() + ")"); } ServiceOfferingVO currentServiceOffering = _offeringDao.findByIdIncludingRemoved(vmInstance.getServiceOfferingId()); @@ -2689,8 +2808,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // offering if (currentServiceOffering.getUseLocalStorage() != newServiceOffering.getUseLocalStorage()) { throw new InvalidParameterValueException("Unable to upgrade virtual machine " + vmInstance.toString() + - ", cannot switch between local storage and shared storage service offerings. Current offering " + "useLocalStorage=" + - currentServiceOffering.getUseLocalStorage() + ", target offering useLocalStorage=" + newServiceOffering.getUseLocalStorage()); + ", cannot switch between local storage and shared storage service offerings. 
Current offering " + "useLocalStorage=" + + currentServiceOffering.getUseLocalStorage() + ", target offering useLocalStorage=" + newServiceOffering.getUseLocalStorage()); } // if vm is a system vm, check if it is a system service offering, if yes return with error as it cannot be used for user vms @@ -2701,7 +2820,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac // Check that there are enough resources to upgrade the service offering if (!isVirtualMachineUpgradable(vmInstance, newServiceOffering)) { throw new InvalidParameterValueException("Unable to upgrade virtual machine, not enough resources available " + "for an offering of " + newServiceOffering.getCpu() + - " cpu(s) at " + newServiceOffering.getSpeed() + " Mhz, and " + newServiceOffering.getRamSize() + " MB of memory"); + " cpu(s) at " + newServiceOffering.getSpeed() + " Mhz, and " + newServiceOffering.getRamSize() + " MB of memory"); } // Check that the service offering being upgraded to has all the tags of the current service offering @@ -2709,8 +2828,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac List newTags = StringUtils.csvTagsToList(newServiceOffering.getTags()); if (!newTags.containsAll(currentTags)) { throw new InvalidParameterValueException("Unable to upgrade virtual machine; the new service offering " + "does not have all the tags of the " + - "current service offering. Current service offering tags: " + currentTags + "; " + "new service " + "offering tags: " + - newTags); + "current service offering. 
Current service offering tags: " + currentTags + "; " + "new service " + "offering tags: " + + newTags); } } @@ -2727,7 +2846,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public NicProfile addVmToNetwork(VirtualMachine vm, Network network, NicProfile requested) throws ConcurrentOperationException, ResourceUnavailableException, - InsufficientCapacityException { + InsufficientCapacityException { CallContext cctx = CallContext.current(); s_logger.debug("Adding vm " + vm + " to network " + network + "; requested nic profile " + requested); @@ -2763,7 +2882,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac long isDefault = (nic.isDefaultNic()) ? 1 : 0; // insert nic's Id into DB as resource_name UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vmVO.getAccountId(), vmVO.getDataCenterId(), vmVO.getId(), - Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vmVO.getUuid()); + Long.toString(nic.getId()), network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vmVO.getUuid()); return nic; } else { s_logger.warn("Failed to plug nic to the vm " + vm + " in network " + network); @@ -2819,7 +2938,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), _networkModel.getNetworkRate(network.getId(), vm.getId()), - _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network)); + _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network)); //1) Unplug the nic if (vm.getState() == State.Running) { @@ -2830,7 +2949,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements 
VirtualMac s_logger.debug("Nic is unplugged successfully for vm " + vm + " in network " + network); long isDefault = (nic.isDefaultNic()) ? 1 : 0; UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_REMOVE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), Long.toString(nic.getId()), - network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vm.getUuid()); + network.getNetworkOfferingId(), null, isDefault, VirtualMachine.class.getName(), vm.getUuid()); } else { s_logger.warn("Failed to unplug nic for the vm " + vm + " from network " + network); return false; @@ -2902,7 +3021,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac try { NicProfile nicProfile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), _networkModel.getNetworkRate(network.getId(), vm.getId()), - _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network)); + _networkModel.isSecurityGroupSupportedInNetwork(network), _networkModel.getNetworkTag(vmProfile.getVirtualMachine().getHypervisorType(), network)); //1) Unplug the nic if (vm.getState() == State.Running) { @@ -2939,7 +3058,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public void findHostAndMigrate(String vmUuid, Long newSvcOfferingId, ExcludeList excludes) throws InsufficientCapacityException, ConcurrentOperationException, - ResourceUnavailableException { + ResourceUnavailableException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); if (vm == null) { @@ -3076,7 +3195,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac boolean migrated = false; try { boolean isWindows = _guestOsCategoryDao.findById(_guestOsDao.findById(vm.getGuestOSId()).getCategoryId()).getName().equalsIgnoreCase("Windows"); - MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), 
dest.getHost().getPrivateIpAddress(), isWindows); + MigrateCommand mc = new MigrateCommand(vm.getInstanceName(), dest.getHost().getPrivateIpAddress(), isWindows, to); mc.setHostGuid(dest.getHost().getGuid()); try { @@ -3124,8 +3243,8 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac s_logger.info("Migration was unsuccessful. Cleaning up: " + vm); _alertMgr.sendAlert(alertType, fromHost.getDataCenterId(), fromHost.getPodId(), "Unable to migrate vm " + vm.getInstanceName() + " from host " + - fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + - dest.getPod().getName(), "Migrate Command failed. Please check logs."); + fromHost.getName() + " in zone " + dest.getDataCenter().getName() + " and pod " + + dest.getPod().getName(), "Migrate Command failed. Please check logs."); try { _agentMgr.send(dstHostId, new Commands(cleanup(vm.getInstanceName())), null); } catch (AgentUnavailableException ae) { @@ -3145,7 +3264,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac } public boolean plugNic(Network network, NicTO nic, VirtualMachineTO vm, ReservationContext context, DeployDestination dest) throws ConcurrentOperationException, - ResourceUnavailableException, InsufficientCapacityException { + ResourceUnavailableException, InsufficientCapacityException { boolean result = true; VMInstanceVO router = _vmDao.findById(vm.getId()); @@ -3168,14 +3287,14 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac s_logger.warn("Unable to apply PlugNic, vm " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply PlugNic on the backend," + " vm " + vm + " is not in the right state", DataCenter.class, - router.getDataCenterId()); + router.getDataCenterId()); } return result; } public boolean unplugNic(Network network, NicTO nic, VirtualMachineTO vm, ReservationContext context, DeployDestination dest) 
throws ConcurrentOperationException, - ResourceUnavailableException { + ResourceUnavailableException { boolean result = true; VMInstanceVO router = _vmDao.findById(vm.getId()); @@ -3201,7 +3320,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac s_logger.warn("Unable to apply unplug nic, Vm " + router + " is not in the right state " + router.getState()); throw new ResourceUnavailableException("Unable to apply unplug nic on the backend," + " vm " + router + " is not in the right state", DataCenter.class, - router.getDataCenterId()); + router.getDataCenterId()); } return result; @@ -3209,7 +3328,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac @Override public VMInstanceVO reConfigureVm(String vmUuid, ServiceOffering oldServiceOffering, boolean reconfiguringOnExistingHost) throws ResourceUnavailableException, - ConcurrentOperationException { + ConcurrentOperationException { VMInstanceVO vm = _vmDao.findByUuid(vmUuid); long newServiceofferingId = vm.getServiceOfferingId(); @@ -3220,7 +3339,7 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac Float cpuOvercommitRatio = CapacityManager.CpuOverprovisioningFactor.valueIn(hostVo.getClusterId()); long minMemory = (long)(newServiceOffering.getRamSize() / memoryOvercommitRatio); ScaleVmCommand reconfigureCmd = new ScaleVmCommand(vm.getInstanceName(), newServiceOffering.getCpu(), (int)(newServiceOffering.getSpeed() / cpuOvercommitRatio), - newServiceOffering.getSpeed(), minMemory * 1024L * 1024L, newServiceOffering.getRamSize() * 1024L * 1024L, newServiceOffering.getLimitCpuUse()); + newServiceOffering.getSpeed(), minMemory * 1024L * 1024L, newServiceOffering.getRamSize() * 1024L * 1024L, newServiceOffering.getLimitCpuUse()); Long dstHostId = vm.getHostId(); ItWorkVO work = new ItWorkVO(UUID.randomUUID().toString(), _nodeId, State.Running, vm.getType(), vm.getId()); @@ -3273,4 +3392,13 @@ public class 
VirtualMachineManagerImpl extends ManagerBase implements VirtualMac VmOpWaitInterval}; } + public List getStoragePoolAllocators() { + return _storagePoolAllocators; + } + + @Inject + public void setStoragePoolAllocators(List storagePoolAllocators) { + this._storagePoolAllocators = storagePoolAllocators; + } + } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java index 204b832ab4a..e784295b1d3 100755 --- a/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/cloud/entity/api/VMEntityManagerImpl.java @@ -99,7 +99,6 @@ public class VMEntityManagerImpl implements VMEntityManager { @Inject protected VirtualMachineManager _itMgr; - @Inject protected List _planners; @Inject @@ -257,4 +256,13 @@ public class VMEntityManagerImpl implements VMEntityManager { return true; } + public List getPlanners() { + return _planners; + } + + @Inject + public void setPlanners(List planners) { + this._planners = planners; + } + } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/DcDetailsDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/DcDetailsDaoImpl.java index 2ace8a0fbcb..43385488d12 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/DcDetailsDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/DcDetailsDaoImpl.java @@ -29,7 +29,7 @@ import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component(value="EngineDcDetailsDao") 
@Local(value=DcDetailsDao.class) @@ -83,7 +83,7 @@ public class DcDetailsDaoImpl extends GenericDaoBase implement @Override public void persist(long dcId, Map details) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = DcSearch.create(); sc.setParameters("dcId", dcId); diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java index c02bed0ff70..4251baf06c6 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineClusterDaoImpl.java @@ -45,7 +45,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; @@ -162,7 +162,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase @Override public Map> getPodClusterIdMap(List clusterIds){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; Map> result = new HashMap>(); @@ -202,7 +202,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase @Override public List listDisabledClusters(long zoneId, Long podId) { GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); - clusterIdSearch.selectField(clusterIdSearch.entity().getId()); + clusterIdSearch.selectFields(clusterIdSearch.entity().getId()); clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ); if(podId != null){ 
clusterIdSearch.and("podId", clusterIdSearch.entity().getPodId(), Op.EQ); @@ -224,12 +224,12 @@ public class EngineClusterDaoImpl extends GenericDaoBase public List listClustersWithDisabledPods(long zoneId) { GenericSearchBuilder disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class); - disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId()); + disabledPodIdSearch.selectFields(disabledPodIdSearch.entity().getId()); disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ); disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ); GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); - clusterIdSearch.selectField(clusterIdSearch.entity().getId()); + clusterIdSearch.selectFields(clusterIdSearch.entity().getId()); clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER); clusterIdSearch.done(); @@ -243,7 +243,7 @@ public class EngineClusterDaoImpl extends GenericDaoBase @Override public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); EngineClusterVO cluster = createForUpdate(); cluster.setName(null); diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java index 5d8ef8d3243..de710532d98 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineDataCenterDaoImpl.java @@ -40,7 +40,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; 
import com.cloud.utils.db.SequenceFetcher; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.net.NetUtils; @@ -216,7 +216,7 @@ public class EngineDataCenterDaoImpl extends GenericDaoBase implements EngineHostDao { private static final Logger s_logger = Logger.getLogger(EngineHostDaoImpl.class); @@ -269,7 +269,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem AvailHypevisorInZone.done(); HostsInStatusSearch = createSearchBuilder(Long.class); - HostsInStatusSearch.selectField(HostsInStatusSearch.entity().getId()); + HostsInStatusSearch.selectFields(HostsInStatusSearch.entity().getId()); HostsInStatusSearch.and("dc", HostsInStatusSearch.entity().getDataCenterId(), Op.EQ); HostsInStatusSearch.and("pod", HostsInStatusSearch.entity().getPodId(), Op.EQ); HostsInStatusSearch.and("cluster", HostsInStatusSearch.entity().getClusterId(), Op.EQ); @@ -336,7 +336,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem @Override @DB public List findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Long limit, long managementServerId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = UnmanagedDirectConnectSearch.create(); sc.setParameters("lastPinged", lastPingSecondsAfter); @@ -356,7 +356,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem @Override @DB public List findAndUpdateApplianceToLoad(long lastPingSecondsAfter, long managementServerId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = UnmanagedApplianceSearch.create(); @@ -495,7 +495,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem @DB @Override public List findLostHosts(long timeout) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); 
PreparedStatement pstmt = null; List result = new ArrayList(); ResultSet rs = null; @@ -546,7 +546,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem public EngineHostVO persist(EngineHostVO host) { final String InsertSequenceSql = "INSERT INTO op_host(id) VALUES(?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); EngineHostVO dbHost = super.persist(host); @@ -572,7 +572,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem @Override @DB public boolean update(Long hostId, EngineHostVO host) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); boolean persisted = super.update(hostId, host); @@ -598,7 +598,7 @@ public class EngineHostDaoImpl extends GenericDaoBase implem ArrayList l = new ArrayList(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); ; PreparedStatement pstmt = null; try { diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java index 638d0bded05..2bcfdd148f9 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostPodDaoImpl.java @@ -41,7 +41,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component(value="EngineHostPodDao") @Local(value={EngineHostPodDao.class}) @@ -97,7 +97,7 @@ public class EngineHostPodDaoImpl extends GenericDaoBase HashMap> currentPodCidrSubnets = new HashMap>(); String 
selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + zoneId +" and removed IS NULL"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); ResultSet rs = stmt.executeQuery(); @@ -123,7 +123,7 @@ public class EngineHostPodDaoImpl extends GenericDaoBase @Override public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); EngineHostPodVO pod = createForUpdate(); pod.setName(null); @@ -138,7 +138,7 @@ public class EngineHostPodDaoImpl extends GenericDaoBase @Override public List listDisabledPods(long zoneId) { GenericSearchBuilder podIdSearch = createSearchBuilder(Long.class); - podIdSearch.selectField(podIdSearch.entity().getId()); + podIdSearch.selectFields(podIdSearch.entity().getId()); podIdSearch.and("dataCenterId", podIdSearch.entity().getDataCenterId(), Op.EQ); podIdSearch.and("allocationState", podIdSearch.entity().getAllocationState(), Op.EQ); podIdSearch.done(); diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDetailsDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDetailsDaoImpl.java index e0ae778911c..e0a6dceac31 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDetailsDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostDetailsDaoImpl.java @@ -31,7 +31,7 @@ import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component(value="EngineHostDetailsDao") 
@@ -96,7 +96,7 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement public void persist(long hostId, Map details) { final String InsertOrUpdateSql = "INSERT INTO `cloud`.`host_details` (host_id, name, value) VALUES (?,?,?) ON DUPLICATE KEY UPDATE value=?"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (Map.Entry detail : details.entrySet()) { diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostTagsDaoImpl.java b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostTagsDaoImpl.java index a70b7d1b234..17d6f811a61 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostTagsDaoImpl.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/HostTagsDaoImpl.java @@ -27,7 +27,7 @@ import com.cloud.host.HostTagVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component(value="EngineHostTagsDao") @Local(value=HostTagsDao.class) @@ -56,7 +56,7 @@ public class HostTagsDaoImpl extends GenericDaoBase implements @Override public void persist(long hostId, List hostTags) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = HostSearch.create(); diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java index d8c3b8e5132..5636e0783b4 100755 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/NetworkOrchestrator.java @@ -37,15 
+37,14 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.context.ServerContexts; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.region.PortableIpDao; import com.cloud.agent.AgentManager; @@ -168,6 +167,10 @@ import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria.Op; @@ -347,7 +350,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @DB public boolean configure(final String name, final Map params) throws ConfigurationException { // populate providers - Map> defaultSharedNetworkOfferingProviders = new HashMap>(); + final Map> defaultSharedNetworkOfferingProviders = new HashMap>(); Set defaultProviders = new HashSet(); defaultProviders.add(Network.Provider.VirtualRouter); @@ -355,7 +358,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra defaultSharedNetworkOfferingProviders.put(Service.Dns, defaultProviders); defaultSharedNetworkOfferingProviders.put(Service.UserData, 
defaultProviders); - Map> defaultIsolatedNetworkOfferingProviders = defaultSharedNetworkOfferingProviders; + final Map> defaultIsolatedNetworkOfferingProviders = defaultSharedNetworkOfferingProviders; defaultIsolatedNetworkOfferingProviders.put(Service.Dhcp, defaultProviders); defaultIsolatedNetworkOfferingProviders.put(Service.Dns, defaultProviders); defaultIsolatedNetworkOfferingProviders.put(Service.UserData, defaultProviders); @@ -366,7 +369,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra defaultIsolatedNetworkOfferingProviders.put(Service.PortForwarding, defaultProviders); defaultIsolatedNetworkOfferingProviders.put(Service.Vpn, defaultProviders); - Map> defaultSharedSGEnabledNetworkOfferingProviders = new HashMap>(); + final Map> defaultSharedSGEnabledNetworkOfferingProviders = new HashMap>(); defaultSharedSGEnabledNetworkOfferingProviders.put(Service.Dhcp, defaultProviders); defaultSharedSGEnabledNetworkOfferingProviders.put(Service.Dns, defaultProviders); defaultSharedSGEnabledNetworkOfferingProviders.put(Service.UserData, defaultProviders); @@ -374,7 +377,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra sgProviders.add(Provider.SecurityGroupProvider); defaultSharedSGEnabledNetworkOfferingProviders.put(Service.SecurityGroup, sgProviders); - Map> defaultIsolatedSourceNatEnabledNetworkOfferingProviders = new HashMap>(); + final Map> defaultIsolatedSourceNatEnabledNetworkOfferingProviders = new HashMap>(); defaultProviders.clear(); defaultProviders.add(Network.Provider.VirtualRouter); defaultIsolatedSourceNatEnabledNetworkOfferingProviders.put(Service.Dhcp, defaultProviders); @@ -388,9 +391,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra defaultIsolatedSourceNatEnabledNetworkOfferingProviders.put(Service.PortForwarding, defaultProviders); defaultIsolatedSourceNatEnabledNetworkOfferingProviders.put(Service.Vpn, defaultProviders); - Map> 
defaultVPCOffProviders = new HashMap>(); + final Map> defaultVPCOffProviders = new HashMap>(); defaultProviders.clear(); - defaultProviders.add(Network.Provider.VirtualRouter); + defaultProviders.add(Network.Provider.VPCVirtualRouter); defaultVPCOffProviders.put(Service.Dhcp, defaultProviders); defaultVPCOffProviders.put(Service.Dns, defaultProviders); defaultVPCOffProviders.put(Service.UserData, defaultProviders); @@ -402,133 +405,134 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra defaultVPCOffProviders.put(Service.PortForwarding, defaultProviders); defaultVPCOffProviders.put(Service.Vpn, defaultProviders); - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + NetworkOfferingVO offering = null; + //#1 - quick cloud network offering + if (_networkOfferingDao.findByUniqueName(NetworkOffering.QuickCloudNoServices) == null) { + offering = _configMgr.createNetworkOffering(NetworkOffering.QuickCloudNoServices, "Offering for QuickCloud with no services", TrafficType.Guest, null, true, + Availability.Optional, null, new HashMap>(), true, Network.GuestType.Shared, false, null, true, null, true, false, null, + false, null, true); + offering.setState(NetworkOffering.State.Enabled); + _networkOfferingDao.update(offering.getId(), offering); + } + + //#2 - SG enabled network offering + if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultSharedNetworkOfferingWithSGService) == null) { + offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultSharedNetworkOfferingWithSGService, "Offering for Shared Security group enabled networks", + TrafficType.Guest, null, true, Availability.Optional, null, defaultSharedNetworkOfferingProviders, true, Network.GuestType.Shared, false, null, true, null, true, + false, null, false, null, true); + 
offering.setState(NetworkOffering.State.Enabled); + _networkOfferingDao.update(offering.getId(), offering); + } + + //#3 - shared network offering with no SG service + if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultSharedNetworkOffering) == null) { + offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultSharedNetworkOffering, "Offering for Shared networks", TrafficType.Guest, null, true, + Availability.Optional, null, defaultSharedNetworkOfferingProviders, true, Network.GuestType.Shared, false, null, true, null, true, false, null, false, null, true); + offering.setState(NetworkOffering.State.Enabled); + _networkOfferingDao.update(offering.getId(), offering); + } + + //#4 - default isolated offering with Source nat service + if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService) == null) { + offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService, + "Offering for Isolated networks with Source Nat service enabled", TrafficType.Guest, null, false, Availability.Required, null, + defaultIsolatedSourceNatEnabledNetworkOfferingProviders, true, Network.GuestType.Isolated, false, null, true, null, false, false, null, false, null, true); + + offering.setState(NetworkOffering.State.Enabled); + _networkOfferingDao.update(offering.getId(), offering); + } + + //#5 - default vpc offering with LB service + if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworks) == null) { + offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworks, + "Offering for Isolated VPC networks with Source Nat service enabled", TrafficType.Guest, null, false, Availability.Optional, null, defaultVPCOffProviders, true, + Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true); + offering.setState(NetworkOffering.State.Enabled); + 
_networkOfferingDao.update(offering.getId(), offering); + } + + //#6 - default vpc offering with no LB service + if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksNoLB) == null) { + //remove LB service + defaultVPCOffProviders.remove(Service.Lb); + offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, + "Offering for Isolated VPC networks with Source Nat service enabled and LB service disabled", TrafficType.Guest, null, false, Availability.Optional, null, + defaultVPCOffProviders, true, Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true); + offering.setState(NetworkOffering.State.Enabled); + _networkOfferingDao.update(offering.getId(), offering); + } + + //#7 - isolated offering with source nat disabled + if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOffering) == null) { + offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOffering, "Offering for Isolated networks with no Source Nat service", + TrafficType.Guest, null, true, Availability.Optional, null, defaultIsolatedNetworkOfferingProviders, true, Network.GuestType.Isolated, false, null, true, null, + true, false, null, false, null, true); + offering.setState(NetworkOffering.State.Enabled); + _networkOfferingDao.update(offering.getId(), offering); + } + + //#8 - network offering with internal lb service + Map> internalLbOffProviders = new HashMap>(); + Set defaultVpcProvider = new HashSet(); + defaultVpcProvider.add(Network.Provider.VPCVirtualRouter); + + Set defaultInternalLbProvider = new HashSet(); + defaultInternalLbProvider.add(Network.Provider.InternalLbVm); + + internalLbOffProviders.put(Service.Dhcp, defaultVpcProvider); + internalLbOffProviders.put(Service.Dns, defaultVpcProvider); + internalLbOffProviders.put(Service.UserData, defaultVpcProvider); + internalLbOffProviders.put(Service.NetworkACL, 
defaultVpcProvider); + internalLbOffProviders.put(Service.Gateway, defaultVpcProvider); + internalLbOffProviders.put(Service.Lb, defaultInternalLbProvider); + internalLbOffProviders.put(Service.SourceNat, defaultVpcProvider); + + if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksWithInternalLB) == null) { + offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksWithInternalLB, + "Offering for Isolated VPC networks with Internal Lb support", TrafficType.Guest, null, false, Availability.Optional, null, internalLbOffProviders, true, + Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null, true); + offering.setState(NetworkOffering.State.Enabled); + offering.setInternalLb(true); + offering.setPublicLb(false); + _networkOfferingDao.update(offering.getId(), offering); + } + + Map> netscalerServiceProviders = new HashMap>(); + Set vrProvider = new HashSet(); + vrProvider.add(Provider.VirtualRouter); + Set sgProvider = new HashSet(); + sgProvider.add(Provider.SecurityGroupProvider); + Set nsProvider = new HashSet(); + nsProvider.add(Provider.Netscaler); + netscalerServiceProviders.put(Service.Dhcp, vrProvider); + netscalerServiceProviders.put(Service.Dns, vrProvider); + netscalerServiceProviders.put(Service.UserData, vrProvider); + netscalerServiceProviders.put(Service.SecurityGroup, sgProvider); + netscalerServiceProviders.put(Service.StaticNat, nsProvider); + netscalerServiceProviders.put(Service.Lb, nsProvider); + + Map> serviceCapabilityMap = new HashMap>(); + Map elb = new HashMap(); + elb.put(Capability.ElasticLb, "true"); + Map eip = new HashMap(); + eip.put(Capability.ElasticIp, "true"); + serviceCapabilityMap.put(Service.Lb, elb); + serviceCapabilityMap.put(Service.StaticNat, eip); + + if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultSharedEIPandELBNetworkOffering) == null) { + offering = 
_configMgr.createNetworkOffering(NetworkOffering.DefaultSharedEIPandELBNetworkOffering, + "Offering for Shared networks with Elastic IP and Elastic LB capabilities", TrafficType.Guest, null, true, Availability.Optional, null, netscalerServiceProviders, + true, Network.GuestType.Shared, false, null, true, serviceCapabilityMap, true, false, null, false, null, true); + offering.setState(NetworkOffering.State.Enabled); + offering.setDedicatedLB(false); + _networkOfferingDao.update(offering.getId(), offering); + } + } + }); - NetworkOfferingVO offering = null; - //#1 - quick cloud network offering - if (_networkOfferingDao.findByUniqueName(NetworkOffering.QuickCloudNoServices) == null) { - offering = _configMgr.createNetworkOffering(NetworkOffering.QuickCloudNoServices, "Offering for QuickCloud with no services", TrafficType.Guest, null, true, - Availability.Optional, null, new HashMap>(), true, Network.GuestType.Shared, false, null, true, null, true, false, null, - false, null); - offering.setState(NetworkOffering.State.Enabled); - _networkOfferingDao.update(offering.getId(), offering); - } - - //#2 - SG enabled network offering - if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultSharedNetworkOfferingWithSGService) == null) { - offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultSharedNetworkOfferingWithSGService, "Offering for Shared Security group enabled networks", - TrafficType.Guest, null, true, Availability.Optional, null, defaultSharedNetworkOfferingProviders, true, Network.GuestType.Shared, false, null, true, null, true, - false, null, false, null); - offering.setState(NetworkOffering.State.Enabled); - _networkOfferingDao.update(offering.getId(), offering); - } - - //#3 - shared network offering with no SG service - if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultSharedNetworkOffering) == null) { - offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultSharedNetworkOffering, "Offering for Shared 
networks", TrafficType.Guest, null, true, - Availability.Optional, null, defaultSharedNetworkOfferingProviders, true, Network.GuestType.Shared, false, null, true, null, true, false, null, false, null); - offering.setState(NetworkOffering.State.Enabled); - _networkOfferingDao.update(offering.getId(), offering); - } - - //#4 - default isolated offering with Source nat service - if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService) == null) { - offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService, - "Offering for Isolated networks with Source Nat service enabled", TrafficType.Guest, null, false, Availability.Required, null, - defaultIsolatedSourceNatEnabledNetworkOfferingProviders, true, Network.GuestType.Isolated, false, null, true, null, false, false, null, false, null); - - offering.setState(NetworkOffering.State.Enabled); - _networkOfferingDao.update(offering.getId(), offering); - } - - //#5 - default vpc offering with LB service - if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworks) == null) { - offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworks, - "Offering for Isolated VPC networks with Source Nat service enabled", TrafficType.Guest, null, false, Availability.Optional, null, defaultVPCOffProviders, true, - Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null); - offering.setState(NetworkOffering.State.Enabled); - _networkOfferingDao.update(offering.getId(), offering); - } - - //#6 - default vpc offering with no LB service - if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksNoLB) == null) { - //remove LB service - defaultVPCOffProviders.remove(Service.Lb); - offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, 
- "Offering for Isolated VPC networks with Source Nat service enabled and LB service disabled", TrafficType.Guest, null, false, Availability.Optional, null, - defaultVPCOffProviders, true, Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null); - offering.setState(NetworkOffering.State.Enabled); - _networkOfferingDao.update(offering.getId(), offering); - } - - //#7 - isolated offering with source nat disabled - if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOffering) == null) { - offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOffering, "Offering for Isolated networks with no Source Nat service", - TrafficType.Guest, null, true, Availability.Optional, null, defaultIsolatedNetworkOfferingProviders, true, Network.GuestType.Isolated, false, null, true, null, - true, false, null, false, null); - offering.setState(NetworkOffering.State.Enabled); - _networkOfferingDao.update(offering.getId(), offering); - } - - //#8 - network offering with internal lb service - Map> internalLbOffProviders = new HashMap>(); - Set defaultVpcProvider = new HashSet(); - defaultVpcProvider.add(Network.Provider.VPCVirtualRouter); - - Set defaultInternalLbProvider = new HashSet(); - defaultInternalLbProvider.add(Network.Provider.InternalLbVm); - - internalLbOffProviders.put(Service.Dhcp, defaultVpcProvider); - internalLbOffProviders.put(Service.Dns, defaultVpcProvider); - internalLbOffProviders.put(Service.UserData, defaultVpcProvider); - internalLbOffProviders.put(Service.NetworkACL, defaultVpcProvider); - internalLbOffProviders.put(Service.Gateway, defaultVpcProvider); - internalLbOffProviders.put(Service.Lb, defaultInternalLbProvider); - internalLbOffProviders.put(Service.SourceNat, defaultVpcProvider); - - if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksWithInternalLB) == null) { - offering = 
_configMgr.createNetworkOffering(NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksWithInternalLB, - "Offering for Isolated VPC networks with Internal Lb support", TrafficType.Guest, null, false, Availability.Optional, null, internalLbOffProviders, true, - Network.GuestType.Isolated, false, null, false, null, false, false, null, false, null); - offering.setState(NetworkOffering.State.Enabled); - offering.setInternalLb(true); - offering.setPublicLb(false); - _networkOfferingDao.update(offering.getId(), offering); - } - - Map> netscalerServiceProviders = new HashMap>(); - Set vrProvider = new HashSet(); - vrProvider.add(Provider.VirtualRouter); - Set sgProvider = new HashSet(); - sgProvider.add(Provider.SecurityGroupProvider); - Set nsProvider = new HashSet(); - nsProvider.add(Provider.Netscaler); - netscalerServiceProviders.put(Service.Dhcp, vrProvider); - netscalerServiceProviders.put(Service.Dns, vrProvider); - netscalerServiceProviders.put(Service.UserData, vrProvider); - netscalerServiceProviders.put(Service.SecurityGroup, sgProvider); - netscalerServiceProviders.put(Service.StaticNat, nsProvider); - netscalerServiceProviders.put(Service.Lb, nsProvider); - - Map> serviceCapabilityMap = new HashMap>(); - Map elb = new HashMap(); - elb.put(Capability.ElasticLb, "true"); - Map eip = new HashMap(); - eip.put(Capability.ElasticIp, "true"); - serviceCapabilityMap.put(Service.Lb, elb); - serviceCapabilityMap.put(Service.StaticNat, eip); - - if (_networkOfferingDao.findByUniqueName(NetworkOffering.DefaultSharedEIPandELBNetworkOffering) == null) { - offering = _configMgr.createNetworkOffering(NetworkOffering.DefaultSharedEIPandELBNetworkOffering, - "Offering for Shared networks with Elastic IP and Elastic LB capabilities", TrafficType.Guest, null, true, Availability.Optional, null, netscalerServiceProviders, - true, Network.GuestType.Shared, false, null, true, serviceCapabilityMap, true, false, null, false, null); - 
offering.setState(NetworkOffering.State.Enabled); - offering.setDedicatedLB(false); - _networkOfferingDao.update(offering.getId(), offering); - } - - txn.commit(); AssignIpAddressSearch = _ipAddressDao.createSearchBuilder(); AssignIpAddressSearch.and("dc", AssignIpAddressSearch.entity().getDataCenterId(), Op.EQ); @@ -591,8 +595,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override @DB - public List setupNetwork(Account owner, NetworkOffering offering, Network predefined, DeploymentPlan plan, String name, String displayText, - boolean errorIfAlreadySetup, Long domainId, ACLType aclType, Boolean subdomainAccess, Long vpcId, Boolean isDisplayNetworkEnabled) throws ConcurrentOperationException { + public List setupNetwork(final Account owner, final NetworkOffering offering, final Network predefined, final DeploymentPlan plan, final String name, final String displayText, + boolean errorIfAlreadySetup, final Long domainId, final ACLType aclType, final Boolean subdomainAccess, final Long vpcId, final Boolean isDisplayNetworkEnabled) throws ConcurrentOperationException { Account locked = _accountDao.acquireInLockTable(owner.getId()); if (locked == null) { @@ -600,8 +604,11 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } try { - if (predefined == null || - (offering.getTrafficType() != TrafficType.Guest && predefined.getCidr() == null && predefined.getBroadcastUri() == null && !(predefined.getBroadcastDomainType() == BroadcastDomainType.Vlan || predefined.getBroadcastDomainType() == BroadcastDomainType.Lswitch))) { + if (predefined == null || (offering.getTrafficType() != TrafficType.Guest && predefined.getCidr() == null && predefined.getBroadcastUri() == null && + !(predefined.getBroadcastDomainType() == BroadcastDomainType.Vlan || + predefined.getBroadcastDomainType() == BroadcastDomainType.Lswitch || + predefined.getBroadcastDomainType() == BroadcastDomainType.Vxlan) + )) { List configs = 
_networksDao.listBy(owner.getId(), offering.getId(), plan.getDataCenterId()); if (configs.size() > 0) { if (s_logger.isDebugEnabled()) { @@ -620,12 +627,12 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } - List networks = new ArrayList(); + final List networks = new ArrayList(); long related = -1; - for (NetworkGuru guru : _networkGurus) { - Network network = guru.design(offering, plan, predefined, owner); + for (final NetworkGuru guru : _networkGurus) { + final Network network = guru.design(offering, plan, predefined, owner); if (network == null) { continue; } @@ -639,28 +646,26 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra continue; } - long id = _networksDao.getNextInSequence(Long.class, "id"); + final long id = _networksDao.getNextInSequence(Long.class, "id"); if (related == -1) { related = id; } - Transaction txn = Transaction.currentTxn(); - txn.start(); - - NetworkVO vo = new NetworkVO(id, network, offering.getId(), guru.getName(), owner.getDomainId(), owner.getId(), related, name, displayText, - predefined.getNetworkDomain(), offering.getGuestType(), plan.getDataCenterId(), plan.getPhysicalNetworkId(), aclType, offering.getSpecifyIpRanges(), vpcId); - vo.setDisplayNetwork(isDisplayNetworkEnabled == null ? 
true : isDisplayNetworkEnabled); - networks.add(_networksDao.persist(vo, vo.getGuestType() == Network.GuestType.Isolated, - finalizeServicesAndProvidersForNetwork(offering, plan.getPhysicalNetworkId()))); - - if (domainId != null && aclType == ACLType.Domain) { - if (subdomainAccess == null) { - subdomainAccess = true; + final long relatedFile = related; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + NetworkVO vo = new NetworkVO(id, network, offering.getId(), guru.getName(), owner.getDomainId(), owner.getId(), relatedFile, name, displayText, + predefined.getNetworkDomain(), offering.getGuestType(), plan.getDataCenterId(), plan.getPhysicalNetworkId(), aclType, offering.getSpecifyIpRanges(), vpcId); + vo.setDisplayNetwork(isDisplayNetworkEnabled == null ? true : isDisplayNetworkEnabled); + networks.add(_networksDao.persist(vo, vo.getGuestType() == Network.GuestType.Isolated, + finalizeServicesAndProvidersForNetwork(offering, plan.getPhysicalNetworkId()))); + + if (domainId != null && aclType == ACLType.Domain) { + _networksDao.addDomainToNetwork(id, domainId, subdomainAccess == null ? 
true : subdomainAccess); + } } - _networksDao.addDomainToNetwork(id, domainId, subdomainAccess); - } - - txn.commit(); + }); } if (networks.size() < 1) { @@ -679,73 +684,74 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override @DB - public void allocate(VirtualMachineProfile vm, LinkedHashMap networks) throws InsufficientCapacityException, + public void allocate(final VirtualMachineProfile vm, final LinkedHashMap networks) throws InsufficientCapacityException, ConcurrentOperationException { - Transaction txn = Transaction.currentTxn(); - txn.start(); - int deviceId = 0; - - boolean[] deviceIds = new boolean[networks.size()]; - Arrays.fill(deviceIds, false); - - List nics = new ArrayList(networks.size()); - NicProfile defaultNic = null; - - for (Map.Entry network : networks.entrySet()) { - Network config = network.getKey(); - NicProfile requested = network.getValue(); - - Boolean isDefaultNic = false; - if (vm != null && (requested != null && requested.isDefaultNic())) { - isDefaultNic = true; - } - - while (deviceIds[deviceId] && deviceId < deviceIds.length) { - deviceId++; - } - - Pair vmNicPair = allocateNic(requested, config, isDefaultNic, deviceId, vm); - - NicProfile vmNic = vmNicPair.first(); - if (vmNic == null) { - continue; - } - - deviceId = vmNicPair.second(); - - int devId = vmNic.getDeviceId(); - if (devId > deviceIds.length) { - throw new IllegalArgumentException("Device id for nic is too large: " + vmNic); - } - if (deviceIds[devId]) { - throw new IllegalArgumentException("Conflicting device id for two different nics: " + vmNic); - } - - deviceIds[devId] = true; - - if (vmNic.isDefaultNic()) { - if (defaultNic != null) { - throw new IllegalArgumentException("You cannot specify two nics as default nics: nic 1 = " + defaultNic + "; nic 2 = " + vmNic); + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws 
InsufficientCapacityException { + int deviceId = 0; + + boolean[] deviceIds = new boolean[networks.size()]; + Arrays.fill(deviceIds, false); + + List nics = new ArrayList(networks.size()); + NicProfile defaultNic = null; + + for (Map.Entry network : networks.entrySet()) { + Network config = network.getKey(); + NicProfile requested = network.getValue(); + + Boolean isDefaultNic = false; + if (vm != null && (requested != null && requested.isDefaultNic())) { + isDefaultNic = true; + } + + while (deviceIds[deviceId] && deviceId < deviceIds.length) { + deviceId++; + } + + Pair vmNicPair = allocateNic(requested, config, isDefaultNic, deviceId, vm); + + NicProfile vmNic = vmNicPair.first(); + if (vmNic == null) { + continue; + } + + deviceId = vmNicPair.second(); + + int devId = vmNic.getDeviceId(); + if (devId > deviceIds.length) { + throw new IllegalArgumentException("Device id for nic is too large: " + vmNic); + } + if (deviceIds[devId]) { + throw new IllegalArgumentException("Conflicting device id for two different nics: " + vmNic); + } + + deviceIds[devId] = true; + + if (vmNic.isDefaultNic()) { + if (defaultNic != null) { + throw new IllegalArgumentException("You cannot specify two nics as default nics: nic 1 = " + defaultNic + "; nic 2 = " + vmNic); + } + defaultNic = vmNic; + } + + nics.add(vmNic); + vm.addNic(vmNic); + + } + + if (nics.size() != networks.size()) { + s_logger.warn("Number of nics " + nics.size() + " doesn't match number of requested networks " + networks.size()); + throw new CloudRuntimeException("Number of nics " + nics.size() + " doesn't match number of requested networks " + networks.size()); + } + + if (nics.size() == 1) { + nics.get(0).setDefaultNic(true); } - defaultNic = vmNic; } - - nics.add(vmNic); - vm.addNic(vmNic); - - } - - if (nics.size() != networks.size()) { - s_logger.warn("Number of nics " + nics.size() + " doesn't match number of requested networks " + networks.size()); - throw new CloudRuntimeException("Number of nics " + 
nics.size() + " doesn't match number of requested networks " + networks.size()); - } - - if (nics.size() == 1) { - nics.get(0).setDefaultNic(true); - } - - txn.commit(); + }); } @DB @@ -898,7 +904,6 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @DB public Pair implementNetwork(long networkId, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - Transaction.currentTxn(); Pair implemented = new Pair(null, null); NetworkVO network = _networksDao.findById(networkId); @@ -1174,22 +1179,23 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } @DB - protected void updateNic(NicVO nic, long networkId, int count) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - _nicDao.update(nic.getId(), nic); - - if (nic.getVmType() == VirtualMachine.Type.User) { - s_logger.debug("Changing active number of nics for network id=" + networkId + " on " + count); - _networksDao.changeActiveNicsBy(networkId, count); - } - - if (nic.getVmType() == VirtualMachine.Type.User || - (nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(networkId).getTrafficType() == TrafficType.Guest)) { - _networksDao.setCheckForGc(networkId); - } - - txn.commit(); + protected void updateNic(final NicVO nic, final long networkId, final int count) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _nicDao.update(nic.getId(), nic); + + if (nic.getVmType() == VirtualMachine.Type.User) { + s_logger.debug("Changing active number of nics for network id=" + networkId + " on " + count); + _networksDao.changeActiveNicsBy(networkId, count); + } + + if (nic.getVmType() == VirtualMachine.Type.User || + (nic.getVmType() == VirtualMachine.Type.DomainRouter && _networksDao.findById(networkId).getTrafficType() == TrafficType.Guest)) { 
+ _networksDao.setCheckForGc(networkId); + } + } + }); } @Override @@ -1412,59 +1418,63 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } @DB - protected void releaseNic(VirtualMachineProfile vmProfile, long nicId) throws ConcurrentOperationException, ResourceUnavailableException { - //lock the nic - Transaction txn = Transaction.currentTxn(); - txn.start(); + protected void releaseNic(final VirtualMachineProfile vmProfile, final long nicId) throws ConcurrentOperationException, ResourceUnavailableException { + Pair networkToRelease = Transaction.execute(new TransactionCallback>() { + @Override + public Pair doInTransaction(TransactionStatus status) { + NicVO nic = _nicDao.lockRow(nicId, true); + if (nic == null) { + throw new ConcurrentOperationException("Unable to acquire lock on nic " + nic); + } - NicVO nic = _nicDao.lockRow(nicId, true); - if (nic == null) { - throw new ConcurrentOperationException("Unable to acquire lock on nic " + nic); - } + Nic.State originalState = nic.getState(); + NetworkVO network = _networksDao.findById(nic.getNetworkId()); - Nic.State originalState = nic.getState(); - NetworkVO network = _networksDao.findById(nic.getNetworkId()); - - if (originalState == Nic.State.Reserved || originalState == Nic.State.Reserving) { - if (nic.getReservationStrategy() == Nic.ReservationStrategy.Start) { - NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); - nic.setState(Nic.State.Releasing); - _nicDao.update(nic.getId(), nic); - NicProfile profile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), null, _networkModel.isSecurityGroupSupportedInNetwork(network), - _networkModel.getNetworkTag(vmProfile.getHypervisorType(), network)); - if (guru.release(profile, vmProfile, nic.getReservationId())) { - applyProfileToNicForRelease(nic, profile); - nic.setState(Nic.State.Allocated); - if (originalState == Nic.State.Reserved) { - updateNic(nic, network.getId(), 
-1); - } else { + if (originalState == Nic.State.Reserved || originalState == Nic.State.Reserving) { + if (nic.getReservationStrategy() == Nic.ReservationStrategy.Start) { + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); + nic.setState(Nic.State.Releasing); _nicDao.update(nic.getId(), nic); - } - } - //commit the transaction before proceeding releasing nic profile on the network elements - txn.commit(); - - // Perform release on network elements - List providersToImplement = getNetworkProviders(network.getId()); - for (NetworkElement element : _networkElements) { - if (providersToImplement.contains(element.getProvider())) { - if (!_networkModel.isProviderEnabledInPhysicalNetwork(_networkModel.getPhysicalNetworkId(network), element.getProvider().getName())) { - throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + - " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); + NicProfile profile = new NicProfile(nic, network, nic.getBroadcastUri(), nic.getIsolationUri(), null, _networkModel.isSecurityGroupSupportedInNetwork(network), + _networkModel.getNetworkTag(vmProfile.getHypervisorType(), network)); + if (guru.release(profile, vmProfile, nic.getReservationId())) { + applyProfileToNicForRelease(nic, profile); + nic.setState(Nic.State.Allocated); + if (originalState == Nic.State.Reserved) { + updateNic(nic, network.getId(), -1); + } else { + _nicDao.update(nic.getId(), nic); + } } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Asking " + element.getName() + " to release " + nic); - } - //NOTE: Context appear to never be used in release method - //implementations. 
Consider removing it from interface Element - element.release(network, profile, vmProfile, null); + // Perform release on network elements + return new Pair(network, profile); + } else { + nic.setState(Nic.State.Allocated); + updateNic(nic, network.getId(), -1); } } - } else { - nic.setState(Nic.State.Allocated); - updateNic(nic, network.getId(), -1); - txn.commit(); + return null; + } + }); + + if (networkToRelease != null) { + Network network = networkToRelease.first(); + NicProfile profile = networkToRelease.second(); + List providersToImplement = getNetworkProviders(network.getId()); + for (NetworkElement element : _networkElements) { + if (providersToImplement.contains(element.getProvider())) { + if (!_networkModel.isProviderEnabledInPhysicalNetwork(_networkModel.getPhysicalNetworkId(network), element.getProvider().getName())) { + throw new CloudRuntimeException("Service provider " + element.getProvider().getName() + + " either doesn't exist or is not enabled in physical network id: " + network.getPhysicalNetworkId()); + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Asking " + element.getName() + " to release " + profile); + } + //NOTE: Context appear to never be used in release method + //implementations. 
Consider removing it from interface Element + element.release(network, profile, vmProfile, null); + } } } } @@ -1561,15 +1571,17 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra Network network = _networksDao.findById(nic.getNetworkId()); DhcpServiceProvider dhcpServiceProvider = getDhcpServiceProvider(network); try { - NicIpAliasVO ipAlias = _nicIpAliasDao.findByGatewayAndNetworkIdAndState(nic.getGateway(), network.getId(), NicIpAlias.state.active); + final NicIpAliasVO ipAlias = _nicIpAliasDao.findByGatewayAndNetworkIdAndState(nic.getGateway(), network.getId(), NicIpAlias.state.active); if (ipAlias != null) { ipAlias.setState(NicIpAlias.state.revoked); - Transaction txn = Transaction.currentTxn(); - txn.start(); - _nicIpAliasDao.update(ipAlias.getId(), ipAlias); - IPAddressVO aliasIpaddressVo = _publicIpAddressDao.findByIpAndSourceNetworkId(ipAlias.getNetworkId(), ipAlias.getIp4Address()); - _publicIpAddressDao.unassignIpAddress(aliasIpaddressVo.getId()); - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _nicIpAliasDao.update(ipAlias.getId(), ipAlias); + IPAddressVO aliasIpaddressVo = _publicIpAddressDao.findByIpAndSourceNetworkId(ipAlias.getNetworkId(), ipAlias.getIp4Address()); + _publicIpAddressDao.unassignIpAddress(aliasIpaddressVo.getId()); + } + }); if (!dhcpServiceProvider.removeDhcpSupportForSubnet(network)) { s_logger.warn("Failed to remove the ip alias on the router, marking it as removed in db and freed the allocated ip " + ipAlias.getIp4Address()); } @@ -1591,18 +1603,18 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override @DB - public Network createGuestNetwork(long networkOfferingId, String name, String displayText, String gateway, String cidr, String vlanId, String networkDomain, Account owner, - Long domainId, PhysicalNetwork pNtwk, long zoneId, ACLType aclType, 
Boolean subdomainAccess, Long vpcId, String ip6Gateway, String ip6Cidr, - Boolean isDisplayNetworkEnabled, String isolatedPvlan) throws ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException { + public Network createGuestNetwork(long networkOfferingId, final String name, final String displayText, final String gateway, final String cidr, String vlanId, String networkDomain, final Account owner, + final Long domainId, final PhysicalNetwork pNtwk, final long zoneId, final ACLType aclType, Boolean subdomainAccess, final Long vpcId, final String ip6Gateway, final String ip6Cidr, + final Boolean isDisplayNetworkEnabled, final String isolatedPvlan) throws ConcurrentOperationException, InsufficientCapacityException, ResourceAllocationException { - NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(networkOfferingId); + final NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(networkOfferingId); // this method supports only guest network creation if (ntwkOff.getTrafficType() != TrafficType.Guest) { s_logger.warn("Only guest networks can be created using this method"); return null; } - boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, aclType); + final boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, aclType); //check resource limits if (updateResourceCount) { _resourceLimitMgr.checkResourceLimit(owner, ResourceType.network); @@ -1630,7 +1642,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra ipv6 = true; } // Validate zone - DataCenterVO zone = _dcDao.findById(zoneId); + final DataCenterVO zone = _dcDao.findById(zoneId); if (zone.getNetworkType() == NetworkType.Basic) { if (ipv6) { throw new InvalidParameterValueException("IPv6 is not supported in Basic zone"); @@ -1698,6 +1710,7 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } + //TODO(VXLAN): Support VNI specified // VlanId can be specified only when network offering supports 
it boolean vlanSpecified = (vlanId != null); if (vlanSpecified != ntwkOff.getSpecifyVlan()) { @@ -1816,79 +1829,84 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } - Transaction txn = Transaction.currentTxn(); - txn.start(); - - Long physicalNetworkId = null; - if (pNtwk != null) { - physicalNetworkId = pNtwk.getId(); - } - DataCenterDeployment plan = new DataCenterDeployment(zoneId, null, null, null, null, physicalNetworkId); - NetworkVO userNetwork = new NetworkVO(); - userNetwork.setNetworkDomain(networkDomain); - - if (cidr != null && gateway != null) { - userNetwork.setCidr(cidr); - userNetwork.setGateway(gateway); - } - - if (ip6Cidr != null && ip6Gateway != null) { - userNetwork.setIp6Cidr(ip6Cidr); - userNetwork.setIp6Gateway(ip6Gateway); - } - - if (vlanId != null) { - if (isolatedPvlan == null) { - URI uri = BroadcastDomainType.fromString(vlanId); - userNetwork.setBroadcastUri(uri); - if (!vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) { - userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); - } else { - userNetwork.setBroadcastDomainType(BroadcastDomainType.Native); - } - } else { - if (vlanId.equalsIgnoreCase(Vlan.UNTAGGED)) { - throw new InvalidParameterValueException("Cannot support pvlan with untagged primary vlan!"); - } - userNetwork.setBroadcastUri(NetUtils.generateUriForPvlan(vlanId, isolatedPvlan)); - userNetwork.setBroadcastDomainType(BroadcastDomainType.Pvlan); - } - } + final String networkDomainFinal = networkDomain; + final String vlanIdFinal = vlanId; + final Boolean subdomainAccessFinal = subdomainAccess; + Network network = Transaction.execute(new TransactionCallback() { + @Override + public Network doInTransaction(TransactionStatus status) { + Long physicalNetworkId = null; + if (pNtwk != null) { + physicalNetworkId = pNtwk.getId(); + } + DataCenterDeployment plan = new DataCenterDeployment(zoneId, null, null, null, null, physicalNetworkId); + NetworkVO userNetwork = new NetworkVO(); + 
userNetwork.setNetworkDomain(networkDomainFinal); - List networks = setupNetwork(owner, ntwkOff, userNetwork, plan, name, displayText, true, domainId, aclType, subdomainAccess, vpcId, - isDisplayNetworkEnabled); - - Network network = null; - if (networks == null || networks.isEmpty()) { - throw new CloudRuntimeException("Fail to create a network"); - } else { - if (networks.size() > 0 && networks.get(0).getGuestType() == Network.GuestType.Isolated && networks.get(0).getTrafficType() == TrafficType.Guest) { - Network defaultGuestNetwork = networks.get(0); - for (Network nw : networks) { - if (nw.getCidr() != null && nw.getCidr().equals(zone.getGuestNetworkCidr())) { - defaultGuestNetwork = nw; + if (cidr != null && gateway != null) { + userNetwork.setCidr(cidr); + userNetwork.setGateway(gateway); + } + + if (ip6Cidr != null && ip6Gateway != null) { + userNetwork.setIp6Cidr(ip6Cidr); + userNetwork.setIp6Gateway(ip6Gateway); + } + + if (vlanIdFinal != null) { + if (isolatedPvlan == null) { + URI uri = BroadcastDomainType.fromString(vlanIdFinal); + userNetwork.setBroadcastUri(uri); + if (!vlanIdFinal.equalsIgnoreCase(Vlan.UNTAGGED)) { + userNetwork.setBroadcastDomainType(BroadcastDomainType.Vlan); + } else { + userNetwork.setBroadcastDomainType(BroadcastDomainType.Native); + } + } else { + if (vlanIdFinal.equalsIgnoreCase(Vlan.UNTAGGED)) { + throw new InvalidParameterValueException("Cannot support pvlan with untagged primary vlan!"); + } + userNetwork.setBroadcastUri(NetUtils.generateUriForPvlan(vlanIdFinal, isolatedPvlan)); + userNetwork.setBroadcastDomainType(BroadcastDomainType.Pvlan); + } + } + + List networks = setupNetwork(owner, ntwkOff, userNetwork, plan, name, displayText, true, domainId, aclType, subdomainAccessFinal, vpcId, + isDisplayNetworkEnabled); + + Network network = null; + if (networks == null || networks.isEmpty()) { + throw new CloudRuntimeException("Fail to create a network"); + } else { + if (networks.size() > 0 && networks.get(0).getGuestType() 
== Network.GuestType.Isolated && networks.get(0).getTrafficType() == TrafficType.Guest) { + Network defaultGuestNetwork = networks.get(0); + for (Network nw : networks) { + if (nw.getCidr() != null && nw.getCidr().equals(zone.getGuestNetworkCidr())) { + defaultGuestNetwork = nw; + } + } + network = defaultGuestNetwork; + } else { + // For shared network + network = networks.get(0); } } - network = defaultGuestNetwork; - } else { - // For shared network - network = networks.get(0); + + if (updateResourceCount) { + _resourceLimitMgr.incrementResourceCount(owner.getId(), ResourceType.network); + } + + return network; } - } + }); - if (updateResourceCount) { - _resourceLimitMgr.incrementResourceCount(owner.getId(), ResourceType.network); - } - - txn.commit(); CallContext.current().setEventDetails("Network Id: " + network.getId()); return network; } @Override @DB - public boolean shutdownNetwork(long networkId, ReservationContext context, boolean cleanupElements) { - boolean result = false; + public boolean shutdownNetwork(final long networkId, ReservationContext context, boolean cleanupElements) { NetworkVO network = _networksDao.findById(networkId); if (network.getState() == Network.State.Allocated) { s_logger.debug("Network is already shutdown: " + network); @@ -1933,43 +1951,51 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } - boolean success = shutdownNetworkElementsAndResources(context, cleanupElements, network); + final boolean success = shutdownNetworkElementsAndResources(context, cleanupElements, network); - Transaction txn = Transaction.currentTxn(); - txn.start(); - if (success) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); - } - NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); - NetworkProfile profile = convertNetworkToNetworkProfile(network.getId()); - guru.shutdown(profile, 
_networkOfferingDao.findById(network.getNetworkOfferingId())); + final NetworkVO networkFinal = network; + boolean result = Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + boolean result = false; - applyProfileToNetwork(network, profile); - DataCenterVO zone = _dcDao.findById(network.getDataCenterId()); - if (isSharedNetworkOfferingWithServices(network.getNetworkOfferingId()) && (zone.getNetworkType() == NetworkType.Advanced)) { - network.setState(Network.State.Setup); - } else { - try { - stateTransitTo(network, Event.OperationSucceeded); - } catch (NoTransitionException e) { - network.setState(Network.State.Allocated); - network.setRestartRequired(false); + if (success) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Network id=" + networkId + " is shutdown successfully, cleaning up corresponding resources now."); + } + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, networkFinal.getGuruName()); + NetworkProfile profile = convertNetworkToNetworkProfile(networkFinal.getId()); + guru.shutdown(profile, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId())); + + applyProfileToNetwork(networkFinal, profile); + DataCenterVO zone = _dcDao.findById(networkFinal.getDataCenterId()); + if (isSharedNetworkOfferingWithServices(networkFinal.getNetworkOfferingId()) && (zone.getNetworkType() == NetworkType.Advanced)) { + networkFinal.setState(Network.State.Setup); + } else { + try { + stateTransitTo(networkFinal, Event.OperationSucceeded); + } catch (NoTransitionException e) { + networkFinal.setState(Network.State.Allocated); + networkFinal.setRestartRequired(false); + } + } + _networksDao.update(networkFinal.getId(), networkFinal); + _networksDao.clearCheckForGc(networkId); + result = true; + } else { + try { + stateTransitTo(networkFinal, Event.OperationFailed); + } catch (NoTransitionException e) { + networkFinal.setState(Network.State.Implemented); + 
_networksDao.update(networkFinal.getId(), networkFinal); + } + result = false; } + + return result; } - _networksDao.update(network.getId(), network); - _networksDao.clearCheckForGc(networkId); - result = true; - } else { - try { - stateTransitTo(network, Event.OperationFailed); - } catch (NoTransitionException e) { - network.setState(Network.State.Implemented); - _networksDao.update(network.getId(), network); - } - result = false; - } - txn.commit(); + }); + return result; } finally { if (network != null) { @@ -2033,8 +2059,8 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @Override @DB - public boolean destroyNetwork(long networkId, ReservationContext context) { - Account callerAccount = context.getAccount(); + public boolean destroyNetwork(long networkId, final ReservationContext context) { + final Account callerAccount = context.getAccount(); NetworkVO network = _networksDao.findById(networkId); if (network == null) { @@ -2123,38 +2149,48 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra if (s_logger.isDebugEnabled()) { s_logger.debug("Network id=" + networkId + " is destroyed successfully, cleaning up corresponding resources now."); } - NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, network.getGuruName()); - Transaction txn = Transaction.currentTxn(); - txn.start(); - guru.trash(network, _networkOfferingDao.findById(network.getNetworkOfferingId())); - - if (!deleteVlansInNetwork(network.getId(), context.getCaller().getId(), callerAccount)) { - success = false; - s_logger.warn("Failed to delete network " + network + "; was unable to cleanup corresponding ip ranges"); - } else { - // commit transaction only when ips and vlans for the network are released successfully - try { - stateTransitTo(network, Event.DestroyNetwork); - } catch (NoTransitionException e) { - s_logger.debug(e.getMessage()); - } - if (_networksDao.remove(network.getId())) { - NetworkDomainVO networkDomain = 
_networkDomainDao.getDomainNetworkMapByNetworkId(network.getId()); - if (networkDomain != null) - _networkDomainDao.remove(networkDomain.getId()); - - NetworkAccountVO networkAccount = _networkAccountDao.getAccountNetworkMapByNetworkId(network.getId()); - if (networkAccount != null) - _networkAccountDao.remove(networkAccount.getId()); - } - - NetworkOffering ntwkOff = _entityMgr.findById(NetworkOffering.class, network.getNetworkOfferingId()); - boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, network.getAclType()); - if (updateResourceCount) { - _resourceLimitMgr.decrementResourceCount(network.getAccountId(), ResourceType.network); - } - txn.commit(); + final NetworkVO networkFinal = network; + try { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + NetworkGuru guru = AdapterBase.getAdapterByName(_networkGurus, networkFinal.getGuruName()); + + guru.trash(networkFinal, _networkOfferingDao.findById(networkFinal.getNetworkOfferingId())); + + if (!deleteVlansInNetwork(networkFinal.getId(), context.getCaller().getId(), callerAccount)) { + s_logger.warn("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); + throw new CloudRuntimeException("Failed to delete network " + networkFinal + "; was unable to cleanup corresponding ip ranges"); + } else { + // commit transaction only when ips and vlans for the network are released successfully + try { + stateTransitTo(networkFinal, Event.DestroyNetwork); + } catch (NoTransitionException e) { + s_logger.debug(e.getMessage()); + } + if (_networksDao.remove(networkFinal.getId())) { + NetworkDomainVO networkDomain = _networkDomainDao.getDomainNetworkMapByNetworkId(networkFinal.getId()); + if (networkDomain != null) + _networkDomainDao.remove(networkDomain.getId()); + + NetworkAccountVO networkAccount = _networkAccountDao.getAccountNetworkMapByNetworkId(networkFinal.getId()); + if 
(networkAccount != null) + _networkAccountDao.remove(networkAccount.getId()); + } + + NetworkOffering ntwkOff = _entityMgr.findById(NetworkOffering.class, networkFinal.getNetworkOfferingId()); + boolean updateResourceCount = resourceCountNeedsUpdate(ntwkOff, networkFinal.getAclType()); + if (updateResourceCount) { + _resourceLimitMgr.decrementResourceCount(networkFinal.getAccountId(), ResourceType.network); + } + } + } + }); + return false; + } catch ( CloudRuntimeException e ) { + s_logger.error("Failed to delete network", e); + return false; } } @@ -2191,10 +2227,9 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return result; } - public class NetworkGarbageCollector implements Runnable { + public class NetworkGarbageCollector extends ManagedContextRunnable { @Override - public void run() { - ServerContexts.registerSystemContext(); + protected void runInContext() { GlobalLock gcLock = GlobalLock.getInternLock("Network.GC.Lock"); try { if (gcLock.lock(3)) { @@ -2206,7 +2241,6 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra } } finally { gcLock.releaseRef(); - ServerContexts.unregisterSystemContext(); } } @@ -2425,24 +2459,22 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra @DB @Override - public boolean reallocate(VirtualMachineProfile vm, DataCenterDeployment dest) throws InsufficientCapacityException, ConcurrentOperationException { + public boolean reallocate(final VirtualMachineProfile vm, DataCenterDeployment dest) throws InsufficientCapacityException, ConcurrentOperationException { VMInstanceVO vmInstance = _vmDao.findById(vm.getId()); DataCenterVO dc = _dcDao.findById(vmInstance.getDataCenterId()); if (dc.getNetworkType() == NetworkType.Basic) { List nics = _nicDao.listByVmId(vmInstance.getId()); NetworkVO network = _networksDao.findById(nics.get(0).getNetworkId()); - LinkedHashMap profiles = new LinkedHashMap(); + final LinkedHashMap profiles = new 
LinkedHashMap(); profiles.put(network, null); - Transaction txn = Transaction.currentTxn(); - txn.start(); - - try { - cleanupNics(vm); - allocate(vm, profiles); - } finally { - txn.commit(); - } + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws InsufficientCapacityException { + cleanupNics(vm); + allocate(vm, profiles); + } + }); } return true; } @@ -3049,17 +3081,20 @@ public class NetworkOrchestrator extends ManagerBase implements NetworkOrchestra return nic.getSecondaryIp(); } - private boolean removeVmSecondaryIpsOfNic(long nicId) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - List ipList = _nicSecondaryIpDao.listByNicId(nicId); - if (ipList != null) { - for (NicSecondaryIpVO ip : ipList) { - _nicSecondaryIpDao.remove(ip.getId()); + private boolean removeVmSecondaryIpsOfNic(final long nicId) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + List ipList = _nicSecondaryIpDao.listByNicId(nicId); + if (ipList != null) { + for (NicSecondaryIpVO ip : ipList) { + _nicSecondaryIpDao.remove(ip.getId()); + } + s_logger.debug("Revoving nic secondary ip entry ..."); + } } - s_logger.debug("Revoving nic secondary ip entry ..."); - } - txn.commit(); + }); + return true; } diff --git a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 088c943e6c0..0821c81f71a 100644 --- a/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -31,8 +31,8 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import 
org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; @@ -97,6 +97,9 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; @@ -681,9 +684,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } @DB - protected VolumeVO switchVolume(VolumeVO existingVolume, VirtualMachineProfile vm) throws StorageUnavailableException { - Transaction txn = Transaction.currentTxn(); - + protected VolumeVO switchVolume(final VolumeVO existingVolume, final VirtualMachineProfile vm) throws StorageUnavailableException { Long templateIdToUse = null; Long volTemplateId = existingVolume.getTemplateId(); long vmTemplateId = vm.getTemplateId(); @@ -695,22 +696,26 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati templateIdToUse = vmTemplateId; } - txn.start(); - VolumeVO newVolume = allocateDuplicateVolumeVO(existingVolume, templateIdToUse); - // In case of Vmware if vm reference is not removed then during root - // disk cleanup - // the vm also gets deleted, so remove the reference - if (vm.getHypervisorType() == HypervisorType.VMware) { - _volsDao.detachVolume(existingVolume.getId()); - } - try { - stateTransitTo(existingVolume, Volume.Event.DestroyRequested); - } catch (NoTransitionException e) { - 
s_logger.debug("Unable to destroy existing volume: " + e.toString()); - } - txn.commit(); - return newVolume; - + final Long templateIdToUseFinal = templateIdToUse; + return Transaction.execute(new TransactionCallback() { + @Override + public VolumeVO doInTransaction(TransactionStatus status) { + VolumeVO newVolume = allocateDuplicateVolumeVO(existingVolume, templateIdToUseFinal); + // In case of Vmware if vm reference is not removed then during root + // disk cleanup + // the vm also gets deleted, so remove the reference + if (vm.getHypervisorType() == HypervisorType.VMware) { + _volsDao.detachVolume(existingVolume.getId()); + } + try { + stateTransitTo(existingVolume, Volume.Event.DestroyRequested); + } catch (NoTransitionException e) { + s_logger.debug("Unable to destroy existing volume: " + e.toString()); + } + + return newVolume; + } + }); } @Override @@ -724,28 +729,32 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati if (s_logger.isDebugEnabled()) { s_logger.debug("Cleaning storage for vm: " + vmId); } - List volumesForVm = _volsDao.findByInstance(vmId); - List toBeExpunged = new ArrayList(); - Transaction txn = Transaction.currentTxn(); - txn.start(); - for (VolumeVO vol : volumesForVm) { - if (vol.getVolumeType().equals(Type.ROOT)) { - // Destroy volume if not already destroyed - boolean volumeAlreadyDestroyed = (vol.getState() == Volume.State.Destroy || vol.getState() == Volume.State.Expunged || vol.getState() == Volume.State.Expunging); - if (!volumeAlreadyDestroyed) { - volService.destroyVolume(vol.getId()); - } else { - s_logger.debug("Skipping destroy for the volume " + vol + " as its in state " + vol.getState().toString()); + final List volumesForVm = _volsDao.findByInstance(vmId); + final List toBeExpunged = new ArrayList(); + + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (VolumeVO vol : volumesForVm) { + if 
(vol.getVolumeType().equals(Type.ROOT)) { + // Destroy volume if not already destroyed + boolean volumeAlreadyDestroyed = (vol.getState() == Volume.State.Destroy || vol.getState() == Volume.State.Expunged || vol.getState() == Volume.State.Expunging); + if (!volumeAlreadyDestroyed) { + volService.destroyVolume(vol.getId()); + } else { + s_logger.debug("Skipping destroy for the volume " + vol + " as its in state " + vol.getState().toString()); + } + toBeExpunged.add(vol); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Detaching " + vol); + } + _volsDao.detachVolume(vol.getId()); + } } - toBeExpunged.add(vol); - } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Detaching " + vol); - } - _volsDao.detachVolume(vol.getId()); } - } - txn.commit(); + }); + AsyncCallFuture future = null; for (VolumeVO expunge : toBeExpunged) { future = volService.expungeVolumeAsync(volFactory.getVolume(expunge.getId())); @@ -878,7 +887,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati for (VolumeVO vol : vols) { DataTO volTO = volFactory.getVolume(vol.getId()).getTO(); - DiskTO disk = new DiskTO(volTO, vol.getDeviceId(), null, vol.getVolumeType()); + DiskTO disk = new DiskTO(volTO, vol.getDeviceId(), vol.getPath(), vol.getVolumeType()); + VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); + DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); + + disk.setDetails(getDetails(volumeInfo, dataStore)); + vm.addDisk(disk); } @@ -889,6 +903,29 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } } + private Map getDetails(VolumeInfo volumeInfo, DataStore dataStore) { + Map details = new HashMap(); + + StoragePoolVO storagePool = _storagePoolDao.findById(dataStore.getId()); + + details.put(DiskTO.MANAGED, String.valueOf(storagePool.isManaged())); + details.put(DiskTO.STORAGE_HOST, storagePool.getHostAddress()); + details.put(DiskTO.STORAGE_PORT, 
String.valueOf(storagePool.getPort())); + details.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeInfo.getSize())); + details.put(DiskTO.IQN, volumeInfo.get_iScsiName()); + + ChapInfo chapInfo = volService.getChapInfo(volumeInfo, dataStore); + + if (chapInfo != null) { + details.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); + details.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret()); + details.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername()); + details.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret()); + } + + return details; + } + private static enum VolumeTaskType { RECREATE, NOP, MIGRATE } @@ -944,11 +981,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati } throw new CloudRuntimeException("Local volume " + vol + " cannot be recreated on storagepool " + assignedPool + " assigned by deploymentPlanner"); } else { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner"); + //Check if storage migration is enabled in config + if (StorageHAMigrationEnabled.value()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Shared volume " + vol + " will be migrated on storage pool " + assignedPool + " assigned by deploymentPlanner"); + } + VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool); + tasks.add(task); + } else { + throw new CloudRuntimeException("Cannot migrate volumes. 
Volume Migration is disabled"); } - VolumeTask task = new VolumeTask(VolumeTaskType.MIGRATE, vol, assignedPool); - tasks.add(task); } } else { StoragePoolVO pool = _storagePoolDao.findById(vol.getPoolId()); @@ -1071,7 +1113,12 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati vol = result.first(); } DataTO volumeTO = volFactory.getVolume(vol.getId()).getTO(); - DiskTO disk = new DiskTO(volumeTO, vol.getDeviceId(), null, vol.getVolumeType()); + DiskTO disk = new DiskTO(volumeTO, vol.getDeviceId(), vol.getPath(), vol.getVolumeType()); + VolumeInfo volumeInfo = volFactory.getVolume(vol.getId()); + DataStore dataStore = dataStoreMgr.getDataStore(vol.getPoolId(), DataStoreRole.Primary); + + disk.setDetails(getDetails(volumeInfo, dataStore)); + vm.addDisk(disk); } } @@ -1084,7 +1131,8 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati public boolean canVmRestartOnAnotherServer(long vmId) { List vols = _volsDao.findCreatedByInstance(vmId); for (VolumeVO vol : vols) { - if (!vol.isRecreatable() && !vol.getPoolType().isShared()) { + StoragePoolVO storagePoolVO = _storagePoolDao.findById(vol.getPoolId()); + if (!vol.isRecreatable() && storagePoolVO != null && storagePoolVO.getPoolType() != null && !(storagePoolVO.getPoolType().isShared())) { return false; } } @@ -1105,9 +1153,16 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati "If true, will recreate system vm root disk whenever starting system vm", true); + public static final ConfigKey StorageHAMigrationEnabled = new ConfigKey(Boolean.class, + "enable.ha.storage.migration", + "Storage", + "true", + "Enable/disable storage migration across primary storage during HA", + true); + @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {RecreatableSystemVmEnabled, MaxVolumeSize}; + return new ConfigKey[] {RecreatableSystemVmEnabled, MaxVolumeSize, StorageHAMigrationEnabled}; } @Override @@ -1163,6 +1218,7 @@ 
public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati return dataStoreMgr.getPrimaryDataStore(vol.getPoolId()).getUuid(); } + @Override public void updateVolumeDiskChain(long volumeId, String path, String chainInfo) { VolumeVO vol = _volsDao.findById(volumeId); boolean needUpdate = false; @@ -1173,7 +1229,7 @@ public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrati needUpdate = true; if(needUpdate) { - s_logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + s_logger.info("Update volume disk chain info. vol: " + vol.getId() + ", " + vol.getPath() + " -> " + path + ", " + vol.getChainInfo() + " -> " + chainInfo); vol.setPath(path); vol.setChainInfo(chainInfo); diff --git a/engine/orchestration/test/resource/provisioningContext.xml b/engine/orchestration/test/resource/provisioningContext.xml index 6ed0ab5d472..0320be33087 100644 --- a/engine/orchestration/test/resource/provisioningContext.xml +++ b/engine/orchestration/test/resource/provisioningContext.xml @@ -1,3 +1,4 @@ + - +--> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/schema/resources/META-INF/cloudstack/system/spring-engine-schema-system-checkers-context.xml b/engine/schema/resources/META-INF/cloudstack/system/spring-engine-schema-system-checkers-context.xml new file mode 100644 index 00000000000..2a308873f6a --- /dev/null +++ 
b/engine/schema/resources/META-INF/cloudstack/system/spring-engine-schema-system-checkers-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/engine/schema/src/com/cloud/alert/dao/AlertDaoImpl.java b/engine/schema/src/com/cloud/alert/dao/AlertDaoImpl.java index 7c0a56224f3..57102cbb50b 100755 --- a/engine/schema/src/com/cloud/alert/dao/AlertDaoImpl.java +++ b/engine/schema/src/com/cloud/alert/dao/AlertDaoImpl.java @@ -29,7 +29,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = { AlertDao.class }) @@ -116,7 +116,7 @@ public class AlertDaoImpl extends GenericDaoBase implements Alert return result; } if (alerts != null && !alerts.isEmpty()) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (AlertVO alert : alerts) { alert = lockRow(alert.getId(), true); diff --git a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java index 88a2b2b7ab7..64a1660a966 100755 --- a/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java +++ b/engine/schema/src/com/cloud/capacity/dao/CapacityDaoImpl.java @@ -46,7 +46,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -80,7 +80,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2= " AND capacity_type = ? AND cluster_details.name =? 
GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC"; private static final String LIST_PODSINZONE_BY_HOST_CAPACITY_TYPE = "SELECT DISTINCT capacity.pod_id FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`host_pod_ref` pod " + - " ON (pod.id = capacity.pod_id AND pod.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster ON (capacity.cluster_id = cluster.cluster_id ) WHERE capacity.data_center_id = ? AND capacity_type = ? AND cluster_details.name= ? ((total_capacity * cluster.value ) - used_capacity + reserved_capacity) >= ? "; + " ON (pod.id = capacity.pod_id AND pod.removed is NULL) INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id ) WHERE capacity.data_center_id = ? AND capacity_type = ? AND cluster_details.name= ? AND ((total_capacity * cluster_details.value ) - used_capacity + reserved_capacity) >= ? "; private static final String ORDER_PODS_BY_AGGREGATE_CAPACITY = " SELECT capacity.pod_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity) FROM `cloud`.`op_host_capacity` capacity WHERE data_center_id= ? AND capacity_type = ? 
GROUP BY capacity.pod_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity) ASC "; @@ -186,7 +186,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List listClustersCrossingThreshold(short capacityType, Long zoneId, String configName, long compute_requested){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); StringBuilder sql = new StringBuilder(LIST_CLUSTERS_CROSSING_THRESHOLD); @@ -241,7 +241,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId, String resource_state){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -291,7 +291,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements public List listCapacitiesGroupedByLevelAndType(Integer capacityType, Long zoneId, Long podId, Long clusterId, int level, Long limit){ StringBuilder finalQuery = new StringBuilder(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -360,7 +360,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements if(level == 3 && rs.getLong(7) != 0) capacityClusterId = rs.getLong(7); - SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(3), rs.getFloat(4), + SummedCapacity summedCapacity = new SummedCapacity( rs.getLong(1), rs.getLong(2), rs.getLong(3), rs.getFloat(4), (short)rs.getLong(5), rs.getLong(6), capacityPodId, capacityClusterId); @@ -378,7 +378,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId){ - Transaction 
txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -425,7 +425,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements } public void updateAllocated(Long hostId, long allocatedAmount, short capacityType, boolean add) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { txn.start(); @@ -458,7 +458,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -503,7 +503,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -565,6 +565,11 @@ public class CapacityDaoImpl extends GenericDaoBase implements this.dcId = zoneId; } + public SummedCapacity(long sumUsed, long sumReserved, long sumTotal, float percentUsed, short capacityType, Long zoneId, Long podId, Long clusterId) { + this(sumUsed, sumTotal, percentUsed, capacityType, zoneId, podId, clusterId); + this.sumReserved = sumReserved; + } + public Short getCapacityType() { return capacityType; } @@ -594,7 +599,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List findByClusterPodZone(Long zoneId, Long podId, Long clusterId){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = 
null; List result = new ArrayList(); @@ -706,7 +711,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public Pair, Map> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map clusterCapacityMap = new HashMap(); @@ -759,7 +764,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public List listPodsByHostCapacities(long zoneId, int requiredCpu, long requiredRam, short capacityType) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -793,7 +798,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public Pair, Map> orderPodsByAggregateCapacity(long zoneId, short capacityTypeForOrdering) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map podCapacityMap = new HashMap(); @@ -835,7 +840,7 @@ public class CapacityDaoImpl extends GenericDaoBase implements @Override public void updateCapacityState(Long dcId, Long podId, Long clusterId, Long hostId, String capacityState) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); StringBuilder sql = new StringBuilder(UPDATE_CAPACITY_STATE); List resourceIdList = new ArrayList(); diff --git a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java index f071cea60e6..fdd5a287b40 100644 --- a/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java +++ b/engine/schema/src/com/cloud/certificate/dao/CertificateDaoImpl.java @@ -17,9 +17,6 @@ package com.cloud.certificate.dao; 
import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.IOException; import javax.ejb.Local; @@ -32,7 +29,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Component -@Local(value={CertificateDao.class}) @DB(txn=false) +@Local(value={CertificateDao.class}) @DB public class CertificateDaoImpl extends GenericDaoBase implements CertificateDao { private static final Logger s_logger = Logger.getLogger(CertificateDaoImpl.class); diff --git a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java index cff4cfc1b95..8a43d23fedb 100644 --- a/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java +++ b/engine/schema/src/com/cloud/cluster/agentlb/dao/HostTransferMapDaoImpl.java @@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value = { HostTransferMapDao.class }) -@DB(txn = false) +@DB public class HostTransferMapDaoImpl extends GenericDaoBase implements HostTransferMapDao { private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class); diff --git a/engine/schema/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java b/engine/schema/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java index 9070ff912b3..0d99d30f11b 100644 --- a/engine/schema/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java +++ b/engine/schema/src/com/cloud/configuration/dao/ResourceCountDaoImpl.java @@ -40,7 +40,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={ResourceCountDao.class}) @@ -158,7 +158,7 @@ public class ResourceCountDaoImpl extends GenericDaoBase @Override @DB public void 
createResourceCounts(long ownerId, ResourceLimit.ResourceOwnerType ownerType){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); ResourceType[] resourceTypes = Resource.ResourceType.values(); diff --git a/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java b/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java index f7f0f11ba07..72ac400ba72 100755 --- a/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/ClusterDetailsDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Local(value=ClusterDetailsDao.class) public class ClusterDetailsDaoImpl extends GenericDaoBase implements ClusterDetailsDao, ScopedConfigStorage { @@ -100,7 +100,7 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase details) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = ClusterSearch.create(); sc.setParameters("clusterId", clusterId); @@ -119,7 +119,7 @@ public class ClusterDetailsDaoImpl extends GenericDaoBase sc = DetailSearch.create(); sc.setParameters("clusterId", clusterId); diff --git a/engine/schema/src/com/cloud/dc/DcDetailVO.java b/engine/schema/src/com/cloud/dc/DataCenterDetailVO.java similarity index 74% rename from engine/schema/src/com/cloud/dc/DcDetailVO.java rename to engine/schema/src/com/cloud/dc/DataCenterDetailVO.java index 15bfacbb3e1..0ff7865c2c5 100644 --- a/engine/schema/src/com/cloud/dc/DcDetailVO.java +++ b/engine/schema/src/com/cloud/dc/DataCenterDetailVO.java @@ -16,8 +16,6 @@ // under the License. 
package com.cloud.dc; -import org.apache.cloudstack.api.InternalIdentity; - import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; @@ -25,16 +23,18 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.api.ResourceDetail; + @Entity @Table(name="data_center_details") -public class DcDetailVO implements InternalIdentity { +public class DataCenterDetailVO implements ResourceDetail { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") private long id; @Column(name="dc_id") - private long dcId; + private long resourceId; @Column(name="name") private String name; @@ -42,32 +42,41 @@ public class DcDetailVO implements InternalIdentity { @Column(name="value") private String value; - protected DcDetailVO() { + @Column(name="display") + private boolean display; + + protected DataCenterDetailVO() { } - public DcDetailVO(long dcId, String name, String value) { - this.dcId = dcId; + public DataCenterDetailVO(long dcId, String name, String value) { + this.resourceId = dcId; this.name = name; this.value = value; } + - public long getDcId() { - return dcId; - } - + @Override public String getName() { return name; } + @Override public String getValue() { return value; } - public void setValue(String value) { - this.value = value; - } - + @Override public long getId() { return id; } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } } diff --git a/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java index 64bf1fe6103..bd45ecd93fa 100644 --- a/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/ClusterDaoImpl.java @@ -40,7 +40,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import 
com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -94,7 +94,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C ZoneClusterSearch.done(); ClusterIdSearch = createSearchBuilder(Long.class); - ClusterIdSearch.selectField(ClusterIdSearch.entity().getId()); + ClusterIdSearch.selectFields(ClusterIdSearch.entity().getId()); ClusterIdSearch.and("dataCenterId", ClusterIdSearch.entity().getDataCenterId(), Op.EQ); ClusterIdSearch.done(); } @@ -156,7 +156,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C @Override public Map> getPodClusterIdMap(List clusterIds){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; Map> result = new HashMap>(); @@ -196,7 +196,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C @Override public List listDisabledClusters(long zoneId, Long podId) { GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); - clusterIdSearch.selectField(clusterIdSearch.entity().getId()); + clusterIdSearch.selectFields(clusterIdSearch.entity().getId()); clusterIdSearch.and("dataCenterId", clusterIdSearch.entity().getDataCenterId(), Op.EQ); if (podId != null) { clusterIdSearch.and("podId", clusterIdSearch.entity().getPodId(), Op.EQ); @@ -217,12 +217,12 @@ public class ClusterDaoImpl extends GenericDaoBase implements C public List listClustersWithDisabledPods(long zoneId) { GenericSearchBuilder disabledPodIdSearch = _hostPodDao.createSearchBuilder(Long.class); - disabledPodIdSearch.selectField(disabledPodIdSearch.entity().getId()); + disabledPodIdSearch.selectFields(disabledPodIdSearch.entity().getId()); disabledPodIdSearch.and("dataCenterId", disabledPodIdSearch.entity().getDataCenterId(), Op.EQ); 
disabledPodIdSearch.and("allocationState", disabledPodIdSearch.entity().getAllocationState(), Op.EQ); GenericSearchBuilder clusterIdSearch = createSearchBuilder(Long.class); - clusterIdSearch.selectField(clusterIdSearch.entity().getId()); + clusterIdSearch.selectFields(clusterIdSearch.entity().getId()); clusterIdSearch.join("disabledPodIdSearch", disabledPodIdSearch, clusterIdSearch.entity().getPodId(), disabledPodIdSearch.entity().getId(), JoinBuilder.JoinType.INNER); clusterIdSearch.done(); @@ -243,7 +243,7 @@ public class ClusterDaoImpl extends GenericDaoBase implements C @Override public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); ClusterVO cluster = createForUpdate(); cluster.setName(null); diff --git a/engine/schema/src/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java index b12fa9dc007..a01af0656a6 100644 --- a/engine/schema/src/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java @@ -26,11 +26,11 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value=ClusterVSMMapDao.class) -@DB(txn = false) +@DB public class ClusterVSMMapDaoImpl extends GenericDaoBase implements ClusterVSMMapDao { final SearchBuilder ClusterSearch; @@ -79,7 +79,7 @@ public class ClusterVSMMapDaoImpl extends GenericDaoBase } public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); ClusterVSMMapVO cluster = createForUpdate(); //cluster.setClusterId(null); diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java 
index 47b5522326a..9cc43efbbb6 100755 --- a/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/DataCenterDaoImpl.java @@ -26,9 +26,11 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import javax.persistence.TableGenerator; +import org.apache.cloudstack.api.ResourceDetail; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.dc.DataCenterDetailVO; import com.cloud.dc.DataCenterIpAddressVO; import com.cloud.dc.DataCenterLinkLocalIpAddressVO; import com.cloud.dc.DataCenterVO; @@ -44,7 +46,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SequenceFetcher; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.net.NetUtils; /** @@ -70,7 +72,7 @@ public class DataCenterDaoImpl extends GenericDaoBase implem @Inject protected DataCenterLinkLocalIpAddressDao _LinkLocalIpAllocDao = null; @Inject protected DataCenterVnetDao _vnetAllocDao = null; @Inject protected PodVlanDao _podVlanAllocDao = null; - @Inject protected DcDetailsDao _detailsDao = null; + @Inject protected DataCenterDetailsDao _detailsDao = null; @Inject protected AccountGuestVlanMapDao _accountGuestVlanMapDao = null; protected long _prefix; @@ -343,7 +345,7 @@ public class DataCenterDaoImpl extends GenericDaoBase implem @Override @DB public boolean update(Long zoneId, DataCenterVO zone) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); boolean persisted = super.update(zoneId, zone); if (!persisted) { @@ -356,7 +358,7 @@ public class DataCenterDaoImpl extends GenericDaoBase implem @Override public void loadDetails(DataCenterVO zone) { - Map details =_detailsDao.findDetails(zone.getId()); + Map details =_detailsDao.listDetailsKeyPairs(zone.getId()); 
zone.setDetails(details); } @@ -366,7 +368,13 @@ public class DataCenterDaoImpl extends GenericDaoBase implem if (details == null) { return; } - _detailsDao.persist(zone.getId(), details); + + List resourceDetails = new ArrayList(); + for (String key : details.keySet()) { + resourceDetails.add(new DataCenterDetailVO(zone.getId(), key, details.get(key))); + } + + _detailsDao.saveDetails(resourceDetails); } @Override @@ -408,7 +416,7 @@ public class DataCenterDaoImpl extends GenericDaoBase implem @Override public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); DataCenterVO zone = createForUpdate(); zone.setName(null); diff --git a/engine/schema/src/com/cloud/dc/dao/DcDetailsDao.java b/engine/schema/src/com/cloud/dc/dao/DataCenterDetailsDao.java similarity index 72% rename from engine/schema/src/com/cloud/dc/dao/DcDetailsDao.java rename to engine/schema/src/com/cloud/dc/dao/DataCenterDetailsDao.java index a3b72a84745..ed7c494744f 100644 --- a/engine/schema/src/com/cloud/dc/dao/DcDetailsDao.java +++ b/engine/schema/src/com/cloud/dc/dao/DataCenterDetailsDao.java @@ -16,17 +16,10 @@ // under the License. 
package com.cloud.dc.dao; -import java.util.Map; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; -import com.cloud.dc.DcDetailVO; +import com.cloud.dc.DataCenterDetailVO; import com.cloud.utils.db.GenericDao; -public interface DcDetailsDao extends GenericDao { - Map findDetails(long dcId); - - void persist(long dcId, Map details); - - DcDetailVO findDetail(long dcId, String name); - - void deleteDetails(long dcId); +public interface DataCenterDetailsDao extends GenericDao, ResourceDetailsDao { } \ No newline at end of file diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java new file mode 100644 index 00000000000..49092f8eeae --- /dev/null +++ b/engine/schema/src/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java @@ -0,0 +1,49 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.dc.dao; + +import javax.ejb.Local; + +import org.apache.cloudstack.api.ResourceDetail; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.ConfigKey.Scope; +import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; + +import com.cloud.dc.DataCenterDetailVO; + +@Local(value=DataCenterDetailsDao.class) +public class DataCenterDetailsDaoImpl extends ResourceDetailsDaoBase implements DataCenterDetailsDao, ScopedConfigStorage { + + + @Override + public Scope getScope() { + return ConfigKey.Scope.Zone; + } + + @Override + public String getConfigValue(long id, ConfigKey key) { + ResourceDetail vo = findDetail(id, key.key()); + return vo == null ? null : vo.getValue(); + } + + @Override + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new DataCenterDetailVO(resourceId, key, value)); + } + +} diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java index 353402d30cf..c2ed551a668 100755 --- a/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/DataCenterIpAddressDaoImpl.java @@ -33,12 +33,12 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; @Component -@Local(value={DataCenterIpAddressDao.class}) @DB(txn=false) +@Local(value={DataCenterIpAddressDao.class}) @DB public class DataCenterIpAddressDaoImpl extends GenericDaoBase implements DataCenterIpAddressDao { private static final Logger s_logger = 
Logger.getLogger(DataCenterIpAddressDaoImpl.class); @@ -52,7 +52,7 @@ public class DataCenterIpAddressDaoImpl extends GenericDaoBase implements DataCenterLinkLocalIpAddressDao { private static final Logger s_logger = Logger.getLogger(DataCenterLinkLocalIpAddressDaoImpl.class); @@ -55,7 +54,7 @@ public class DataCenterLinkLocalIpAddressDaoImpl extends GenericDaoBase { public List listAllocatedVnets(long physicalNetworkId); @@ -33,7 +34,7 @@ public interface DataCenterVnetDao extends GenericDao { public void delete(long physicalNetworkId); - public void deleteVnets(Transaction txn, long dcId, long physicalNetworkId, List vnets); + public void deleteVnets(TransactionLegacy txn, long dcId, long physicalNetworkId, List vnets); public void lockRange(long dcId, long physicalNetworkId, Integer start, Integer end); diff --git a/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java index d3a2409dc96..4e2aabf117f 100755 --- a/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/DataCenterVnetDaoImpl.java @@ -25,14 +25,12 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.exception.InvalidParameterValueException; import org.springframework.stereotype.Component; import com.cloud.dc.DataCenterVnetVO; import com.cloud.network.dao.AccountGuestVlanMapDao; import com.cloud.network.dao.AccountGuestVlanMapVO; import com.cloud.utils.db.DB; -import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; @@ -40,7 +38,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import 
com.cloud.utils.exception.CloudRuntimeException; /** @@ -48,7 +46,7 @@ import com.cloud.utils.exception.CloudRuntimeException; * data center/physical_network and the vnet that appears within the physical network. */ @Component -@DB(txn=false) +@DB public class DataCenterVnetDaoImpl extends GenericDaoBase implements DataCenterVnetDao { private final SearchBuilder FreeVnetSearch; @@ -123,7 +121,7 @@ public class DataCenterVnetDaoImpl extends GenericDaoBase vnets) { String insertVnet = "INSERT INTO `cloud`.`op_dc_vnet_alloc` (vnet, data_center_id, physical_network_id) VALUES ( ?, ?, ?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); PreparedStatement stmt = txn.prepareAutoCloseStatement(insertVnet); @@ -141,7 +139,7 @@ public class DataCenterVnetDaoImpl extends GenericDaoBase argument each string is a vlan. not a vlanRange. - public void deleteVnets(Transaction txn, long dcId, long physicalNetworkId, List vnets) { + public void deleteVnets(TransactionLegacy txn, long dcId, long physicalNetworkId, List vnets) { String deleteVnet = "DELETE FROM `cloud`.`op_dc_vnet_alloc` WHERE data_center_id=? AND physical_network_id=? 
AND taken IS NULL AND vnet=?"; try { PreparedStatement stmt = txn.prepareAutoCloseStatement(deleteVnet); @@ -173,7 +171,7 @@ public class DataCenterVnetDaoImpl extends GenericDaoBase implements DcDetailsDao, ScopedConfigStorage { - protected final SearchBuilder DcSearch; - protected final SearchBuilder DetailSearch; - - public DcDetailsDaoImpl() { - DcSearch = createSearchBuilder(); - DcSearch.and("dcId", DcSearch.entity().getDcId(), SearchCriteria.Op.EQ); - DcSearch.done(); - - DetailSearch = createSearchBuilder(); - DetailSearch.and("dcId", DetailSearch.entity().getDcId(), SearchCriteria.Op.EQ); - DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); - DetailSearch.done(); - } - - @Override - public DcDetailVO findDetail(long dcId, String name) { - SearchCriteria sc = DetailSearch.create(); - sc.setParameters("dcId", dcId); - sc.setParameters("name", name); - - return findOneIncludingRemovedBy(sc); - } - - @Override - public Map findDetails(long dcId) { - SearchCriteria sc = DcSearch.create(); - sc.setParameters("dcId", dcId); - - List results = search(sc, null); - Map details = new HashMap(results.size()); - for (DcDetailVO result : results) { - details.put(result.getName(), result.getValue()); - } - return details; - } - - @Override - public void deleteDetails(long dcId) { - SearchCriteria sc = DcSearch.create(); - sc.setParameters("dcId", dcId); - - List results = search(sc, null); - for (DcDetailVO result : results) { - remove(result.getId()); - } - } - - @Override - public void persist(long dcId, Map details) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - SearchCriteria sc = DcSearch.create(); - sc.setParameters("dcId", dcId); - expunge(sc); - - for (Map.Entry detail : details.entrySet()) { - DcDetailVO vo = new DcDetailVO(dcId, detail.getKey(), detail.getValue()); - persist(vo); - } - txn.commit(); - } - - @Override - public Scope getScope() { - return ConfigKey.Scope.Zone; - } - - @Override - public String 
getConfigValue(long id, ConfigKey key) { - DcDetailVO vo = findDetail(id, key.key()); - return vo == null ? null : vo.getValue(); - } - -} diff --git a/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java index 14b2931dcc5..384fa00350f 100644 --- a/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/HostPodDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={HostPodDao.class}) @@ -57,7 +57,7 @@ public class HostPodDaoImpl extends GenericDaoBase implements H DataCenterIdSearch.done(); PodIdSearch = createSearchBuilder(Long.class); - PodIdSearch.selectField(PodIdSearch.entity().getId()); + PodIdSearch.selectFields(PodIdSearch.entity().getId()); PodIdSearch.and("dataCenterId", PodIdSearch.entity().getDataCenterId(), Op.EQ); PodIdSearch.and("allocationState", PodIdSearch.entity().getAllocationState(), Op.EQ); PodIdSearch.done(); @@ -85,7 +85,7 @@ public class HostPodDaoImpl extends GenericDaoBase implements H HashMap> currentPodCidrSubnets = new HashMap>(); String selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + zoneId +" and removed IS NULL"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); ResultSet rs = stmt.executeQuery(); @@ -111,7 +111,7 @@ public class HostPodDaoImpl extends GenericDaoBase implements H @Override public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); HostPodVO pod = createForUpdate(); pod.setName(null); diff --git 
a/engine/schema/src/com/cloud/dc/dao/PodVlanDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/PodVlanDaoImpl.java index 413f9ed6c18..9e9a0e77d18 100755 --- a/engine/schema/src/com/cloud/dc/dao/PodVlanDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/PodVlanDaoImpl.java @@ -28,7 +28,7 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; /** @@ -49,7 +49,7 @@ public class PodVlanDaoImpl extends GenericDaoBase implements P public void add(long podId, int start, int end) { String insertVnet = "INSERT INTO `cloud`.`op_pod_vlan_alloc` (vlan, pod_id) VALUES ( ?, ?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); PreparedStatement stmt = txn.prepareAutoCloseStatement(insertVnet); @@ -68,7 +68,7 @@ public class PodVlanDaoImpl extends GenericDaoBase implements P public void delete(long podId) { String deleteVnet = "DELETE FROM `cloud`.`op_pod_vlan_alloc` WHERE pod_id = ?"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(deleteVnet); stmt.setLong(1, podId); @@ -82,7 +82,7 @@ public class PodVlanDaoImpl extends GenericDaoBase implements P SearchCriteria sc = FreeVlanSearch.create(); sc.setParameters("podId", podId); Date now = new Date(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); PodVlanVO vo = lockOneRandomRow(sc, true); diff --git a/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpAddressDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpAddressDaoImpl.java index 782ee0d9727..d6a38794ba5 100755 --- 
a/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpAddressDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpAddressDaoImpl.java @@ -18,29 +18,26 @@ package com.cloud.dc.dao; import java.util.Date; import java.util.List; -import java.util.Map; import javax.ejb.Local; -import javax.naming.ConfigurationException; import org.springframework.stereotype.Component; -import com.cloud.dc.DataCenterIpAddressVO; import com.cloud.dc.StorageNetworkIpAddressVO; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.GenericQueryBuilder; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={StorageNetworkIpAddressDao.class}) -@DB(txn=false) +@DB public class StorageNetworkIpAddressDaoImpl extends GenericDaoBase implements StorageNetworkIpAddressDao { protected final GenericSearchBuilder countInUserIp; protected final GenericSearchBuilder listInUseIp; @@ -56,7 +53,7 @@ public class StorageNetworkIpAddressDaoImpl extends GenericDaoBase sc = untakenIp.create(); sc.setParameters("rangeId", rangeId); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); StorageNetworkIpAddressVO ip = lockOneRandomRow(sc, true); if (ip == null) { diff --git a/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpRangeDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpRangeDaoImpl.java index d732e6fcb7a..517099a1c63 100755 --- a/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpRangeDaoImpl.java +++ 
b/engine/schema/src/com/cloud/dc/dao/StorageNetworkIpRangeDaoImpl.java @@ -17,28 +17,23 @@ package com.cloud.dc.dao; import java.util.List; -import java.util.Map; import javax.ejb.Local; -import javax.naming.ConfigurationException; import org.springframework.stereotype.Component; import com.cloud.dc.StorageNetworkIpRangeVO; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; -import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; @Component @Local(value={StorageNetworkIpRangeDao.class}) -@DB(txn=false) +@DB public class StorageNetworkIpRangeDaoImpl extends GenericDaoBase implements StorageNetworkIpRangeDao { protected final GenericSearchBuilder countRanges; @@ -50,22 +45,22 @@ public class StorageNetworkIpRangeDaoImpl extends GenericDaoBase listByPodId(long podId) { - SearchCriteriaService sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class); - sc.addAnd(sc.getEntity().getPodId(), Op.EQ, podId); + QueryBuilder sc = QueryBuilder.create(StorageNetworkIpRangeVO.class); + sc.and(sc.entity().getPodId(), Op.EQ, podId); return sc.list(); } @Override public List listByRangeId(long rangeId) { - SearchCriteriaService sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class); - sc.addAnd(sc.getEntity().getId(), Op.EQ, rangeId); + QueryBuilder sc = QueryBuilder.create(StorageNetworkIpRangeVO.class); + sc.and(sc.entity().getId(), Op.EQ, rangeId); return sc.list(); } @Override public List listByDataCenterId(long dcId) { - SearchCriteriaService sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class); - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); + QueryBuilder sc = 
QueryBuilder.create(StorageNetworkIpRangeVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ, dcId); return sc.list(); } diff --git a/engine/schema/src/com/cloud/dc/dao/VlanDaoImpl.java b/engine/schema/src/com/cloud/dc/dao/VlanDaoImpl.java index 6f5a01f1476..6ec819d17f1 100755 --- a/engine/schema/src/com/cloud/dc/dao/VlanDaoImpl.java +++ b/engine/schema/src/com/cloud/dc/dao/VlanDaoImpl.java @@ -28,7 +28,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import org.springframework.stereotype.Component; @@ -301,7 +301,7 @@ public class VlanDaoImpl extends GenericDaoBase implements VlanDao StringBuilder sql = new StringBuilder(FindZoneWideVlans); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); diff --git a/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java b/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java index c84aa60897c..3fd1d582302 100644 --- a/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java +++ b/engine/schema/src/com/cloud/domain/dao/DomainDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={DomainDao.class}) @@ -74,7 +74,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom FindAllChildrenSearch.done(); FindIdsOfAllChildrenSearch = createSearchBuilder(Long.class); - FindIdsOfAllChildrenSearch.selectField(FindIdsOfAllChildrenSearch.entity().getId()); + 
FindIdsOfAllChildrenSearch.selectFields(FindIdsOfAllChildrenSearch.entity().getId()); FindIdsOfAllChildrenSearch.and("path", FindIdsOfAllChildrenSearch.entity().getPath(), SearchCriteria.Op.LIKE); FindIdsOfAllChildrenSearch.done(); @@ -117,7 +117,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom return null; } - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); @@ -168,7 +168,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom String sql1 = "SELECT * from domain where parent = " + id + " and removed is null"; boolean success = false; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); DomainVO parentDomain = super.lockRow(domain.getParent(), true); @@ -224,7 +224,7 @@ public class DomainDaoImpl extends GenericDaoBase implements Dom @Override public List findAllChildren(String path, Long parentId){ SearchCriteria sc = FindAllChildrenSearch.create(); - sc.setParameters("path", "%"+path+"%"); + sc.setParameters("path", path+"%"); sc.setParameters("id", parentId); return listBy(sc); } diff --git a/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java b/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java index e5615db3433..f3a11c601e3 100644 --- a/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java +++ b/engine/schema/src/com/cloud/event/dao/EventDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.SearchCriteria.Op; @Component @@ -104,7 +104,7 @@ public class EventDaoImpl extends GenericDaoBase implements Event @Override public void archiveEvents(List events) { if (events != null && !events.isEmpty()) { - Transaction txn = 
Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (EventVO event : events) { event = lockRow(event.getId(), true); diff --git a/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java b/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java index cda02efe6bf..43339038c53 100644 --- a/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java +++ b/engine/schema/src/com/cloud/event/dao/UsageEventDaoImpl.java @@ -36,7 +36,7 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -66,7 +66,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem IpeventsSearch.and("networktype", IpeventsSearch.entity().getResourceType(), SearchCriteria.Op.EQ); IpeventsSearch.and().op("assignEvent", IpeventsSearch.entity().getType(), SearchCriteria.Op.EQ); IpeventsSearch.or("releaseEvent", IpeventsSearch.entity().getType(), SearchCriteria.Op.EQ); - IpeventsSearch.closeParen(); + IpeventsSearch.cp(); IpeventsSearch.done(); } @@ -90,7 +90,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem public synchronized List getRecentEvents(Date endDate) { long recentEventId = getMostRecentEventId(); long maxEventId = getMaxEventId(endDate); - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); String sql = COPY_EVENTS; if (recentEventId == 0) { if (s_logger.isDebugEnabled()) { @@ -120,7 +120,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem @DB private long getMostRecentEventId() { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try { List latestEvents = 
getLatestEvent(); @@ -140,7 +140,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem } private List findRecentEvents(Date endDate) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try { return listLatestEvents(endDate); } catch (Exception ex) { @@ -152,7 +152,7 @@ public class UsageEventDaoImpl extends GenericDaoBase implem } private long getMaxEventId(Date endDate) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { String sql = MAX_EVENT; diff --git a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java index b373737ee2b..c51e2c7ff60 100755 --- a/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/com/cloud/host/dao/HostDaoImpl.java @@ -59,13 +59,13 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; @Component -@Local(value = { HostDao.class }) -@DB(txn = false) +@Local(value = {HostDao.class}) +@DB @TableGenerator(name = "host_req_sq", table = "op_host", pkColumnName = "id", valueColumnName = "sequence", allocationSize = 1) public class HostDaoImpl extends GenericDaoBase implements HostDao { //FIXME: , ExternalIdDao { private static final Logger s_logger = Logger.getLogger(HostDaoImpl.class); @@ -123,13 +123,17 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao protected Attribute _msIdAttr; protected Attribute _pingTimeAttr; - @Inject protected HostDetailsDao _detailsDao; - @Inject protected HostTagsDao _hostTagsDao; - @Inject protected HostTransferMapDao 
_hostTransferDao; - @Inject protected ClusterDao _clusterDao; + @Inject + protected HostDetailsDao _detailsDao; + @Inject + protected HostTagsDao _hostTagsDao; + @Inject + protected HostTransferMapDao _hostTransferDao; + @Inject + protected ClusterDao _clusterDao; public HostDaoImpl() { - super(); + super(); } @PostConstruct @@ -271,18 +275,19 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao * SearchCriteria.Op.LTEQ); UnmanagedDirectConnectSearch.cp(); UnmanagedDirectConnectSearch.cp(); */ try { - HostTransferSearch = _hostTransferDao.createSearchBuilder(); + HostTransferSearch = _hostTransferDao.createSearchBuilder(); } catch (Throwable e) { - s_logger.debug("error", e); + s_logger.debug("error", e); } HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL); - UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(), JoinType.LEFTOUTER); + UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(), + JoinType.LEFTOUTER); ClusterManagedSearch = _clusterDao.createSearchBuilder(); ClusterManagedSearch.and("managed", ClusterManagedSearch.entity().getManagedState(), SearchCriteria.Op.EQ); - UnmanagedDirectConnectSearch.join("ClusterManagedSearch", ClusterManagedSearch, ClusterManagedSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getClusterId(), JoinType.INNER); + UnmanagedDirectConnectSearch.join("ClusterManagedSearch", ClusterManagedSearch, ClusterManagedSearch.entity().getId(), + UnmanagedDirectConnectSearch.entity().getClusterId(), JoinType.INNER); UnmanagedDirectConnectSearch.done(); - DirectConnectSearch = createSearchBuilder(); DirectConnectSearch.and("resource", DirectConnectSearch.entity().getResource(), SearchCriteria.Op.NNULL); DirectConnectSearch.and("id", 
DirectConnectSearch.entity().getId(), SearchCriteria.Op.EQ); @@ -306,7 +311,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao AvailHypevisorInZone.done(); HostsInStatusSearch = createSearchBuilder(Long.class); - HostsInStatusSearch.selectField(HostsInStatusSearch.entity().getId()); + HostsInStatusSearch.selectFields(HostsInStatusSearch.entity().getId()); HostsInStatusSearch.and("dc", HostsInStatusSearch.entity().getDataCenterId(), Op.EQ); HostsInStatusSearch.and("pod", HostsInStatusSearch.entity().getPodId(), Op.EQ); HostsInStatusSearch.and("cluster", HostsInStatusSearch.entity().getClusterId(), Op.EQ); @@ -370,7 +375,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao HostsInClusterSearch.done(); HostIdSearch = createSearchBuilder(Long.class); - HostIdSearch.selectField(HostIdSearch.entity().getId()); + HostIdSearch.selectFields(HostIdSearch.entity().getId()); HostIdSearch.and("dataCenterId", HostIdSearch.entity().getDataCenterId(), Op.EQ); HostIdSearch.done(); @@ -386,7 +391,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao public long countBy(long clusterId, ResourceState... 
states) { SearchCriteria sc = MaintenanceCountSearch.create(); - sc.setParameters("resourceState", (Object[]) states); + sc.setParameters("resourceState", (Object[])states); sc.setParameters("cluster", clusterId); List hosts = listBy(sc); @@ -482,9 +487,10 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return ownCluster; } - @Override @DB + @Override + @DB public List findAndUpdateDirectAgentToLoad(long lastPingSecondsAfter, Long limit, long managementServerId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); if (s_logger.isDebugEnabled()) { @@ -580,24 +586,26 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return assignedHosts; } - @Override @DB + @Override + @DB public List findAndUpdateApplianceToLoad(long lastPingSecondsAfter, long managementServerId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); - txn.start(); - SearchCriteria sc = UnmanagedApplianceSearch.create(); - sc.setParameters("lastPinged", lastPingSecondsAfter); - sc.setParameters("types", Type.ExternalDhcp, Type.ExternalFirewall, Type.ExternalLoadBalancer, Type.BaremetalDhcp, Type.BaremetalPxe, Type.TrafficMonitor, Type.L2Networking); - List hosts = lockRows(sc, null, true); + txn.start(); + SearchCriteria sc = UnmanagedApplianceSearch.create(); + sc.setParameters("lastPinged", lastPingSecondsAfter); + sc.setParameters("types", Type.ExternalDhcp, Type.ExternalFirewall, Type.ExternalLoadBalancer, Type.BaremetalDhcp, Type.BaremetalPxe, Type.TrafficMonitor, + Type.L2Networking); + List hosts = lockRows(sc, null, true); - for (HostVO host : hosts) { - host.setManagementServerId(managementServerId); - update(host.getId(), host); - } + for (HostVO host : hosts) { + host.setManagementServerId(managementServerId); + update(host.getId(), host); + } - txn.commit(); + txn.commit(); - return hosts; + return hosts; } @Override @@ -657,7 
+665,6 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return listBy(sc); } - @Override public List listAllUpAndEnabledNonHAHosts(Type type, Long clusterId, Long podId, long dcId, String haTag) { SearchBuilder hostTagSearch = null; @@ -721,12 +728,12 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @DB @Override public List findLostHosts(long timeout) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); ResultSet rs = null; try { - String sql = "select h.id from host h left join cluster c on h.cluster_id=c.id where h.mgmt_server_id is not null and h.last_ping < ? and h.status in ('Up', 'Updating', 'Disconnected', 'Connecting') and h.type not in ('ExternalFirewall', 'ExternalLoadBalancer', 'TrafficMonitor', 'SecondaryStorage', 'LocalSecondaryStorage', 'L2Networking') and (h.cluster_id is null or c.managed_state = 'Managed') ;" ; + String sql = "select h.id from host h left join cluster c on h.cluster_id=c.id where h.mgmt_server_id is not null and h.last_ping < ? 
and h.status in ('Up', 'Updating', 'Disconnected', 'Connecting') and h.type not in ('ExternalFirewall', 'ExternalLoadBalancer', 'TrafficMonitor', 'SecondaryStorage', 'LocalSecondaryStorage', 'L2Networking') and (h.cluster_id is null or c.managed_state = 'Managed') ;"; pstmt = txn.prepareStatement(sql); pstmt.setLong(1, timeout); rs = pstmt.executeQuery(); @@ -772,7 +779,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao public HostVO persist(HostVO host) { final String InsertSequenceSql = "INSERT INTO op_host(id) VALUES(?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); HostVO dbHost = super.persist(host); @@ -798,7 +805,7 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @Override @DB public boolean update(Long hostId, HostVO host) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); boolean persisted = super.update(hostId, host); @@ -818,13 +825,13 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao @DB public List getRunningHostCounts(Date cutTime) { String sql = "select * from (" + "select h.data_center_id, h.type, count(*) as count from host as h INNER JOIN mshost as m ON h.mgmt_server_id=m.msid " - + "where h.status='Up' and h.type='SecondaryStorage' and m.last_update > ? " + "group by h.data_center_id, h.type " + "UNION ALL " - + "select h.data_center_id, h.type, count(*) as count from host as h INNER JOIN mshost as m ON h.mgmt_server_id=m.msid " - + "where h.status='Up' and h.type='Routing' and m.last_update > ? " + "group by h.data_center_id, h.type) as t " + "ORDER by t.data_center_id, t.type"; + + "where h.status='Up' and h.type='SecondaryStorage' and m.last_update > ? 
" + "group by h.data_center_id, h.type " + "UNION ALL " + + "select h.data_center_id, h.type, count(*) as count from host as h INNER JOIN mshost as m ON h.mgmt_server_id=m.msid " + + "where h.status='Up' and h.type='Routing' and m.last_update > ? " + "group by h.data_center_id, h.type) as t " + "ORDER by t.data_center_id, t.type"; ArrayList l = new ArrayList(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); ; PreparedStatement pstmt = null; try { @@ -870,90 +877,100 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao return customSearch(sc, null).get(0); } + @Override + public boolean updateState(Status oldStatus, Event event, Status newStatus, Host vo, Object data) { + HostVO host = findById(vo.getId()); + if (host == null) { + if (event == Event.Remove && newStatus == Status.Removed) { + host = findByIdIncludingRemoved(vo.getId()); + } + } - @Override - public boolean updateState(Status oldStatus, Event event, Status newStatus, Host vo, Object data) { - HostVO host = findById(vo.getId()); - if(host == null){ - if(event == Event.Remove && newStatus == Status.Removed){ - host = findByIdIncludingRemoved(vo.getId()); - } - } - - if(host == null){ + if (host == null) { return false; - } - long oldPingTime = host.getLastPinged(); + } + long oldPingTime = host.getLastPinged(); - SearchBuilder sb = createSearchBuilder(); - sb.and("status", sb.entity().getStatus(), SearchCriteria.Op.EQ); - sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); - sb.and("update", sb.entity().getUpdated(), SearchCriteria.Op.EQ); - if (newStatus.checkManagementServer()) { - sb.and("ping", sb.entity().getLastPinged(), SearchCriteria.Op.EQ); - sb.and().op("nullmsid", sb.entity().getManagementServerId(), SearchCriteria.Op.NULL); - sb.or("msid", sb.entity().getManagementServerId(), SearchCriteria.Op.EQ); - sb.closeParen(); - } - sb.done(); + SearchBuilder sb = createSearchBuilder(); + sb.and("status", 
sb.entity().getStatus(), SearchCriteria.Op.EQ); + sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); + sb.and("update", sb.entity().getUpdated(), SearchCriteria.Op.EQ); + if (newStatus.checkManagementServer()) { + sb.and("ping", sb.entity().getLastPinged(), SearchCriteria.Op.EQ); + sb.and().op("nullmsid", sb.entity().getManagementServerId(), SearchCriteria.Op.NULL); + sb.or("msid", sb.entity().getManagementServerId(), SearchCriteria.Op.EQ); + sb.cp(); + } + sb.done(); - SearchCriteria sc = sb.create(); + SearchCriteria sc = sb.create(); - sc.setParameters("status", oldStatus); - sc.setParameters("id", host.getId()); - sc.setParameters("update", host.getUpdated()); - long oldUpdateCount = host.getUpdated(); - if (newStatus.checkManagementServer()) { - sc.setParameters("ping", oldPingTime); - sc.setParameters("msid", host.getManagementServerId()); - } + sc.setParameters("status", oldStatus); + sc.setParameters("id", host.getId()); + sc.setParameters("update", host.getUpdated()); + long oldUpdateCount = host.getUpdated(); + if (newStatus.checkManagementServer()) { + sc.setParameters("ping", oldPingTime); + sc.setParameters("msid", host.getManagementServerId()); + } - long newUpdateCount = host.incrUpdated(); - UpdateBuilder ub = getUpdateBuilder(host); - ub.set(host, _statusAttr, newStatus); - if (newStatus.updateManagementServer()) { - if (newStatus.lostConnection()) { - ub.set(host, _msIdAttr, null); - } else { - ub.set(host, _msIdAttr, host.getManagementServerId()); - } - if (event.equals(Event.Ping) || event.equals(Event.AgentConnected)) { - ub.set(host, _pingTimeAttr, System.currentTimeMillis() >> 10); - } - } - if (event.equals(Event.ManagementServerDown)) { - ub.set(host, _pingTimeAttr, ((System.currentTimeMillis() >> 10) - (10 * 60))); - } - int result = update(ub, sc, null); - assert result <= 1 : "How can this update " + result + " rows? 
"; + long newUpdateCount = host.incrUpdated(); + UpdateBuilder ub = getUpdateBuilder(host); + ub.set(host, _statusAttr, newStatus); + if (newStatus.updateManagementServer()) { + if (newStatus.lostConnection()) { + ub.set(host, _msIdAttr, null); + } else { + ub.set(host, _msIdAttr, host.getManagementServerId()); + } + if (event.equals(Event.Ping) || event.equals(Event.AgentConnected)) { + ub.set(host, _pingTimeAttr, System.currentTimeMillis() >> 10); + } + } + if (event.equals(Event.ManagementServerDown)) { + ub.set(host, _pingTimeAttr, ((System.currentTimeMillis() >> 10) - (10 * 60))); + } + int result = update(ub, sc, null); + assert result <= 1 : "How can this update " + result + " rows? "; - if (status_logger.isDebugEnabled() && result == 0) { - HostVO ho = findById(host.getId()); - assert ho != null : "How how how? : " + host.getId(); + if (status_logger.isDebugEnabled() && result == 0) { + HostVO ho = findById(host.getId()); + assert ho != null : "How how how? : " + host.getId(); - StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString()); - str.append(". Name=").append(host.getName()); - str.append("; New=[status=").append(newStatus.toString()).append(":msid=") - .append(newStatus.lostConnection() ? 
"null" : host.getManagementServerId()).append(":lastpinged=").append(host.getLastPinged()).append("]"); - str.append("; Old=[status=").append(oldStatus.toString()).append(":msid=").append(host.getManagementServerId()).append(":lastpinged=") - .append(oldPingTime).append("]"); - str.append("; DB=[status=").append(vo.getStatus().toString()).append(":msid=").append(vo.getManagementServerId()).append(":lastpinged=") - .append(vo.getLastPinged()).append(":old update count=").append(oldUpdateCount).append("]"); - status_logger.debug(str.toString()); - } else { - StringBuilder msg = new StringBuilder("Agent status update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old status = " + oldStatus); - msg.append("; event = " + event); - msg.append("; new status = " + newStatus); - msg.append("; old update count = " + oldUpdateCount); - msg.append("; new update count = " + newUpdateCount + "]"); - status_logger.debug(msg.toString()); - } + StringBuilder str = new StringBuilder("Unable to update host for event:").append(event.toString()); + str.append(". Name=").append(host.getName()); + str.append("; New=[status=") + .append(newStatus.toString()) + .append(":msid=") + .append(newStatus.lostConnection() ? 
"null" : host.getManagementServerId()) + .append(":lastpinged=") + .append(host.getLastPinged()) + .append("]"); + str.append("; Old=[status=").append(oldStatus.toString()).append(":msid=").append(host.getManagementServerId()).append(":lastpinged=").append(oldPingTime).append("]"); + str.append("; DB=[status=") + .append(vo.getStatus().toString()) + .append(":msid=") + .append(vo.getManagementServerId()) + .append(":lastpinged=") + .append(vo.getLastPinged()) + .append(":old update count=") + .append(oldUpdateCount) + .append("]"); + status_logger.debug(str.toString()); + } else { + StringBuilder msg = new StringBuilder("Agent status update: ["); + msg.append("id = " + host.getId()); + msg.append("; name = " + host.getName()); + msg.append("; old status = " + oldStatus); + msg.append("; event = " + event); + msg.append("; new status = " + newStatus); + msg.append("; old update count = " + oldUpdateCount); + msg.append("; new update count = " + newUpdateCount + "]"); + status_logger.debug(msg.toString()); + } - return result > 0; - } + return result > 0; + } @Override public boolean updateResourceState(ResourceState oldState, ResourceState.Event event, ResourceState newState, Host vo) { @@ -978,20 +995,20 @@ public class HostDaoImpl extends GenericDaoBase implements HostDao assert ho != null : "How how how? 
: " + host.getId(); StringBuilder str = new StringBuilder("Unable to update resource state: ["); - str.append("m = " + host.getId()); - str.append("; name = " + host.getName()); - str.append("; old state = " + oldState); - str.append("; event = " + event); - str.append("; new state = " + newState + "]"); - state_logger.debug(str.toString()); + str.append("m = " + host.getId()); + str.append("; name = " + host.getName()); + str.append("; old state = " + oldState); + str.append("; event = " + event); + str.append("; new state = " + newState + "]"); + state_logger.debug(str.toString()); } else { - StringBuilder msg = new StringBuilder("Resource state update: ["); - msg.append("id = " + host.getId()); - msg.append("; name = " + host.getName()); - msg.append("; old state = " + oldState); - msg.append("; event = " + event); - msg.append("; new state = " + newState + "]"); - state_logger.debug(msg.toString()); + StringBuilder msg = new StringBuilder("Resource state update: ["); + msg.append("id = " + host.getId()); + msg.append("; name = " + host.getName()); + msg.append("; old state = " + oldState); + msg.append("; event = " + event); + msg.append("; new state = " + newState + "]"); + state_logger.debug(msg.toString()); } return result > 0; diff --git a/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java index 47cdeb30633..7b7267cccf5 100644 --- a/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/host/dao/HostDetailsDaoImpl.java @@ -31,7 +31,7 @@ import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -96,7 +96,7 @@ public class HostDetailsDaoImpl extends GenericDaoBase implement public 
void persist(long hostId, Map details) { final String InsertOrUpdateSql = "INSERT INTO `cloud`.`host_details` (host_id, name, value) VALUES (?,?,?) ON DUPLICATE KEY UPDATE value=?"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (Map.Entry detail : details.entrySet()) { diff --git a/engine/schema/src/com/cloud/host/dao/HostTagsDaoImpl.java b/engine/schema/src/com/cloud/host/dao/HostTagsDaoImpl.java index 0e93275d360..b12fd7e1ac1 100644 --- a/engine/schema/src/com/cloud/host/dao/HostTagsDaoImpl.java +++ b/engine/schema/src/com/cloud/host/dao/HostTagsDaoImpl.java @@ -27,7 +27,7 @@ import com.cloud.host.HostTagVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value=HostTagsDao.class) @@ -56,7 +56,7 @@ public class HostTagsDaoImpl extends GenericDaoBase implements @Override public void persist(long hostId, List hostTags) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = HostSearch.create(); diff --git a/engine/schema/src/com/cloud/keystore/KeystoreDaoImpl.java b/engine/schema/src/com/cloud/keystore/KeystoreDaoImpl.java index f53f228e6b3..370f874ecd9 100644 --- a/engine/schema/src/com/cloud/keystore/KeystoreDaoImpl.java +++ b/engine/schema/src/com/cloud/keystore/KeystoreDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -74,7 +74,7 @@ public class KeystoreDaoImpl extends GenericDaoBase implements @Override @DB public void 
save(String name, String certificate, String key, String domainSuffix) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); diff --git a/engine/schema/src/com/cloud/network/dao/AccountGuestVlanMapDaoImpl.java b/engine/schema/src/com/cloud/network/dao/AccountGuestVlanMapDaoImpl.java index e7a7b34d9bd..85f37c9c8f0 100644 --- a/engine/schema/src/com/cloud/network/dao/AccountGuestVlanMapDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/AccountGuestVlanMapDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria; @Component @Local(value={AccountGuestVlanMapDao.class}) -@DB(txn=false) +@DB public class AccountGuestVlanMapDaoImpl extends GenericDaoBase implements AccountGuestVlanMapDao { protected SearchBuilder AccountSearch; diff --git a/engine/schema/src/com/cloud/network/dao/ExternalFirewallDeviceDaoImpl.java b/engine/schema/src/com/cloud/network/dao/ExternalFirewallDeviceDaoImpl.java index 01f8861f9d1..f0e40c1512b 100644 --- a/engine/schema/src/com/cloud/network/dao/ExternalFirewallDeviceDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/ExternalFirewallDeviceDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=ExternalFirewallDeviceDao.class) @DB(txn=false) +@Local(value=ExternalFirewallDeviceDao.class) @DB public class ExternalFirewallDeviceDaoImpl extends GenericDaoBase implements ExternalFirewallDeviceDao { final SearchBuilder physicalNetworkServiceProviderSearch; final SearchBuilder physicalNetworkIdSearch; diff --git a/engine/schema/src/com/cloud/network/dao/ExternalLoadBalancerDeviceDaoImpl.java b/engine/schema/src/com/cloud/network/dao/ExternalLoadBalancerDeviceDaoImpl.java index ea6437dc2c3..e8ef0d22420 100644 --- a/engine/schema/src/com/cloud/network/dao/ExternalLoadBalancerDeviceDaoImpl.java +++ 
b/engine/schema/src/com/cloud/network/dao/ExternalLoadBalancerDeviceDaoImpl.java @@ -29,7 +29,7 @@ import javax.ejb.Local; import java.util.List; @Component -@Local(value=ExternalLoadBalancerDeviceDao.class) @DB(txn=false) +@Local(value=ExternalLoadBalancerDeviceDao.class) @DB public class ExternalLoadBalancerDeviceDaoImpl extends GenericDaoBase implements ExternalLoadBalancerDeviceDao { final SearchBuilder physicalNetworkIdSearch; final SearchBuilder physicalNetworkServiceProviderSearch; diff --git a/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java b/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java index b007e19e779..26f399d1246 100644 --- a/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/FirewallRulesCidrsDaoImpl.java @@ -28,7 +28,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value=FirewallRulesCidrsDao.class) @@ -58,7 +58,7 @@ public class FirewallRulesCidrsDaoImpl extends GenericDaoBase sourceCidrs) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (String tag : sourceCidrs) { diff --git a/engine/schema/src/com/cloud/network/dao/FirewallRulesDaoImpl.java b/engine/schema/src/com/cloud/network/dao/FirewallRulesDaoImpl.java index 41f911ca1d1..daac54a1709 100644 --- a/engine/schema/src/com/cloud/network/dao/FirewallRulesDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/FirewallRulesDaoImpl.java @@ -29,9 +29,8 @@ import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRule.State; import com.cloud.network.rules.FirewallRule.TrafficType; import com.cloud.network.rules.FirewallRuleVO; -import 
com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; -import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; @@ -40,11 +39,11 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = FirewallRulesDao.class) -@DB(txn = false) +@DB public class FirewallRulesDaoImpl extends GenericDaoBase implements FirewallRulesDao { protected final SearchBuilder AllFieldsSearch; @@ -221,7 +220,7 @@ public class FirewallRulesDaoImpl extends GenericDaoBase i @Override @DB public FirewallRuleVO persist(FirewallRuleVO firewallRule) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); FirewallRuleVO dbfirewallRule = super.persist(firewallRule); @@ -310,18 +309,18 @@ public class FirewallRulesDaoImpl extends GenericDaoBase i @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); FirewallRuleVO entry = findById(id); if (entry != null) { if (entry.getPurpose() == Purpose.LoadBalancing) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.LoadBalancer); + _tagsDao.removeByIdAndType(id, ResourceObjectType.LoadBalancer); } else if (entry.getPurpose() == Purpose.PortForwarding) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.PortForwardingRule); + _tagsDao.removeByIdAndType(id, ResourceObjectType.PortForwardingRule); } else if (entry.getPurpose() == Purpose.Firewall) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.FirewallRule); + _tagsDao.removeByIdAndType(id, 
ResourceObjectType.FirewallRule); } else if (entry.getPurpose() == Purpose.NetworkACL) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.NetworkACL); + _tagsDao.removeByIdAndType(id, ResourceObjectType.NetworkACL); } } boolean result = super.remove(id); diff --git a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java index 9f5f403631b..08bff891968 100755 --- a/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/IPAddressDaoImpl.java @@ -21,7 +21,7 @@ import com.cloud.dc.Vlan.VlanType; import com.cloud.dc.VlanVO; import com.cloud.dc.dao.VlanDao; import com.cloud.network.IpAddress.State; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @@ -31,7 +31,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.net.Ip; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -285,7 +285,7 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen @Override @DB public int countIPs(long dcId, Long accountId, String vlanId, String vlanGateway, String vlanNetmask) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); int ipCount = 0; try { String sql = "SELECT count(*) FROM user_ip_address u INNER JOIN vlan v on (u.vlan_db_id = v.id AND v.data_center_id = ? AND v.vlan_id = ? AND v.vlan_gateway = ? AND v.vlan_netmask = ? 
AND u.account_id = ?)"; @@ -390,11 +390,11 @@ public class IPAddressDaoImpl extends GenericDaoBase implemen @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); IPAddressVO entry = findById(id); if (entry != null) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.SecurityGroup); + _tagsDao.removeByIdAndType(id, ResourceObjectType.SecurityGroup); } boolean result = super.remove(id); txn.commit(); diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java index 1b2c2bf862f..90ba4a32828 100644 --- a/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkDaoImpl.java @@ -40,10 +40,8 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; -import com.cloud.offerings.dao.NetworkOfferingDaoImpl; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; -import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.utils.db.*; import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchCriteria.Func; @@ -52,7 +50,7 @@ import com.cloud.utils.net.NetUtils; @Component @Local(value = NetworkDao.class) -@DB(txn = false) +@DB() public class NetworkDaoImpl extends GenericDaoBase implements NetworkDao { SearchBuilder AllFieldsSearch; SearchBuilder AccountSearch; @@ -226,7 +224,7 @@ public class NetworkDaoImpl extends GenericDaoBase implements N OfferingAccountNetworkSearch.done(); GarbageCollectedSearch = createSearchBuilder(Long.class); - GarbageCollectedSearch.selectField(GarbageCollectedSearch.entity().getId()); + 
GarbageCollectedSearch.selectFields(GarbageCollectedSearch.entity().getId()); SearchBuilder join7 = _ntwkOpDao.createSearchBuilder(); join7.and("activenics", join7.entity().getActiveNicsCount(), Op.EQ); join7.and("gc", join7.entity().isGarbageCollected(), Op.EQ); @@ -291,7 +289,7 @@ public class NetworkDaoImpl extends GenericDaoBase implements N @Override @DB public NetworkVO persist(NetworkVO network, boolean gc, Map serviceProviderMap) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); // 1) create network @@ -311,7 +309,7 @@ public class NetworkDaoImpl extends GenericDaoBase implements N @Override @DB public boolean update(Long networkId, NetworkVO network, Map serviceProviderMap) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); super.update(networkId, network); @@ -327,7 +325,7 @@ public class NetworkDaoImpl extends GenericDaoBase implements N @Override @DB public void persistNetworkServiceProviders(long networkId, Map serviceProviderMap) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (String service : serviceProviderMap.keySet()) { NetworkServiceMapVO serviceMap = new NetworkServiceMapVO(networkId, Service.getService(service), Provider.getProvider(serviceProviderMap.get(service))); @@ -578,11 +576,11 @@ public class NetworkDaoImpl extends GenericDaoBase implements N @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); NetworkVO entry = findById(id); if (entry != null) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.Network); + _tagsDao.removeByIdAndType(id, ResourceObjectType.Network); } boolean result = super.remove(id); txn.commit(); @@ -601,7 +599,7 @@ public class NetworkDaoImpl extends GenericDaoBase implements N @Override public 
boolean updateState(State currentState, Event event, State nextState, Network vo, Object data) { // TODO: ensure this update is correct - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); NetworkVO networkVo = (NetworkVO) vo; diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDetailVO.java b/engine/schema/src/com/cloud/network/dao/NetworkDetailVO.java new file mode 100644 index 00000000000..b7c4db8a14e --- /dev/null +++ b/engine/schema/src/com/cloud/network/dao/NetworkDetailVO.java @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; + +@Entity +@Table(name="network_details") +public class NetworkDetailVO implements ResourceDetail { + @Id + @GeneratedValue(strategy= GenerationType.IDENTITY) + @Column(name="id") + private long id; + + @Column(name="network_id") + private long resourceId; + + @Column(name="name") + private String name; + + @Column(name="value", length=1024) + private String value; + + @Column(name="display") + private boolean display; + + public NetworkDetailVO() {} + + public NetworkDetailVO(long networkId, String name, String value) { + this.resourceId = networkId; + this.name = name; + this.value = value; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } +} diff --git a/framework/db/src/com/cloud/utils/db/SearchCriteriaService.java b/engine/schema/src/com/cloud/network/dao/NetworkDetailsDao.java old mode 100755 new mode 100644 similarity index 71% rename from framework/db/src/com/cloud/utils/db/SearchCriteriaService.java rename to engine/schema/src/com/cloud/network/dao/NetworkDetailsDao.java index 2947255f452..efe84f6bff2 --- a/framework/db/src/com/cloud/utils/db/SearchCriteriaService.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkDetailsDao.java @@ -14,16 +14,12 @@ // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
-package com.cloud.utils.db; +package com.cloud.network.dao; -import java.util.List; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; -import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.GenericDao; -public interface SearchCriteriaService { - public void selectField(Object... useless); - public void addAnd(Object useless, Op op, Object...values); - public List list(); - public T getEntity(); - public K find(); -} +public interface NetworkDetailsDao extends GenericDao, ResourceDetailsDao { + +} \ No newline at end of file diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDetailsDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkDetailsDaoImpl.java new file mode 100644 index 00000000000..b42481cc606 --- /dev/null +++ b/engine/schema/src/com/cloud/network/dao/NetworkDetailsDaoImpl.java @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.dao; + +import javax.ejb.Local; + +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import org.springframework.stereotype.Component; + + +@Component +@Local(value=NetworkDetailsDao.class) +public class NetworkDetailsDaoImpl extends ResourceDetailsDaoBase implements NetworkDetailsDao { + + @Override + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new NetworkDetailVO(resourceId, key, value)); + } + +} diff --git a/engine/schema/src/com/cloud/network/dao/NetworkDomainDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkDomainDaoImpl.java index bbb920337c8..882c7fecdb4 100644 --- a/engine/schema/src/com/cloud/network/dao/NetworkDomainDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkDomainDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=NetworkDomainDao.class) @DB(txn=false) +@Local(value=NetworkDomainDao.class) @DB() public class NetworkDomainDaoImpl extends GenericDaoBase implements NetworkDomainDao { final SearchBuilder AllFieldsSearch; final SearchBuilder DomainsSearch; diff --git a/engine/schema/src/com/cloud/network/dao/NetworkExternalFirewallDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkExternalFirewallDaoImpl.java index b1767609429..9a2bd76b314 100644 --- a/engine/schema/src/com/cloud/network/dao/NetworkExternalFirewallDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkExternalFirewallDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=NetworkExternalFirewallDao.class) @DB(txn=false) +@Local(value=NetworkExternalFirewallDao.class) @DB() public class NetworkExternalFirewallDaoImpl extends GenericDaoBase implements NetworkExternalFirewallDao { final SearchBuilder networkIdSearch; diff --git 
a/engine/schema/src/com/cloud/network/dao/NetworkExternalLoadBalancerDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkExternalLoadBalancerDaoImpl.java index c29c164fd28..8d77a5b73c8 100644 --- a/engine/schema/src/com/cloud/network/dao/NetworkExternalLoadBalancerDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkExternalLoadBalancerDaoImpl.java @@ -29,7 +29,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=NetworkExternalLoadBalancerDao.class) @DB(txn=false) +@Local(value=NetworkExternalLoadBalancerDao.class) @DB() public class NetworkExternalLoadBalancerDaoImpl extends GenericDaoBase implements NetworkExternalLoadBalancerDao { final SearchBuilder networkIdSearch; diff --git a/engine/schema/src/com/cloud/network/dao/NetworkOpDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkOpDaoImpl.java index a3f54b78850..18e2c806910 100644 --- a/engine/schema/src/com/cloud/network/dao/NetworkOpDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkOpDaoImpl.java @@ -39,7 +39,7 @@ public class NetworkOpDaoImpl extends GenericDaoBase implemen super(); ActiveNicsSearch = createSearchBuilder(Integer.class); - ActiveNicsSearch.selectField(ActiveNicsSearch.entity().getActiveNicsCount()); + ActiveNicsSearch.selectFields(ActiveNicsSearch.entity().getActiveNicsCount()); ActiveNicsSearch.and("network", ActiveNicsSearch.entity().getId(), Op.EQ); ActiveNicsSearch.done(); diff --git a/engine/schema/src/com/cloud/network/dao/NetworkServiceMapDaoImpl.java b/engine/schema/src/com/cloud/network/dao/NetworkServiceMapDaoImpl.java index 3cdd73885c8..d1402a9e942 100644 --- a/engine/schema/src/com/cloud/network/dao/NetworkServiceMapDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/NetworkServiceMapDaoImpl.java @@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @Component -@Local(value=NetworkServiceMapDao.class) @DB(txn=false) 
+@Local(value=NetworkServiceMapDao.class) @DB() public class NetworkServiceMapDaoImpl extends GenericDaoBase implements NetworkServiceMapDao { final SearchBuilder AllFieldsSearch; final SearchBuilder MultipleServicesSearch; @@ -57,7 +57,7 @@ public class NetworkServiceMapDaoImpl extends GenericDaoBase implements PhysicalNetworkDao { final SearchBuilder ZoneSearch; diff --git a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkIsolationMethodDaoImpl.java b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkIsolationMethodDaoImpl.java index 04508e72545..0cebb48cb6b 100644 --- a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkIsolationMethodDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkIsolationMethodDaoImpl.java @@ -35,7 +35,7 @@ public class PhysicalNetworkIsolationMethodDaoImpl extends GenericDaoBase implements PhysicalNetworkServiceProviderDao { final SearchBuilder physicalNetworkSearch; final SearchBuilder physicalNetworkServiceProviderSearch; diff --git a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkTagDaoImpl.java b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkTagDaoImpl.java index c3e9f73d86b..b38f67a2c8c 100644 --- a/engine/schema/src/com/cloud/network/dao/PhysicalNetworkTagDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/PhysicalNetworkTagDaoImpl.java @@ -35,7 +35,7 @@ public class PhysicalNetworkTagDaoImpl extends GenericDaoBase implements PhysicalNetworkTrafficTypeDao { final SearchBuilder physicalNetworkSearch; final GenericSearchBuilder kvmAllFieldsSearch; @@ -52,31 +52,31 @@ public class PhysicalNetworkTrafficTypeDaoImpl extends GenericDaoBase implements PortProfileDao { protected static final Logger s_logger = Logger.getLogger(PortProfileDaoImpl.class); @@ -66,7 +66,7 @@ public class PortProfileDaoImpl extends GenericDaoBase impl String condition = "(trunk_low_vlan_id BETWEEN " + lowVlanId + " AND " + highVlanId + ")" + " OR (trunk_high_vlan_id BETWEEN " + lowVlanId + " AND " + 
highVlanId + ")"; String selectSql = "SELECT * FROM `" + dbName + "`.`" + tableName + "` WHERE " + condition; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); ResultSet rs = stmt.executeQuery(); diff --git a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDao.java b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDao.java index 6e3b48355e7..fd8fde76677 100644 --- a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDao.java +++ b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDao.java @@ -24,7 +24,8 @@ import com.cloud.utils.db.GenericDao; public interface RemoteAccessVpnDao extends GenericDao { RemoteAccessVpnVO findByPublicIpAddress(long ipAddressId); RemoteAccessVpnVO findByPublicIpAddressAndState(long ipAddressId, RemoteAccessVpn.State state); - RemoteAccessVpnVO findByAccountAndNetwork(Long accountId, Long zoneId); + RemoteAccessVpnVO findByAccountAndNetwork(Long accountId, Long networkId); + RemoteAccessVpnVO findByAccountAndVpc(Long accountId, Long vpcId); List findByAccount(Long accountId); List listByNetworkId(Long networkId); } diff --git a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java index ed732d8e43d..2d85c155386 100644 --- a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnDaoImpl.java @@ -40,6 +40,7 @@ public class RemoteAccessVpnDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); + sc.setParameters("accountId", accountId); + sc.setParameters("vpcId", vpcId); + return findOneBy(sc); + } + @Override public List findByAccount(Long accountId) { SearchCriteria sc = AllFieldsSearch.create(); diff --git a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnVO.java 
b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnVO.java index 2e8ee912464..af82281bdc2 100644 --- a/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnVO.java +++ b/engine/schema/src/com/cloud/network/dao/RemoteAccessVpnVO.java @@ -34,7 +34,7 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn { private long accountId; @Column(name="network_id") - private long networkId; + private Long networkId; @Column(name="domain_id") private long domainId; @@ -62,11 +62,14 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn { @Column(name="uuid") private String uuid; + @Column(name="vpc_id") + private Long vpcId; + public RemoteAccessVpnVO() { this.uuid = UUID.randomUUID().toString(); } - public RemoteAccessVpnVO(long accountId, long domainId, long networkId, long publicIpId, String localIp, String ipRange, String presharedKey) { + public RemoteAccessVpnVO(long accountId, long domainId, Long networkId, long publicIpId, Long vpcId, String localIp, String ipRange, String presharedKey) { this.accountId = accountId; this.serverAddressId = publicIpId; this.ipRange = ipRange; @@ -76,6 +79,7 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn { this.networkId = networkId; this.state = State.Added; this.uuid = UUID.randomUUID().toString(); + this.vpcId = vpcId; } @Override @@ -126,7 +130,7 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn { } @Override - public long getNetworkId() { + public Long getNetworkId() { return networkId; } @@ -139,4 +143,9 @@ public class RemoteAccessVpnVO implements RemoteAccessVpn { public String getUuid() { return uuid; } + + @Override + public Long getVpcId() { + return vpcId; + } } diff --git a/engine/schema/src/com/cloud/network/dao/RouterNetworkDaoImpl.java b/engine/schema/src/com/cloud/network/dao/RouterNetworkDaoImpl.java index b0b633c55e9..6b0c6ef3f21 100644 --- a/engine/schema/src/com/cloud/network/dao/RouterNetworkDaoImpl.java +++ 
b/engine/schema/src/com/cloud/network/dao/RouterNetworkDaoImpl.java @@ -36,7 +36,7 @@ public class RouterNetworkDaoImpl extends GenericDaoBase super(); RouterNetworksSearch = createSearchBuilder(Long.class); - RouterNetworksSearch.selectField(RouterNetworksSearch.entity().getNetworkId()); + RouterNetworksSearch.selectFields(RouterNetworksSearch.entity().getNetworkId()); RouterNetworksSearch.and("routerId", RouterNetworksSearch.entity().getRouterId(), Op.EQ); RouterNetworksSearch.done(); diff --git a/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java b/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java index c9f38ec88c4..b3f2416f63f 100644 --- a/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/UserIpv6AddressDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; +import com.cloud.utils.db.GenericQueryBuilder; @Component @Local(value=UserIpv6AddressDao.class) diff --git a/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDao.java b/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDao.java index d929293c56d..70d71308904 100644 --- a/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDao.java +++ b/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDao.java @@ -20,12 +20,12 @@ import java.util.List; import com.cloud.network.VirtualRouterProvider; import com.cloud.network.element.VirtualRouterProviderVO; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.utils.db.GenericDao; public interface VirtualRouterProviderDao extends GenericDao { - public VirtualRouterProviderVO findByNspIdAndType(long nspId, VirtualRouterProviderType type); - public List 
listByEnabledAndType(boolean enabled, VirtualRouterProviderType type); - public VirtualRouterProviderVO findByIdAndEnabledAndType(long id, boolean enabled, VirtualRouterProviderType type); - public List listByType(VirtualRouterProviderType type); + public VirtualRouterProviderVO findByNspIdAndType(long nspId, Type type); + public List listByEnabledAndType(boolean enabled, Type type); + public VirtualRouterProviderVO findByIdAndEnabledAndType(long id, boolean enabled, Type type); + public List listByType(Type type); } diff --git a/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDaoImpl.java b/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDaoImpl.java index dba835f9e1c..8dce4e42ea7 100644 --- a/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDaoImpl.java +++ b/engine/schema/src/com/cloud/network/dao/VirtualRouterProviderDaoImpl.java @@ -23,14 +23,14 @@ import javax.ejb.Local; import org.springframework.stereotype.Component; import com.cloud.network.element.VirtualRouterProviderVO; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @Component -@Local(value=VirtualRouterProviderDao.class) @DB(txn=false) +@Local(value=VirtualRouterProviderDao.class) @DB() public class VirtualRouterProviderDaoImpl extends GenericDaoBase implements VirtualRouterProviderDao { final SearchBuilder AllFieldsSearch; @@ -46,7 +46,7 @@ public class VirtualRouterProviderDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); sc.setParameters("nsp_id", nspId); sc.setParameters("type", type); @@ -54,7 +54,7 @@ public class VirtualRouterProviderDaoImpl extends GenericDaoBase listByEnabledAndType(boolean enabled, VirtualRouterProviderType type) { + public List listByEnabledAndType(boolean enabled, Type type) { SearchCriteria 
sc = AllFieldsSearch.create(); sc.setParameters("enabled", enabled); sc.setParameters("type", type); @@ -62,7 +62,7 @@ public class VirtualRouterProviderDaoImpl extends GenericDaoBase sc = AllFieldsSearch.create(); sc.setParameters("id", id); sc.setParameters("enabled", enabled); @@ -71,7 +71,7 @@ public class VirtualRouterProviderDaoImpl extends GenericDaoBase listByType(VirtualRouterProviderType type) { + public List listByType(Type type) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("type", type); return listBy(sc); diff --git a/engine/schema/src/com/cloud/network/element/VirtualRouterProviderVO.java b/engine/schema/src/com/cloud/network/element/VirtualRouterProviderVO.java index ff2b6210504..62a25c1bc05 100644 --- a/engine/schema/src/com/cloud/network/element/VirtualRouterProviderVO.java +++ b/engine/schema/src/com/cloud/network/element/VirtualRouterProviderVO.java @@ -42,7 +42,7 @@ public class VirtualRouterProviderVO implements VirtualRouterProvider { @Column(name="type") @Enumerated(EnumType.STRING) - private VirtualRouterProviderType type; + private Type type; @Column(name="enabled") private boolean enabled; @@ -60,7 +60,7 @@ public class VirtualRouterProviderVO implements VirtualRouterProvider { this.uuid = UUID.randomUUID().toString(); } - public VirtualRouterProviderVO(long nspId, VirtualRouterProviderType type) { + public VirtualRouterProviderVO(long nspId, Type type) { this.nspId = nspId; this.type = type; this.uuid = UUID.randomUUID().toString(); @@ -81,7 +81,7 @@ public class VirtualRouterProviderVO implements VirtualRouterProvider { } @Override - public VirtualRouterProviderType getType() { + public Type getType() { return this.type; } @@ -106,7 +106,7 @@ public class VirtualRouterProviderVO implements VirtualRouterProvider { this.id = id; } - public void setType(VirtualRouterProviderType type) { + public void setType(Type type) { this.type = type; } diff --git 
a/engine/schema/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java b/engine/schema/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java index d82cc4a79b5..31131b8bd3d 100644 --- a/engine/schema/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java +++ b/engine/schema/src/com/cloud/network/security/dao/SecurityGroupDaoImpl.java @@ -24,7 +24,7 @@ import javax.inject.Inject; import org.springframework.stereotype.Component; import com.cloud.network.security.SecurityGroupVO; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; @@ -32,7 +32,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={SecurityGroupDao.class}) @@ -109,11 +109,11 @@ public class SecurityGroupDaoImpl extends GenericDaoBase @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SecurityGroupVO entry = findById(id); if (entry != null) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.SecurityGroup); + _tagsDao.removeByIdAndType(id, ResourceObjectType.SecurityGroup); } boolean result = super.remove(id); txn.commit(); @@ -123,11 +123,11 @@ public class SecurityGroupDaoImpl extends GenericDaoBase @Override @DB public boolean expunge(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SecurityGroupVO entry = findById(id); if (entry != null) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.SecurityGroup); + _tagsDao.removeByIdAndType(id, ResourceObjectType.SecurityGroup); } boolean result = super.expunge(id); txn.commit(); diff 
--git a/engine/schema/src/com/cloud/network/security/dao/SecurityGroupVMMapDaoImpl.java b/engine/schema/src/com/cloud/network/security/dao/SecurityGroupVMMapDaoImpl.java index 46135d18029..26d5a42bc7b 100644 --- a/engine/schema/src/com/cloud/network/security/dao/SecurityGroupVMMapDaoImpl.java +++ b/engine/schema/src/com/cloud/network/security/dao/SecurityGroupVMMapDaoImpl.java @@ -59,7 +59,7 @@ public class SecurityGroupVMMapDaoImpl extends GenericDaoBase sc = UntakenWorkSearch.create(); sc.setParameters("step", Step.Scheduled); @@ -149,7 +149,7 @@ public class SecurityGroupWorkDaoImpl extends GenericDaoBase sc = VmIdSeqNumSearch.create(); sc.setParameters("vmId", vmId); @@ -180,7 +180,7 @@ public class SecurityGroupWorkDaoImpl extends GenericDaoBase im return createOrUpdateUsingMultiInsert(workItems); } - private int executeWithRetryOnDeadlock(Transaction txn, String pstmt, List vmIds) throws SQLException { + private int executeWithRetryOnDeadlock(TransactionLegacy txn, String pstmt, List vmIds) throws SQLException { int numUpdated = 0; final int maxTries = 3; @@ -120,7 +120,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase im } protected int createOrUpdateUsingMultiInsert(Set workItems) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); int size = workItems.size(); int count = 0; @@ -156,7 +156,7 @@ public class VmRulesetLogDaoImpl extends GenericDaoBase im } protected int createOrUpdateUsingBatch(Set workItems) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement stmtInsert = null; int [] queryResult = null; int count=0; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLDaoImpl.java index fd3308d176f..5a2eeede193 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLDaoImpl.java +++ 
b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLDaoImpl.java @@ -19,14 +19,13 @@ package com.cloud.network.vpc.dao; import com.cloud.network.vpc.NetworkACLVO; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; import org.springframework.stereotype.Component; import javax.ejb.Local; @Component @Local(value = NetworkACLDao.class) -@DB(txn = false) +@DB() public class NetworkACLDaoImpl extends GenericDaoBase implements NetworkACLDao{ protected NetworkACLDaoImpl() { diff --git a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java index 8162ce85ca1..6bd47bd679f 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/NetworkACLItemDaoImpl.java @@ -28,7 +28,7 @@ import java.util.List; @Component @Local(value = NetworkACLItemDao.class) -@DB(txn = false) +@DB() public class NetworkACLItemDaoImpl extends GenericDaoBase implements NetworkACLItemDao { protected final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java index fe435c05175..7511657c0ea 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/PrivateIpDaoImpl.java @@ -32,11 +32,11 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = PrivateIpDao.class) -@DB(txn = false) +@DB() public class PrivateIpDaoImpl extends GenericDaoBase implements PrivateIpDao { private static final Logger s_logger = Logger.getLogger(PrivateIpDaoImpl.class); @@ -78,7 +78,7 @@ 
public class PrivateIpDaoImpl extends GenericDaoBase implemen sc.setParameters("ipAddress", requestedIp); } - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); PrivateIpVO vo = lockOneRandomRow(sc, true); if (vo == null) { diff --git a/engine/schema/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java index 518237d96b3..a85b907f89c 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/StaticRouteDaoImpl.java @@ -25,7 +25,7 @@ import org.springframework.stereotype.Component; import com.cloud.network.vpc.StaticRoute; import com.cloud.network.vpc.StaticRouteVO; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.tags.dao.ResourceTagsDaoImpl; @@ -36,11 +36,11 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = StaticRouteDao.class) -@DB(txn = false) +@DB() public class StaticRouteDaoImpl extends GenericDaoBase implements StaticRouteDao{ protected final SearchBuilder AllFieldsSearch; protected final SearchBuilder NotRevokedSearch; @@ -106,11 +106,11 @@ public class StaticRouteDaoImpl extends GenericDaoBase impl @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); StaticRouteVO entry = findById(id); if (entry != null) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.StaticRoute); + _tagsDao.removeByIdAndType(id, ResourceObjectType.StaticRoute); } boolean result = super.remove(id); txn.commit(); 
diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcDao.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcDao.java index 5a33217c028..57a26214028 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcDao.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcDao.java @@ -40,8 +40,8 @@ public interface VpcDao extends GenericDao{ long countByAccountId(long accountId); - VpcVO persist(VpcVO vpc, Map serviceProviderMap); + VpcVO persist(VpcVO vpc, Map> serviceProviderMap); void persistVpcServiceProviders(long vpcId, - Map serviceProviderMap); + Map> serviceProviderMap); } diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcDaoImpl.java index 6560b90ce7d..ba030e9a44a 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcDaoImpl.java @@ -28,7 +28,7 @@ import org.springframework.stereotype.Component; import com.cloud.network.vpc.Vpc; import com.cloud.network.vpc.VpcVO; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.utils.db.DB; @@ -38,11 +38,11 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = VpcDao.class) -@DB(txn = false) +@DB() public class VpcDaoImpl extends GenericDaoBase implements VpcDao{ final GenericSearchBuilder CountByOfferingId; final SearchBuilder AllFieldsSearch; @@ -107,11 +107,11 @@ public class VpcDaoImpl extends GenericDaoBase implements VpcDao{ @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VpcVO entry = findById(id); 
if (entry != null) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.Vpc); + _tagsDao.removeByIdAndType(id, ResourceObjectType.Vpc); } boolean result = super.remove(id); txn.commit(); @@ -128,8 +128,8 @@ public class VpcDaoImpl extends GenericDaoBase implements VpcDao{ @Override @DB - public VpcVO persist(VpcVO vpc, Map serviceProviderMap) { - Transaction txn = Transaction.currentTxn(); + public VpcVO persist(VpcVO vpc, Map> serviceProviderMap) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VpcVO newVpc = super.persist(vpc); persistVpcServiceProviders(vpc.getId(), serviceProviderMap); @@ -139,12 +139,14 @@ public class VpcDaoImpl extends GenericDaoBase implements VpcDao{ @Override @DB - public void persistVpcServiceProviders(long vpcId, Map serviceProviderMap) { - Transaction txn = Transaction.currentTxn(); + public void persistVpcServiceProviders(long vpcId, Map> serviceProviderMap) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (String service : serviceProviderMap.keySet()) { - VpcServiceMapVO serviceMap = new VpcServiceMapVO(vpcId, Network.Service.getService(service), Network.Provider.getProvider(serviceProviderMap.get(service))); - _vpcSvcMap.persist(serviceMap); + for (String provider : serviceProviderMap.get(service)) { + VpcServiceMapVO serviceMap = new VpcServiceMapVO(vpcId, Network.Service.getService(service), Network.Provider.getProvider(provider)); + _vpcSvcMap.persist(serviceMap); + } } txn.commit(); } diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcGatewayDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcGatewayDaoImpl.java index 13c37c4e0e6..e718209529d 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcGatewayDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcGatewayDaoImpl.java @@ -31,7 +31,7 @@ import java.util.List; @Component @Local(value = VpcGatewayDao.class) -@DB(txn = false) +@DB() public class VpcGatewayDaoImpl extends GenericDaoBase 
implements VpcGatewayDao{ protected final SearchBuilder AllFieldsSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java index 2cda5471c14..3453da04483 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingDaoImpl.java @@ -26,11 +26,11 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = VpcOfferingDao.class) -@DB(txn = false) +@DB() public class VpcOfferingDaoImpl extends GenericDaoBase implements VpcOfferingDao{ final SearchBuilder AllFieldsSearch; @@ -51,7 +51,7 @@ public class VpcOfferingDaoImpl extends GenericDaoBase impl @Override @DB public boolean remove(Long vpcOffId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VpcOfferingVO offering = findById(vpcOffId); offering.setUniqueName(null); diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingServiceMapDaoImpl.java b/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingServiceMapDaoImpl.java index 4b5f1b9620b..8b503c9146d 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingServiceMapDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcOfferingServiceMapDaoImpl.java @@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria.Func; @Component @Local(value = VpcOfferingServiceMapDao.class) -@DB(txn = false) +@DB() public class VpcOfferingServiceMapDaoImpl extends GenericDaoBase implements VpcOfferingServiceMapDao{ final SearchBuilder AllFieldsSearch; final SearchBuilder MultipleServicesSearch; diff --git a/engine/schema/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java 
b/engine/schema/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java index a992181f864..227694fe159 100644 --- a/engine/schema/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java +++ b/engine/schema/src/com/cloud/network/vpc/dao/VpcServiceMapDaoImpl.java @@ -33,7 +33,7 @@ import com.cloud.utils.db.SearchCriteria; import org.springframework.stereotype.Component; @Component -@Local(value=VpcServiceMapDao.class) @DB(txn=false) +@Local(value=VpcServiceMapDao.class) @DB() public class VpcServiceMapDaoImpl extends GenericDaoBase implements VpcServiceMapDao { final SearchBuilder AllFieldsSearch; final SearchBuilder MultipleServicesSearch; @@ -56,7 +56,7 @@ public class VpcServiceMapDaoImpl extends GenericDaoBase DistinctProvidersSearch = createSearchBuilder(String.class); DistinctProvidersSearch.and("vpcId", DistinctProvidersSearch.entity().getVpcId(), SearchCriteria.Op.EQ); DistinctProvidersSearch.and("provider", DistinctProvidersSearch.entity().getProvider(), SearchCriteria.Op.EQ); - DistinctProvidersSearch.selectField(DistinctProvidersSearch.entity().getProvider()); + DistinctProvidersSearch.selectFields(DistinctProvidersSearch.entity().getProvider()); DistinctProvidersSearch.done(); } diff --git a/engine/schema/src/com/cloud/offerings/NetworkOfferingVO.java b/engine/schema/src/com/cloud/offerings/NetworkOfferingVO.java index eefdc9442c8..d9fb54399f9 100755 --- a/engine/schema/src/com/cloud/offerings/NetworkOfferingVO.java +++ b/engine/schema/src/com/cloud/offerings/NetworkOfferingVO.java @@ -136,6 +136,8 @@ public class NetworkOfferingVO implements NetworkOffering { @Column(name = "concurrent_connections") Integer concurrentConnections; + @Column(name = "keep_alive_enabled") + boolean keepAliveEnabled = false; @Override public String getDisplayText() { @@ -148,6 +150,15 @@ public class NetworkOfferingVO implements NetworkOffering { @Column(name = "public_lb") boolean publicLb; + @Override + public boolean isKeepAliveEnabled() { + return keepAliveEnabled; + } + 
+ public void setKeepAliveEnabled(boolean keepAliveEnabled) { + this.keepAliveEnabled = keepAliveEnabled; + } + @Override public long getId() { return id; @@ -430,6 +441,7 @@ public class NetworkOfferingVO implements NetworkOffering { this.internalLb = internalLb; } + @Override public Integer getConcurrentConnections() { return this.concurrentConnections; } diff --git a/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java b/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java index ef8237a48f5..19290a60693 100644 --- a/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingDaoImpl.java @@ -38,11 +38,11 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = NetworkOfferingDao.class) -@DB(txn = false) +@DB() public class NetworkOfferingDaoImpl extends GenericDaoBase implements NetworkOfferingDao { final SearchBuilder NameSearch; final SearchBuilder SystemOfferingSearch; @@ -76,7 +76,7 @@ public class NetworkOfferingDaoImpl extends GenericDaoBase details) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); //1) persist the offering NetworkOfferingVO vo = super.persist(off); diff --git a/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java b/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java index 7282443ff02..6694eb826b9 100644 --- a/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java +++ b/engine/schema/src/com/cloud/offerings/dao/NetworkOfferingServiceMapDaoImpl.java @@ -34,7 +34,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; 
@Component -@Local(value=NetworkOfferingServiceMapDao.class) @DB(txn=false) +@Local(value=NetworkOfferingServiceMapDao.class) @DB() public class NetworkOfferingServiceMapDaoImpl extends GenericDaoBase implements NetworkOfferingServiceMapDao { final SearchBuilder AllFieldsSearch; @@ -72,7 +72,7 @@ public class NetworkOfferingServiceMapDaoImpl extends GenericDaoBase implements P @DB public boolean remove(Long projectId) { boolean result = false; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); ProjectVO projectToRemove = findById(projectId); projectToRemove.setName(null); @@ -84,7 +84,7 @@ public class ProjectDaoImpl extends GenericDaoBase implements P return false; } - _tagsDao.removeByIdAndType(projectId, TaggedResourceType.Project); + _tagsDao.removeByIdAndType(projectId, ResourceObjectType.Project); result = super.remove(projectId); txn.commit(); diff --git a/engine/schema/src/com/cloud/service/ServiceOfferingDetailsVO.java b/engine/schema/src/com/cloud/service/ServiceOfferingDetailsVO.java index b005c738e82..4ab313438f2 100644 --- a/engine/schema/src/com/cloud/service/ServiceOfferingDetailsVO.java +++ b/engine/schema/src/com/cloud/service/ServiceOfferingDetailsVO.java @@ -16,8 +16,6 @@ // under the License. 
package com.cloud.service; -import org.apache.cloudstack.api.InternalIdentity; - import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; @@ -25,49 +23,59 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.api.ResourceDetail; + @Entity @Table(name="service_offering_details") -public class ServiceOfferingDetailsVO implements InternalIdentity { +public class ServiceOfferingDetailsVO implements ResourceDetail { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") private long id; @Column(name="service_offering_id") - private long serviceOfferingId; + private long resourceId; @Column(name="name") private String name; @Column(name="value") private String value; + + @Column(name="display") + boolean display; protected ServiceOfferingDetailsVO() { } public ServiceOfferingDetailsVO(long serviceOfferingId, String name, String value) { - this.serviceOfferingId = serviceOfferingId; + this.resourceId = serviceOfferingId; this.name = name; this.value = value; } - public long getServiceOfferingId() { - return serviceOfferingId; + @Override + public long getResourceId() { + return resourceId; } + @Override public String getName() { return name; } + @Override public String getValue() { return value; } - public void setValue(String value) { - this.value = value; - } - + @Override public long getId() { return id; } + + @Override + public boolean isDisplay() { + return display; + } } \ No newline at end of file diff --git a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java index 14b2abf8fc4..f807f0df565 100644 --- a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java +++ b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDaoImpl.java @@ -16,6 +16,7 @@ // under the License. 
package com.cloud.service.dao; +import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; @@ -27,6 +28,7 @@ import javax.persistence.EntityExistsException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.service.ServiceOfferingDetailsVO; import com.cloud.service.ServiceOfferingVO; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @@ -34,7 +36,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @Component -@Local(value={ServiceOfferingDao.class}) @DB(txn=false) +@Local(value={ServiceOfferingDao.class}) @DB() public class ServiceOfferingDaoImpl extends GenericDaoBase implements ServiceOfferingDao { protected static final Logger s_logger = Logger.getLogger(ServiceOfferingDaoImpl.class); @@ -160,15 +162,22 @@ public class ServiceOfferingDaoImpl extends GenericDaoBase details = detailsDao.findDetails(serviceOffering.getId()); + Map details = detailsDao.listDetailsKeyPairs(serviceOffering.getId()); serviceOffering.setDetails(details); } @Override public void saveDetails(ServiceOfferingVO serviceOffering) { Map details = serviceOffering.getDetails(); - if (details != null) { - detailsDao.persist(serviceOffering.getId(), details); + if (details == null) { + return; } + + List resourceDetails = new ArrayList(); + for (String key : details.keySet()) { + resourceDetails.add(new ServiceOfferingDetailsVO(serviceOffering.getId(), key, details.get(key))); + } + + detailsDao.saveDetails(resourceDetails); } } diff --git a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDao.java b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDao.java index 38169105819..b377e6c502a 100644 --- a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDao.java +++ b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDao.java @@ -16,14 +16,10 @@ // under the License. 
package com.cloud.service.dao; -import java.util.Map; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; import com.cloud.service.ServiceOfferingDetailsVO; import com.cloud.utils.db.GenericDao; -public interface ServiceOfferingDetailsDao extends GenericDao { - Map findDetails(long serviceOfferingId); - void persist(long serviceOfferingId, Map details); - ServiceOfferingDetailsVO findDetail(long serviceOfferingId, String name); - void deleteDetails(long serviceOfferingId); +public interface ServiceOfferingDetailsDao extends GenericDao, ResourceDetailsDao { } \ No newline at end of file diff --git a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java index 91d736a38c4..5d818612d20 100644 --- a/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/service/dao/ServiceOfferingDetailsDaoImpl.java @@ -16,83 +16,21 @@ // under the License. 
package com.cloud.service.dao; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.springframework.stereotype.Component; import com.cloud.service.ServiceOfferingDetailsVO; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; @Component @Local(value=ServiceOfferingDetailsDao.class) -public class ServiceOfferingDetailsDaoImpl extends GenericDaoBase +public class ServiceOfferingDetailsDaoImpl extends ResourceDetailsDaoBase implements ServiceOfferingDetailsDao { - protected final SearchBuilder ServiceOfferingSearch; - protected final SearchBuilder DetailSearch; - - public ServiceOfferingDetailsDaoImpl() { - ServiceOfferingSearch = createSearchBuilder(); - ServiceOfferingSearch.and("serviceOfferingId", ServiceOfferingSearch.entity().getServiceOfferingId(), SearchCriteria.Op.EQ); - ServiceOfferingSearch.done(); - - DetailSearch = createSearchBuilder(); - DetailSearch.and("serviceOfferingId", DetailSearch.entity().getServiceOfferingId(), SearchCriteria.Op.EQ); - DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); - DetailSearch.done(); - } @Override - public ServiceOfferingDetailsVO findDetail(long serviceOfferingId, String name) { - SearchCriteria sc = DetailSearch.create(); - sc.setParameters("serviceOfferingId", serviceOfferingId); - sc.setParameters("name", name); - ServiceOfferingDetailsVO detail = findOneIncludingRemovedBy(sc); - return detail; - } - - @Override - public Map findDetails(long serviceOfferingId) { - SearchCriteria sc = ServiceOfferingSearch.create(); - sc.setParameters("serviceOfferingId", serviceOfferingId); - List results = search(sc, null); - Map details = new HashMap(results.size()); - for (ServiceOfferingDetailsVO result : results) { - details.put(result.getName(), 
result.getValue()); - } - - return details; - } - - @Override - public void deleteDetails(long serviceOfferingId) { - SearchCriteria sc = ServiceOfferingSearch.create(); - sc.setParameters("serviceOfferingId", serviceOfferingId); - List results = search(sc, null); - for (ServiceOfferingDetailsVO result : results) { - remove(result.getId()); - } - } - - @Override - public void persist(long serviceOfferingId, Map details) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - SearchCriteria sc = ServiceOfferingSearch.create(); - sc.setParameters("serviceOfferingId", serviceOfferingId); - expunge(sc); - - for (Map.Entry detail : details.entrySet()) { - String value = detail.getValue(); - ServiceOfferingDetailsVO vo = new ServiceOfferingDetailsVO(serviceOfferingId, detail.getKey(), value); - persist(vo); - } - txn.commit(); + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new ServiceOfferingDetailsVO(resourceId, key, value)); } + } diff --git a/engine/schema/src/com/cloud/storage/VMTemplateDetailVO.java b/engine/schema/src/com/cloud/storage/VMTemplateDetailVO.java index 3d4c7ef30db..52efe3be407 100644 --- a/engine/schema/src/com/cloud/storage/VMTemplateDetailVO.java +++ b/engine/schema/src/com/cloud/storage/VMTemplateDetailVO.java @@ -16,8 +16,6 @@ // under the License. 
package com.cloud.storage; -import org.apache.cloudstack.api.InternalIdentity; - import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; @@ -25,61 +23,59 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.api.ResourceDetail; + @Entity @Table(name = "vm_template_details") -public class VMTemplateDetailVO implements InternalIdentity { +public class VMTemplateDetailVO implements ResourceDetail { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @Column(name = "id") private long id; @Column(name = "template_id") - private long templateId; + private long resourceId; @Column(name = "name") private String name; @Column(name = "value", length = 1024) private String value; + + @Column(name="display") + private boolean display; public VMTemplateDetailVO() { } public VMTemplateDetailVO(long templateId, String name, String value) { - this.templateId = templateId; + this.resourceId = templateId; this.name = name; this.value = value; } + @Override public long getId() { return id; } - public long getTemplateId() { - return templateId; + @Override + public long getResourceId() { + return resourceId; } + @Override public String getName() { return name; } + @Override public String getValue() { return value; } - - public void setId(long id) { - this.id = id; - } - - public void setTemplateId(long templateId) { - this.templateId = templateId; - } - - public void setName(String name) { - this.name = name; - } - - public void setValue(String value) { - this.value = value; + + @Override + public boolean isDisplay() { + return display; } } diff --git a/engine/schema/src/com/cloud/storage/VMTemplateStoragePoolVO.java b/engine/schema/src/com/cloud/storage/VMTemplateStoragePoolVO.java index 8ee0c4a5955..b9886e08237 100644 --- a/engine/schema/src/com/cloud/storage/VMTemplateStoragePoolVO.java +++ 
b/engine/schema/src/com/cloud/storage/VMTemplateStoragePoolVO.java @@ -257,7 +257,7 @@ public class VMTemplateStoragePoolVO implements VMTemplateStorageResourceAssoc, @Override public String toString() { - return new StringBuilder("TmplPool[").append(id).append("-").append(templateId).append("-").append("poolId") + return new StringBuilder("TmplPool[").append(id).append("-").append(templateId).append("-").append(poolId) .append("-").append(installPath).append("]").toString(); } diff --git a/engine/schema/src/com/cloud/storage/VolumeDetailVO.java b/engine/schema/src/com/cloud/storage/VolumeDetailVO.java index b0c8c1dbf35..f9b7653b38d 100644 --- a/engine/schema/src/com/cloud/storage/VolumeDetailVO.java +++ b/engine/schema/src/com/cloud/storage/VolumeDetailVO.java @@ -16,8 +16,6 @@ // under the License. package com.cloud.storage; -import org.apache.cloudstack.api.InternalIdentity; - import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; @@ -25,61 +23,59 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.api.ResourceDetail; + @Entity @Table(name="volume_details") -public class VolumeDetailVO implements InternalIdentity { +public class VolumeDetailVO implements ResourceDetail { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") private long id; @Column(name="volume_id") - private long volumeId; + private long resourceId; @Column(name="name") private String name; @Column(name="value", length=1024) private String value; + + @Column(name="display") + private boolean display; public VolumeDetailVO() {} public VolumeDetailVO(long volumeId, String name, String value) { - this.volumeId = volumeId; + this.resourceId = volumeId; this.name = name; this.value = value; } + @Override public long getId() { return id; } - public long getVolumeId() { - return volumeId; - } - + @Override public String getName() { return name; } + 
@Override public String getValue() { return value; } - public void setId(long id) { - this.id = id; + @Override + public long getResourceId() { + return resourceId; } - - public void setVolumeId(long volumeId) { - this.volumeId = volumeId; - } - - public void setName(String name) { - this.name = name; - } - - public void setValue(String value) { - this.value = value; + + @Override + public boolean isDisplay() { + return display; } } diff --git a/engine/schema/src/com/cloud/storage/VolumeVO.java b/engine/schema/src/com/cloud/storage/VolumeVO.java index ea3d6bffa67..df7cfd4a749 100755 --- a/engine/schema/src/com/cloud/storage/VolumeVO.java +++ b/engine/schema/src/com/cloud/storage/VolumeVO.java @@ -35,6 +35,7 @@ import javax.persistence.Transient; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.GenericDao; +import com.cloud.vm.VirtualMachine.State; @Entity @Table(name = "volumes") @@ -154,7 +155,7 @@ public class VolumeVO implements Volume { private Long vmSnapshotChainSize; @Column(name = "iso_id") - private long isoId; + private Long isoId; @Transient // @Column(name="reservation") @@ -570,7 +571,13 @@ public class VolumeVO implements Volume { return this.isoId; } - public void setIsoId(long isoId) { + public void setIsoId(Long isoId) { this.isoId =isoId; } + + // don't use this directly, use volume state machine instead + // This method is used by UpdateVolume as a part of "Better control over first class objects in CS" + public void setState(State state) { + this.state = state; + } } diff --git a/engine/schema/src/com/cloud/storage/dao/LaunchPermissionDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/LaunchPermissionDaoImpl.java index 3e32f1a9529..60c87af0fcc 100644 --- a/engine/schema/src/com/cloud/storage/dao/LaunchPermissionDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/LaunchPermissionDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.utils.DateUtil; import 
com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -69,7 +69,7 @@ public class LaunchPermissionDaoImpl extends GenericDaoBase accountIds) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { txn.start(); @@ -106,7 +106,7 @@ public class LaunchPermissionDaoImpl extends GenericDaoBase listPermittedTemplates(long accountId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); List permittedTemplates = new ArrayList(); PreparedStatement pstmt = null; try { diff --git a/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java index f5319ea120a..c56a3cf43b8 100644 --- a/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/SnapshotDaoImpl.java @@ -27,7 +27,7 @@ import javax.inject.Inject; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Snapshot; import com.cloud.storage.Snapshot.Event; @@ -47,7 +47,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; @@ -204,7 +204,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements @Override public Long getSecHostId(long volumeId) { - Transaction txn = 
Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = GET_SECHOST_ID; try { @@ -221,7 +221,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements @Override public long getLastSnapshot(long volumeId, DataStoreRole role) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = GET_LAST_SNAPSHOT; try { @@ -240,7 +240,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements @Override public long updateSnapshotVersion(long volumeId, String from, String to) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = UPDATE_SNAPSHOT_VERSION; try { @@ -258,7 +258,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements @Override public long updateSnapshotSecHost(long dcId, long secHostId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = UPDATE_SECHOST_ID; try { @@ -304,11 +304,11 @@ public class SnapshotDaoImpl extends GenericDaoBase implements @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SnapshotVO entry = findById(id); if (entry != null) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.Snapshot); + _tagsDao.removeByIdAndType(id, ResourceObjectType.Snapshot); } boolean result = super.remove(id); txn.commit(); @@ -324,7 +324,7 @@ public class SnapshotDaoImpl extends GenericDaoBase implements @Override public boolean updateState(State currentState, Event event, State nextState, SnapshotVO snapshot, Object data) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SnapshotVO snapshotVO = 
(SnapshotVO) snapshot; snapshotVO.setState(nextState); diff --git a/engine/schema/src/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java index f60ea176f52..ed10270d8dd 100644 --- a/engine/schema/src/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/StoragePoolDetailsDaoImpl.java @@ -16,72 +16,20 @@ // under the License. package com.cloud.storage.dao; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; @Local(value = StoragePoolDetailsDao.class) -public class StoragePoolDetailsDaoImpl extends GenericDaoBase implements StoragePoolDetailsDao, ScopedConfigStorage { +public class StoragePoolDetailsDaoImpl extends ResourceDetailsDaoBase implements StoragePoolDetailsDao, ScopedConfigStorage { - protected final SearchBuilder PoolSearch; - - protected StoragePoolDetailsDaoImpl() { - super(); - PoolSearch = createSearchBuilder(); - PoolSearch.and("pool", PoolSearch.entity().getPoolId(), SearchCriteria.Op.EQ); - PoolSearch.and("name", PoolSearch.entity().getName(), SearchCriteria.Op.EQ); - PoolSearch.done(); - } - - @Override - public void update(long poolId, Map details) { - Transaction txn = Transaction.currentTxn(); - SearchCriteria sc = PoolSearch.create(); - sc.setParameters("pool", poolId); - - txn.start(); - expunge(sc); - for (Map.Entry entry 
: details.entrySet()) { - StoragePoolDetailVO detail = new StoragePoolDetailVO(poolId, entry.getKey(), entry.getValue()); - persist(detail); - } - txn.commit(); - } - - @Override - public Map getDetails(long poolId) { - SearchCriteria sc = PoolSearch.create(); - sc.setParameters("pool", poolId); - - List details = listBy(sc); - Map detailsMap = new HashMap(); - for (StoragePoolDetailVO detail : details) { - detailsMap.put(detail.getName(), detail.getValue()); - } - - return detailsMap; - } - - @Override - public StoragePoolDetailVO findDetail(long poolId, String name) { - SearchCriteria sc = PoolSearch.create(); - sc.setParameters("pool", poolId); - sc.setParameters("name", name); - - return findOneIncludingRemovedBy(sc); + public StoragePoolDetailsDaoImpl() { } @Override @@ -94,4 +42,9 @@ public class StoragePoolDetailsDaoImpl extends GenericDaoBase listByHostStatus(long poolId, Status hostStatus) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); ResultSet rs = null; @@ -142,7 +142,7 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase> getDatacenterStoragePoolHostInfo(long dcId, boolean sharedOnly) { ArrayList> l = new ArrayList>(); String sql = sharedOnly ? 
SHARED_STORAGE_POOL_HOST_INFO : STORAGE_POOL_HOST_INFO; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); ; PreparedStatement pstmt = null; try { @@ -169,7 +169,7 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase sc = HostSearch.create(); sc.setParameters("host_id", hostId); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); remove(sc); txn.commit(); @@ -180,7 +180,7 @@ public class StoragePoolHostDaoImpl extends GenericDaoBase sc = PoolHostSearch.create(); sc.setParameters("host_id", hostId); sc.setParameters("pool_id", poolId); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); remove(sc); txn.commit(); diff --git a/engine/schema/src/com/cloud/storage/dao/StoragePoolWorkDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/StoragePoolWorkDaoImpl.java index 052bae4fc78..df0b6f2312b 100644 --- a/engine/schema/src/com/cloud/storage/dao/StoragePoolWorkDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/StoragePoolWorkDaoImpl.java @@ -31,12 +31,12 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = { StoragePoolWorkDao.class }) -@DB(txn = false) +@DB() public class StoragePoolWorkDaoImpl extends GenericDaoBase implements StoragePoolWorkDao { protected final SearchBuilder PendingWorkForPrepareForMaintenanceSearch; @@ -125,7 +125,7 @@ public class StoragePoolWorkDaoImpl extends GenericDaoBase { List findTemplatesToSyncToS3(); + void loadDetails(VMTemplateVO tmpl); + + void saveDetails(VMTemplateVO tmpl); } diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java 
b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java index a96524cdd9f..73b68eac066 100755 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -28,11 +28,10 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.dao.DomainDao; @@ -40,10 +39,11 @@ import com.cloud.host.Host; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.storage.Storage; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; +import com.cloud.storage.VMTemplateDetailVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; @@ -57,7 +57,7 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -175,7 +175,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.ISO.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.ISO.toString()); for 
(String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -220,7 +220,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem List l = new ArrayList(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { @@ -402,6 +402,29 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem return routerTmpltName; } + + @Override + public void loadDetails(VMTemplateVO tmpl) { + Map details = _templateDetailsDao.listDetailsKeyPairs(tmpl.getId()); + tmpl.setDetails(details); + } + + @Override + public void saveDetails(VMTemplateVO tmpl) { + Map detailsStr = tmpl.getDetails(); + if (detailsStr == null) { + return; + } + List details = new ArrayList(); + for (String key : detailsStr.keySet()) { + VMTemplateDetailVO detail = new VMTemplateDetailVO(tmpl.getId(), key, detailsStr.get(key)); + details.add(detail); + } + + _templateDetailsDao.saveDetails(details); + } + + /* * @Override public Set> searchSwiftTemplates(String name, * String keyword, TemplateFilter templateFilter, boolean isIso, @@ -421,7 +444,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem * permittedAccountsStr = permittedAccountsStr.substring(0, * permittedAccountsStr.length() - 1); } * - * Transaction txn = Transaction.currentTxn(); txn.start(); + * TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); * * Set> templateZonePairList = new HashSet>(); PreparedStatement pstmt = null; ResultSet rs = null; String sql @@ -505,7 +528,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem * permittedAccountsStr = permittedAccountsStr.substring(0, * permittedAccountsStr.length()-1); } * - * Transaction txn = Transaction.currentTxn(); txn.start(); + * TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); * * // Use LinkedHashSet here to 
guarantee iteration order Set> templateZonePairList = new LinkedHashSet>(); @@ -720,15 +743,20 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem @Override @DB public long addTemplateToZone(VMTemplateVO tmplt, long zoneId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VMTemplateVO tmplt2 = findById(tmplt.getId()); if (tmplt2 == null) { if (persist(tmplt) == null) { throw new CloudRuntimeException("Failed to persist the template " + tmplt); } + if (tmplt.getDetails() != null) { - _templateDetailsDao.persist(tmplt.getId(), tmplt.getDetails()); + List details = new ArrayList(); + for (String key : tmplt.getDetails().keySet()) { + details.add(new VMTemplateDetailVO(tmplt.getId(), key, tmplt.getDetails().get(key))); + } + _templateDetailsDao.saveDetails(details); } } VMTemplateZoneVO tmpltZoneVO = _templateZoneDao.findByZoneTemplate(zoneId, tmplt.getId()); @@ -855,7 +883,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VMTemplateVO template = createForUpdate(); template.setRemoved(new Date()); @@ -863,9 +891,9 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem VMTemplateVO vo = findById(id); if (vo != null) { if (vo.getFormat() == ImageFormat.ISO) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.ISO); + _tagsDao.removeByIdAndType(id, ResourceObjectType.ISO); } else { - _tagsDao.removeByIdAndType(id, TaggedResourceType.Template); + _tagsDao.removeByIdAndType(id, ResourceObjectType.Template); } } @@ -890,7 +918,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem * * final String permittedAccountsStr = join(",", permittedAccounts); * - * final Transaction txn = Transaction.currentTxn(); txn.start(); + * final TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); * 
* Set> templateZonePairList = new HashSet>(); PreparedStatement pstmt = null; ResultSet rs = null; try { diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDetailsDao.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateDetailsDao.java index 552f8f00a86..e98f651bc92 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDetailsDao.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDetailsDao.java @@ -16,17 +16,11 @@ // under the License. package com.cloud.storage.dao; -import java.util.Map; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; import com.cloud.storage.VMTemplateDetailVO; import com.cloud.utils.db.GenericDao; -public interface VMTemplateDetailsDao extends GenericDao { - Map findDetails(long templateId); - - void persist(long templateId, Map details); - - VMTemplateDetailVO findDetail(long templateId, String name); - - void deleteDetails(long vmId); +public interface VMTemplateDetailsDao extends GenericDao, ResourceDetailsDao { + } diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateDetailsDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateDetailsDaoImpl.java index 33b96c45bcc..327f8549b35 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateDetailsDaoImpl.java @@ -16,84 +16,19 @@ // under the License. 
package com.cloud.storage.dao; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.springframework.stereotype.Component; import com.cloud.storage.VMTemplateDetailVO; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; @Component @Local(value = VMTemplateDetailsDao.class) -public class VMTemplateDetailsDaoImpl extends GenericDaoBase implements VMTemplateDetailsDao { - - protected final SearchBuilder TemplateSearch; - protected final SearchBuilder DetailSearch; - - public VMTemplateDetailsDaoImpl() { - TemplateSearch = createSearchBuilder(); - TemplateSearch.and("templateId", TemplateSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); - TemplateSearch.done(); - - DetailSearch = createSearchBuilder(); - DetailSearch.and("templateId", DetailSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); - DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); - DetailSearch.done(); - } +public class VMTemplateDetailsDaoImpl extends ResourceDetailsDaoBase implements VMTemplateDetailsDao { @Override - public void deleteDetails(long templateId) { - SearchCriteria sc = TemplateSearch.create(); - sc.setParameters("templateId", templateId); - - List results = search(sc, null); - for (VMTemplateDetailVO result : results) { - remove(result.getId()); - } - } - - @Override - public VMTemplateDetailVO findDetail(long templateId, String name) { - SearchCriteria sc = DetailSearch.create(); - sc.setParameters("templateId", templateId); - sc.setParameters("name", name); - - return findOneBy(sc); - } - - @Override - public Map findDetails(long templateId) { - SearchCriteria sc = TemplateSearch.create(); - sc.setParameters("templateId", templateId); - - List results = search(sc, null); - Map details = new 
HashMap(results.size()); - for (VMTemplateDetailVO result : results) { - details.put(result.getName(), result.getValue()); - } - - return details; - } - - @Override - public void persist(long templateId, Map details) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - SearchCriteria sc = TemplateSearch.create(); - sc.setParameters("templateId", templateId); - expunge(sc); - - for (Map.Entry detail : details.entrySet()) { - VMTemplateDetailVO vo = new VMTemplateDetailVO(templateId, detail.getKey(), detail.getValue()); - persist(vo); - } - txn.commit(); + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new VMTemplateDetailVO(resourceId, key, value)); } } diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java index 85d8348e36e..d6283b60285 100755 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateHostDaoImpl.java @@ -46,7 +46,7 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; @Component @@ -163,7 +163,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase listByTemplateStatus(long templateId, long datacenterId, VMTemplateHostVO.Status downloadState) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); try { @@ -267,7 +267,7 @@ public class VMTemplateHostDaoImpl extends GenericDaoBase listByTemplateStatus(long templateId, long datacenterId, long podId, VMTemplateHostVO.Status downloadState) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = 
TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); ResultSet rs = null; diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java index ba8135b5f32..b121297437a 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplatePoolDaoImpl.java @@ -38,7 +38,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; @Component @@ -150,7 +150,7 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase listByTemplateStatus(long templateId, long datacenterId, VMTemplateStoragePoolVO.Status downloadState) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); try { @@ -173,7 +173,7 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase listByTemplateStatus(long templateId, long datacenterId, long podId, VMTemplateStoragePoolVO.Status downloadState) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); ResultSet rs = null; @@ -210,7 +210,7 @@ public class VMTemplatePoolDaoImpl extends GenericDaoBase listByHostTemplate(long hostId, long templateId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); ResultSet rs = null; diff --git a/engine/schema/src/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java index 
c4a4dc7230a..a56959d32ab 100644 --- a/engine/schema/src/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VMTemplateZoneDaoImpl.java @@ -27,7 +27,7 @@ import com.cloud.storage.VMTemplateZoneVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = { VMTemplateZoneDao.class }) @@ -89,7 +89,7 @@ public class VMTemplateZoneDaoImpl extends GenericDaoBase sc = TemplateSearch.create(); sc.setParameters("template_id", templateId); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); remove(sc); txn.commit(); diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java index bf284105685..54b6465642e 100755 --- a/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VolumeDaoImpl.java @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component; import com.cloud.exception.InvalidParameterValueException; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Volume; @@ -48,7 +48,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; @@ -237,7 +237,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol @DB public HypervisorType 
getHypervisorType(long volumeId) { /* lookup from cluster of pool */ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = null; try { @@ -354,7 +354,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } @Override - @DB(txn = false) + @DB() public Pair getCountAndTotalByPool(long poolId) { SearchCriteria sc = TotalSizeByPoolSearch.create(); sc.setParameters("poolId", poolId); @@ -458,7 +458,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol @Override public List listPoolIdsByVolumeCount(long dcId, Long podId, Long clusterId, long accountId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); try { @@ -484,7 +484,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol @Override public List listZoneWidePoolIdsByVolumeCount(long dcId, long accountId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); try { @@ -506,7 +506,7 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol } @Override - @DB(txn = false) + @DB() public Pair getNonDestroyedCountAndTotalByPool(long poolId) { SearchCriteria sc = TotalSizeByPoolSearch.create(); sc.setParameters("poolId", poolId); @@ -519,11 +519,11 @@ public class VolumeDaoImpl extends GenericDaoBase implements Vol @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VolumeVO entry = findById(id); if (entry != null) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.Volume); + _tagsDao.removeByIdAndType(id, ResourceObjectType.Volume); } boolean result = super.remove(id); txn.commit(); @@ -533,7 +533,7 @@ public class VolumeDaoImpl 
extends GenericDaoBase implements Vol @Override public ScopeType getVolumeStoragePoolScope(long volumeId) { // finding the storage scope where the volume is present - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeDetailsDao.java b/engine/schema/src/com/cloud/storage/dao/VolumeDetailsDao.java index 4e786ba6255..a7cee81a81f 100644 --- a/engine/schema/src/com/cloud/storage/dao/VolumeDetailsDao.java +++ b/engine/schema/src/com/cloud/storage/dao/VolumeDetailsDao.java @@ -16,21 +16,11 @@ // under the License. package com.cloud.storage.dao; -import java.util.List; -import java.util.Map; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; import com.cloud.storage.VolumeDetailVO; import com.cloud.utils.db.GenericDao; -public interface VolumeDetailsDao extends GenericDao { - List findDetails(long volumeId); +public interface VolumeDetailsDao extends GenericDao, ResourceDetailsDao { - void persist(long vmId, Map details); - - VolumeDetailVO findDetail(long vmId, String name); - - void deleteDetails(long vmId); - - public void removeDetails(long volumeId, String key); - - } +} diff --git a/engine/schema/src/com/cloud/storage/dao/VolumeDetailsDaoImpl.java b/engine/schema/src/com/cloud/storage/dao/VolumeDetailsDaoImpl.java index 40af999032f..7bb540ad393 100644 --- a/engine/schema/src/com/cloud/storage/dao/VolumeDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/storage/dao/VolumeDetailsDaoImpl.java @@ -16,99 +16,20 @@ // under the License. 
package com.cloud.storage.dao; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; -import com.cloud.storage.VolumeDetailVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.springframework.stereotype.Component; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.storage.VolumeDetailVO; @Component @Local(value=VolumeDetailsDao.class) -public class VolumeDetailsDaoImpl extends GenericDaoBase implements VolumeDetailsDao { - protected final SearchBuilder VolumeSearch; - protected final SearchBuilder DetailSearch; - protected final SearchBuilder VolumeDetailSearch; - - public VolumeDetailsDaoImpl() { - VolumeSearch = createSearchBuilder(); - VolumeSearch.and("volumeId", VolumeSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); - VolumeSearch.done(); - - DetailSearch = createSearchBuilder(); - DetailSearch.and("volumeId", DetailSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); - DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); - DetailSearch.done(); - - VolumeDetailSearch = createSearchBuilder(); - VolumeDetailSearch.and("volumeId", VolumeDetailSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); - VolumeDetailSearch.and("name", VolumeDetailSearch.entity().getName(), SearchCriteria.Op.IN); - VolumeDetailSearch.done(); - - } +public class VolumeDetailsDaoImpl extends ResourceDetailsDaoBase implements VolumeDetailsDao { @Override - public void deleteDetails(long volumeId) { - SearchCriteria sc = VolumeSearch.create(); - sc.setParameters("volumeId", volumeId); - - List results = search(sc, null); - for (VolumeDetailVO result : results) { - remove(result.getId()); - } + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new VolumeDetailVO(resourceId, key, value)); } - - @Override - public VolumeDetailVO findDetail(long 
volumeId, String name) { - SearchCriteria sc = DetailSearch.create(); - sc.setParameters("volumeId", volumeId); - sc.setParameters("name", name); - - return findOneBy(sc); - } - - @Override - public void removeDetails(long volumeId, String key) { - - if(key != null){ - VolumeDetailVO detail = findDetail(volumeId, key); - if(detail != null){ - remove(detail.getId()); - } - }else { - deleteDetails(volumeId); - } - - } - - @Override - public List findDetails(long volumeId) { - SearchCriteria sc = VolumeSearch.create(); - sc.setParameters("volumeId", volumeId); - - List results = search(sc, null); - return results; - } - - @Override - public void persist(long volumeId, Map details) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - SearchCriteria sc = VolumeSearch.create(); - sc.setParameters("volumeId", volumeId); - expunge(sc); - - for (Map.Entry detail : details.entrySet()) { - VolumeDetailVO vo = new VolumeDetailVO(volumeId, detail.getKey(), detail.getValue()); - persist(vo); - } - txn.commit(); - } - + } diff --git a/engine/schema/src/com/cloud/tags/ResourceTagVO.java b/engine/schema/src/com/cloud/tags/ResourceTagVO.java index 04ae757bf75..6130390aa4c 100644 --- a/engine/schema/src/com/cloud/tags/ResourceTagVO.java +++ b/engine/schema/src/com/cloud/tags/ResourceTagVO.java @@ -64,7 +64,7 @@ public class ResourceTagVO implements ResourceTag { @Column(name="resource_type") @Enumerated(value=EnumType.STRING) - private TaggedResourceType resourceType; + private ResourceObjectType resourceType; @Column(name="customer") String customer; @@ -85,7 +85,7 @@ public class ResourceTagVO implements ResourceTag { * @param resourceUuid TODO */ public ResourceTagVO(String key, String value, long accountId, long domainId, long resourceId, - TaggedResourceType resourceType, String customer, String resourceUuid) { + ResourceObjectType resourceType, String customer, String resourceUuid) { super(); this.key = key; this.value = value; @@ -139,7 +139,7 @@ public class 
ResourceTagVO implements ResourceTag { } @Override - public TaggedResourceType getResourceType() { + public ResourceObjectType getResourceType() { return resourceType; } diff --git a/engine/schema/src/com/cloud/tags/dao/ResourceTagDao.java b/engine/schema/src/com/cloud/tags/dao/ResourceTagDao.java index 335c748768c..395cf1b311c 100644 --- a/engine/schema/src/com/cloud/tags/dao/ResourceTagDao.java +++ b/engine/schema/src/com/cloud/tags/dao/ResourceTagDao.java @@ -19,7 +19,7 @@ package com.cloud.tags.dao; import java.util.List; import com.cloud.server.ResourceTag; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; import com.cloud.utils.db.GenericDao; @@ -30,8 +30,8 @@ public interface ResourceTagDao extends GenericDao{ * @param resourceType * @return */ - boolean removeByIdAndType(long resourceId, TaggedResourceType resourceType); + boolean removeByIdAndType(long resourceId, ResourceObjectType resourceType); - List listBy(long resourceId, TaggedResourceType resourceType); + List listBy(long resourceId, ResourceObjectType resourceType); } diff --git a/engine/schema/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java b/engine/schema/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java index a8e1393d6da..2243fabaf5c 100644 --- a/engine/schema/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java +++ b/engine/schema/src/com/cloud/tags/dao/ResourceTagsDaoImpl.java @@ -23,7 +23,7 @@ import javax.ejb.Local; import org.springframework.stereotype.Component; import com.cloud.server.ResourceTag; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; @@ -44,7 +44,7 @@ public class ResourceTagsDaoImpl extends GenericDaoBase imp } @Override - public boolean removeByIdAndType(long resourceId, 
ResourceTag.TaggedResourceType resourceType) { + public boolean removeByIdAndType(long resourceId, ResourceTag.ResourceObjectType resourceType) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("resourceId", resourceId); sc.setParameters("resourceType", resourceType); @@ -53,7 +53,7 @@ public class ResourceTagsDaoImpl extends GenericDaoBase imp } @Override - public List listBy(long resourceId, TaggedResourceType resourceType) { + public List listBy(long resourceId, ResourceObjectType resourceType) { SearchCriteria sc = AllFieldsSearch.create(); sc.setParameters("resourceId", resourceId); sc.setParameters("resourceType", resourceType); diff --git a/engine/schema/src/com/cloud/upgrade/DatabaseCreator.java b/engine/schema/src/com/cloud/upgrade/DatabaseCreator.java index c97085b6e9e..8260aa125e5 100755 --- a/engine/schema/src/com/cloud/upgrade/DatabaseCreator.java +++ b/engine/schema/src/com/cloud/upgrade/DatabaseCreator.java @@ -30,11 +30,11 @@ import java.util.Properties; import org.springframework.context.support.ClassPathXmlApplicationContext; import com.cloud.utils.PropertiesUtil; - import com.cloud.utils.component.ComponentContext; import com.cloud.utils.component.SystemIntegrityChecker; import com.cloud.utils.db.ScriptRunner; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; // Creates the CloudStack Database by using the 4.0 schema and apply // upgrade steps to it. 
@@ -172,7 +172,7 @@ public class DatabaseCreator { } try { - Transaction.initDataSource(dbPropsFile); + TransactionLegacy.initDataSource(dbPropsFile); } catch (NullPointerException e) { } initDB(dbPropsFile, rootPassword, databases, dryRun); @@ -187,7 +187,7 @@ public class DatabaseCreator { } System.out.println("========> Processing SQL file at " + sqlScript.getAbsolutePath()); - Connection conn = Transaction.getStandaloneConnection(); + Connection conn = TransactionLegacy.getStandaloneConnection(); try { FileReader reader = null; try { @@ -207,7 +207,7 @@ public class DatabaseCreator { } } - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { // Process db upgrade classes for (String upgradeClass: upgradeClasses) { diff --git a/engine/schema/src/com/cloud/upgrade/DatabaseIntegrityChecker.java b/engine/schema/src/com/cloud/upgrade/DatabaseIntegrityChecker.java index 50eb47be027..b6e45ec8995 100755 --- a/engine/schema/src/com/cloud/upgrade/DatabaseIntegrityChecker.java +++ b/engine/schema/src/com/cloud/upgrade/DatabaseIntegrityChecker.java @@ -29,12 +29,11 @@ import org.springframework.stereotype.Component; import com.cloud.maint.Version; import com.cloud.upgrade.dao.VersionDao; - import com.cloud.utils.component.AdapterBase; import com.cloud.utils.component.ComponentLifecycle; import com.cloud.utils.component.SystemIntegrityChecker; import com.cloud.utils.db.GlobalLock; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -72,7 +71,7 @@ public class DatabaseIntegrityChecker extends AdapterBase implements SystemInteg } private Boolean checkDuplicateHostWithTheSameLocalStorage() { - Transaction txn = Transaction.open("Integrity"); + TransactionLegacy txn = TransactionLegacy.open("Integrity"); txn.start(); try { Connection conn; @@ -167,7 +166,7 @@ public class 
DatabaseIntegrityChecker extends AdapterBase implements SystemInteg } private boolean checkMissedPremiumUpgradeFor228() { - Transaction txn = Transaction.open("Integrity"); + TransactionLegacy txn = TransactionLegacy.open("Integrity"); txn.start(); try { String dbVersion = _dao.getCurrentVersion(); diff --git a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java index f001bf71810..b0beedfc6ed 100755 --- a/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -31,6 +31,7 @@ import java.util.List; import java.util.TreeMap; import javax.ejb.Local; +import javax.inject.Inject; import org.apache.log4j.Logger; @@ -62,7 +63,8 @@ import com.cloud.upgrade.dao.Upgrade307to410; import com.cloud.upgrade.dao.Upgrade30to301; import com.cloud.upgrade.dao.Upgrade40to41; import com.cloud.upgrade.dao.Upgrade410to420; -import com.cloud.upgrade.dao.Upgrade420to430; +import com.cloud.upgrade.dao.Upgrade420to421; +import com.cloud.upgrade.dao.Upgrade421to430; import com.cloud.upgrade.dao.UpgradeSnapshot217to224; import com.cloud.upgrade.dao.UpgradeSnapshot223to224; import com.cloud.upgrade.dao.VersionDao; @@ -72,7 +74,7 @@ import com.cloud.upgrade.dao.VersionVO.Step; import com.cloud.utils.component.SystemIntegrityChecker; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.ScriptRunner; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Local(value = {SystemIntegrityChecker.class}) @@ -81,126 +83,137 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { protected HashMap _upgradeMap = new HashMap(); + @Inject VersionDao _dao; public DatabaseUpgradeChecker() { _dao = new VersionDaoImpl(); + _upgradeMap.put("2.1.7", new DbUpgrade[] {new Upgrade217to218(), new Upgrade218to22(), new Upgrade221to222(), new 
UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.1.8", new DbUpgrade[] {new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), + new Upgrade421to430()}); _upgradeMap.put("2.1.9", new DbUpgrade[] {new Upgrade218to22(), new Upgrade221to222(), new UpgradeSnapshot217to224(), new Upgrade222to224(), new Upgrade218to224DomainVlans(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new 
Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.1", new DbUpgrade[] {new Upgrade221to222(), new UpgradeSnapshot223to224(), new Upgrade222to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), - new Upgrade420to430()}); + new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.2", new DbUpgrade[] {new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), + new Upgrade421to430()}); _upgradeMap.put("2.2.3", new DbUpgrade[] {new Upgrade222to224(), new UpgradeSnapshot223to224(), new Upgrade224to225(), new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), + new Upgrade421to430()}); _upgradeMap.put("2.2.4", new DbUpgrade[] {new Upgrade224to225(), new Upgrade225to226(), new 
Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.5", new DbUpgrade[] {new Upgrade225to226(), new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), - new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.6", new DbUpgrade[] {new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), + new Upgrade421to430()}); _upgradeMap.put("2.2.7", new DbUpgrade[] {new Upgrade227to228(), new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), - new Upgrade420to430()}); + new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.8", new 
DbUpgrade[] {new Upgrade228to229(), new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30() - , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + , new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.9", new DbUpgrade[] {new Upgrade229to2210(), new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.10", new DbUpgrade[] {new Upgrade2210to2211(), new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), - new Upgrade420to430()}); + new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.11", new DbUpgrade[] {new Upgrade2211to2212(), new Upgrade2212to2213(), new Upgrade2213to2214(), - new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), + new Upgrade421to430()}); _upgradeMap.put("2.2.12", new DbUpgrade[] {new Upgrade2212to2213(), new Upgrade2213to2214(), new Upgrade2214to30(), - new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), 
new Upgrade420to430()}); + new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.13", new DbUpgrade[] {new Upgrade2213to2214(), new Upgrade2214to30(), new Upgrade30to301(), - new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.14", new DbUpgrade[] {new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), - new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("3.0.0", new DbUpgrade[] {new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), - new Upgrade420to430()}); + new Upgrade420to421(), new Upgrade421to430()}); - _upgradeMap.put("3.0.1", new DbUpgrade[] {new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("3.0.1", new DbUpgrade[] {new Upgrade301to302(), new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), + new Upgrade421to430()}); - _upgradeMap.put("3.0.2", new DbUpgrade[] {new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("3.0.2", new DbUpgrade[] {new Upgrade302to40(), new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); - _upgradeMap.put("4.0.0", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("4.0.0", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); - 
_upgradeMap.put("4.0.1", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("4.0.1", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); - _upgradeMap.put("4.0.2", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("4.0.2", new DbUpgrade[] {new Upgrade40to41(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); - _upgradeMap.put("4.1.0", new DbUpgrade[] {new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("4.1.0", new DbUpgrade[] {new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); - _upgradeMap.put("4.1.1", new DbUpgrade[] {new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("4.1.1", new DbUpgrade[] {new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); - _upgradeMap.put("4.2.0", new DbUpgrade[] {new Upgrade420to430()}); + _upgradeMap.put("4.2.0", new DbUpgrade[] {new Upgrade420to421(), new Upgrade421to430()}); + + _upgradeMap.put("4.2.1", new DbUpgrade[] {new Upgrade421to430()}); //CP Upgrades _upgradeMap.put("3.0.3", new DbUpgrade[] {new Upgrade303to304(), new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), - new Upgrade410to420(), new Upgrade420to430()}); + new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("3.0.4", new DbUpgrade[] {new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), - new Upgrade420to430()}); + new Upgrade420to421(), new Upgrade421to430()}); - _upgradeMap.put("3.0.5", new DbUpgrade[] {new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("3.0.5", new DbUpgrade[] {new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to421(), + new 
Upgrade421to430()}); - _upgradeMap.put("3.0.6", new DbUpgrade[] {new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("3.0.6", new DbUpgrade[] {new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); - _upgradeMap.put("3.0.7", new DbUpgrade[] {new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to430()}); + _upgradeMap.put("3.0.7", new DbUpgrade[] {new Upgrade307to410(), new Upgrade410to420(), new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.15", new DbUpgrade[] {new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to303(), new Upgrade303to304(), new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), - new Upgrade420to430()}); + new Upgrade420to421(), new Upgrade421to430()}); _upgradeMap.put("2.2.16", new DbUpgrade[] {new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to303(), new Upgrade303to304(), new Upgrade304to305(), new Upgrade305to306(), new Upgrade306to307(), new Upgrade307to410(), new Upgrade410to420(), - new Upgrade420to430()}); + new Upgrade420to421(), new Upgrade421to430()}); } protected void runScript(Connection conn, File file) { @@ -259,7 +272,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { s_logger.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade.getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); - Transaction txn = Transaction.open("Upgrade"); + TransactionLegacy txn = TransactionLegacy.open("Upgrade"); txn.start(); try { Connection conn; @@ -329,7 +342,7 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { VersionVO version = _dao.findByVersion(upgradedVersion, Step.Upgrade); s_logger.debug("Upgrading to version " + 
upgradedVersion + "..."); - Transaction txn = Transaction.open("Cleanup"); + TransactionLegacy txn = TransactionLegacy.open("Cleanup"); try { if (version != null) { for (DbUpgrade upgrade : versionUpgrades) { @@ -383,9 +396,6 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker { try { String dbVersion = _dao.getCurrentVersion(); String currentVersion = this.getClass().getPackage().getImplementationVersion(); - if (currentVersion == null) { - currentVersion = this.getClass().getSuperclass().getPackage().getImplementationVersion(); - } if (currentVersion == null) return; diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java index 78ee674e069..48b83b41cfc 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade2214to30.java @@ -663,7 +663,7 @@ public class Upgrade2214to30 extends Upgrade30xBase implements DbUpgrade { services.put("SecurityGroup", "SecurityGroupProvider"); } - if (uniqueName.equals(NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService.toString()) || uniqueName.equalsIgnoreCase(externalOfferingName)) { + if (uniqueName.equals(NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService.toString()) || uniqueName.equals(NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService.toString() + "-redundant") || uniqueName.equalsIgnoreCase(externalOfferingName)) { if (externalOfferingName != null && uniqueName.equalsIgnoreCase(externalOfferingName)) { services.put("SourceNat", "JuniperSRX"); services.put("PortForwarding", "JuniperSRX"); diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade307to410.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade307to410.java index 5c7717f83a6..aeeb6d5863f 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade307to410.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade307to410.java @@ -18,15 +18,21 @@ 
package com.cloud.upgrade.dao; import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; +import java.util.Properties; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.crypt.EncryptionSecretKeyChecker; import org.apache.log4j.Logger; -import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; +import org.jasypt.encryption.pbe.StandardPBEStringEncryptor; +import org.jasypt.properties.EncryptableProperties; public class Upgrade307to410 implements DbUpgrade { final static Logger s_logger = Logger.getLogger(Upgrade307to410.class); @@ -62,7 +68,28 @@ public class Upgrade307to410 implements DbUpgrade { } private void updateRegionEntries(Connection conn) { - int region_id = Transaction.s_region_id; + File dbPropsFile = PropertiesUtil.findConfigFile("db.properties"); + final Properties dbProps; + if (EncryptionSecretKeyChecker.useEncryption()) { + StandardPBEStringEncryptor encryptor = EncryptionSecretKeyChecker.getEncryptor(); + dbProps = new EncryptableProperties(encryptor); + } else { + dbProps = new Properties(); + } + try { + dbProps.load(new FileInputStream(dbPropsFile)); + } catch (IOException e) { + s_logger.fatal("Unable to load db properties file, pl. 
check the classpath and file path configuration", e); + return; + } catch (NullPointerException e) { + s_logger.fatal("Unable to locate db properties file within classpath or absolute path: db.properties"); + return; + } + int region_id = 1; + String regionId = dbProps.getProperty("region.id"); + if(regionId != null){ + region_id = Integer.parseInt(regionId); + } PreparedStatement pstmt = null; try { //Update regionId in region table diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade40to41.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade40to41.java index 9e386b9f4db..79ca5e1fdb1 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade40to41.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade40to41.java @@ -17,16 +17,23 @@ package com.cloud.upgrade.dao; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.crypt.EncryptionSecretKeyChecker; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import org.apache.log4j.Logger; +import org.jasypt.encryption.pbe.StandardPBEStringEncryptor; +import org.jasypt.properties.EncryptableProperties; import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.Properties; import java.util.UUID; public class Upgrade40to41 implements DbUpgrade { @@ -74,7 +81,28 @@ public class Upgrade40to41 implements DbUpgrade { } private void updateRegionEntries(Connection conn) { - int region_id = Transaction.s_region_id; + File dbPropsFile = PropertiesUtil.findConfigFile("db.properties"); + final Properties dbProps; + if (EncryptionSecretKeyChecker.useEncryption()) { + StandardPBEStringEncryptor encryptor = EncryptionSecretKeyChecker.getEncryptor(); + dbProps = new EncryptableProperties(encryptor); + } else { + dbProps = new Properties(); + } + try { + dbProps.load(new 
FileInputStream(dbPropsFile)); + } catch (IOException e) { + s_logger.fatal("Unable to load db properties file, pl. check the classpath and file path configuration", e); + return; + } catch (NullPointerException e) { + s_logger.fatal("Unable to locate db properties file within classpath or absolute path: db.properties"); + return; + } + int region_id = 1; + String regionId = dbProps.getProperty("region.id"); + if(regionId != null){ + region_id = Integer.parseInt(regionId); + } PreparedStatement pstmt = null; try { //Update regionId in region table diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java index 9bd61161d05..c2630b068f0 100755 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade410to420.java @@ -115,6 +115,8 @@ public class Upgrade410to420 implements DbUpgrade { setRAWformatForRBDVolumes(conn); migrateVolumeOnSecondaryStorage(conn); createFullCloneFlag(conn); + upgradeVpcServiceMap(conn); + upgradeResourceCount(conn); } private void createFullCloneFlag(Connection conn) { @@ -2953,4 +2955,207 @@ public class Upgrade410to420 implements DbUpgrade { throw new CloudRuntimeException("Failed to update volume format to RAW for volumes on RBD pools due to exception ", e); } } + + + private void upgradeVpcServiceMap(Connection conn){ + s_logger.debug("Upgrading VPC service Map"); + PreparedStatement listVpc = null; + PreparedStatement listServiceProviders = null; + PreparedStatement insertProviders = null; + ResultSet rs = null; + ResultSet rs1 = null; + try { + //Get all vpc Ids along with vpc offering Id + listVpc = conn.prepareStatement("SELECT id, vpc_offering_id FROM `cloud`.`vpc` where removed is NULL"); + rs = listVpc.executeQuery(); + while (rs.next()) { + long vpc_id = rs.getLong(1); + long offering_id = rs.getLong(2); + //list all services and providers in offering + listServiceProviders = 
conn.prepareStatement("SELECT service, provider FROM `cloud`.`vpc_offering_service_map` where vpc_offering_id = ?"); + listServiceProviders.setLong(1, offering_id); + rs1 = listServiceProviders.executeQuery(); + //Insert entries in vpc_service_map + while (rs1.next()) { + String service = rs1.getString(1); + String provider = rs1.getString(2); + insertProviders = conn.prepareStatement("INSERT INTO `cloud`.`vpc_service_map` (`vpc_id`, `service`, `provider`, `created`) VALUES (?, ?, ?, now());"); + insertProviders.setLong(1, vpc_id); + insertProviders.setString(2, service); + insertProviders.setString(3, provider); + insertProviders.executeUpdate(); + } + s_logger.debug("Upgraded service map for VPC: "+vpc_id); + } + }catch (SQLException e) { + throw new CloudRuntimeException("Error during VPC service map upgrade", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (rs1 != null) { + rs1.close(); + } + if (listVpc != null) { + listVpc.close(); + } + if (listServiceProviders != null) { + listServiceProviders.close(); + } + if (insertProviders != null) { + insertProviders.close(); + } + } catch (SQLException e) { + } + } + } + + private void upgradeResourceCount(Connection conn) { + s_logger.debug("upgradeResourceCount start"); + PreparedStatement pstmt1 = null; + PreparedStatement pstmt2 = null; + PreparedStatement pstmt3 = null; + PreparedStatement pstmt4 = null; + PreparedStatement pstmt5 = null; + ResultSet rs = null; + ResultSet rsAccount = null; + ResultSet rsCount = null; + try { + pstmt1 = conn.prepareStatement("select id, domain_id FROM `cloud`.`account` where removed is NULL "); + rsAccount = pstmt1.executeQuery(); + while (rsAccount.next()) { + long account_id = rsAccount.getLong(1); + long domain_id = rsAccount.getLong(2); + // 1. 
update cpu,memory for all accounts + pstmt2 = conn.prepareStatement( "SELECT SUM(service_offering.cpu), SUM(service_offering.ram_size)" + + " FROM `cloud`.`vm_instance`, `cloud`.`service_offering`" + + " WHERE vm_instance.service_offering_id = service_offering.id AND vm_instance.account_id = ?" + " AND vm_instance.removed is NULL" + + " AND vm_instance.vm_type='User' AND state not in ('Destroyed', 'Error', 'Expunging')"); + pstmt2.setLong(1, account_id); + rsCount = pstmt2.executeQuery(); + if (rsCount.next()) { + upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", rsCount.getLong(1)); + upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", rsCount.getLong(2)); + } else { + upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", 0L); + upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", 0L); + } + // 2. update primary_storage for all accounts + pstmt3 = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?" + + " AND (path is not NULL OR state in ('Allocated')) AND removed is NULL" + + " AND instance_id IN (SELECT id FROM `cloud`.`vm_instance` WHERE vm_type='User')"); + pstmt3.setLong(1, account_id); + rsCount = pstmt3.executeQuery(); + if (rsCount.next()) { + upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", rsCount.getLong(1)); + } else { + upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", 0L); + } + // 3. update secondary_storage for all accounts + long totalVolumesSize = 0; + long totalSnapshotsSize = 0; + long totalTemplatesSize = 0; + pstmt4 = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?" 
+ + " AND path is NULL AND state not in ('Allocated') AND removed is NULL"); + pstmt4.setLong(1, account_id); + rsCount = pstmt4.executeQuery(); + if (rsCount.next()) { + totalVolumesSize = rsCount.getLong(1); + } + pstmt4 = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`snapshots` WHERE account_id= ? AND removed is NULL"); + pstmt4.setLong(1, account_id); + rsCount = pstmt4.executeQuery(); + if (rsCount.next()) { + totalSnapshotsSize = rsCount.getLong(1); + } + pstmt4 = conn.prepareStatement("SELECT sum(template_store_ref.size) FROM `cloud`.`template_store_ref`,`cloud`.`vm_template` WHERE account_id = ?" + + " AND template_store_ref.template_id = vm_template.id AND download_state = 'DOWNLOADED' AND destroyed = false AND removed is NULL"); + pstmt4.setLong(1, account_id); + rsCount = pstmt4.executeQuery(); + if (rsCount.next()) { + totalTemplatesSize = rsCount.getLong(1); + } + upgradeResourceCountforAccount(conn, account_id, domain_id, "secondary_storage", totalVolumesSize + totalSnapshotsSize + totalTemplatesSize); + } + // 4. upgrade cpu,memory,primary_storage,secondary_storage for domains + String resource_types[] = {"cpu","memory", "primary_storage", "secondary_storage"}; + pstmt5 = conn.prepareStatement("select id FROM `cloud`.`domain`"); + rsAccount = pstmt5.executeQuery(); + while (rsAccount.next()) { + long domain_id = rsAccount.getLong(1); + for(int count=0; count < resource_types.length; count++) { + String resource_type = resource_types[count]; + upgradeResourceCountforDomain(conn, domain_id, resource_type, 0L); // reset value to 0 before statistics + } + } + for(int count= 0; count < resource_types.length; count++) { + String resource_type = resource_types[count]; + pstmt5 = conn.prepareStatement("select account.domain_id,sum(resource_count.count) from `cloud`.`account` left join `cloud`.`resource_count` on account.id=resource_count.account_id " + + "where resource_count.type=? 
group by account.domain_id;"); + pstmt5.setString(1, resource_type); + rsCount = pstmt5.executeQuery(); + while (rsCount.next()) { + long domain_id = rsCount.getLong(1); + long resource_count = rsCount.getLong(2); + upgradeResourceCountforDomain(conn, domain_id, resource_type, resource_count); + } + } + s_logger.debug("upgradeResourceCount finish"); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (rsAccount != null) { + rsAccount.close(); + } + if (rsCount != null) { + rsCount.close(); + } + if (pstmt1 != null) { + pstmt1.close(); + } + if (pstmt2 != null) { + pstmt2.close(); + } + if (pstmt3 != null) { + pstmt3.close(); + } + if (pstmt4 != null) { + pstmt4.close(); + } + if (pstmt5 != null) { + pstmt5.close(); + } + } catch (SQLException e) { + } + } + } + + private static void upgradeResourceCountforAccount(Connection conn, Long account_id, Long domain_id, String type, Long resource_count) throws SQLException { + //update or insert into resource_count table. + PreparedStatement pstmt = null; + pstmt = conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (account_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?"); + pstmt.setLong(1, account_id); + pstmt.setString(2, type); + pstmt.setLong(3, resource_count); + pstmt.setLong(4, resource_count); + pstmt.executeUpdate(); + pstmt.close(); + } + + private static void upgradeResourceCountforDomain(Connection conn, Long domain_id, String type, Long resource_count) throws SQLException { + //update or insert into resource_count table. + PreparedStatement pstmt = null; + pstmt = conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (domain_id, type, count) VALUES (?,?,?) 
ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?"); + pstmt.setLong(1, domain_id); + pstmt.setString(2, type); + pstmt.setLong(3, resource_count); + pstmt.setLong(4, resource_count); + pstmt.executeUpdate(); + pstmt.close(); + } } diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade420to421.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade420to421.java new file mode 100644 index 00000000000..27704e8739e --- /dev/null +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade420to421.java @@ -0,0 +1,217 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.upgrade.dao; + +import java.io.File; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; + +import org.apache.log4j.Logger; + +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.Script; + +public class Upgrade420to421 implements DbUpgrade { + final static Logger s_logger = Logger.getLogger(Upgrade420to421.class); + + + @Override + public String[] getUpgradableVersionRange() { + return new String[] { "4.2.0", "4.2.1" }; + } + + @Override + public String getUpgradedVersion() { + return "4.2.1"; + } + + @Override + public boolean supportsRollingUpgrade() { + return false; + } + + @Override + public File[] getPrepareScripts() { + String script = Script.findScript("", "db/schema-420to421.sql"); + if (script == null) { + throw new CloudRuntimeException("Unable to find db/schema-420to421.sql"); + } + + return new File[] { new File(script) }; + } + + @Override + public File[] getCleanupScripts() { + return null; + } + + @Override + public void performDataMigration(Connection conn) { + upgradeResourceCount(conn); + } + + private void upgradeResourceCount(Connection conn) { + s_logger.debug("upgradeResourceCount start"); + PreparedStatement pstmt1 = null; + PreparedStatement pstmt2 = null; + PreparedStatement pstmt3 = null; + PreparedStatement pstmt4 = null; + PreparedStatement pstmt5 = null; + ResultSet rs = null; + ResultSet rsAccount = null; + ResultSet rsCount = null; + try { + pstmt1 = conn.prepareStatement("select id, domain_id FROM `cloud`.`account` where removed is NULL "); + rsAccount = pstmt1.executeQuery(); + while (rsAccount.next()) { + long account_id = rsAccount.getLong(1); + long domain_id = rsAccount.getLong(2); + // 1. 
update cpu,memory for all accounts + pstmt2 = conn.prepareStatement( "SELECT SUM(service_offering.cpu), SUM(service_offering.ram_size)" + + " FROM `cloud`.`vm_instance`, `cloud`.`service_offering`" + + " WHERE vm_instance.service_offering_id = service_offering.id AND vm_instance.account_id = ?" + " AND vm_instance.removed is NULL" + + " AND vm_instance.vm_type='User' AND state not in ('Destroyed', 'Error', 'Expunging')"); + pstmt2.setLong(1, account_id); + rsCount = pstmt2.executeQuery(); + if (rsCount.next()) { + upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", rsCount.getLong(1)); + upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", rsCount.getLong(2)); + } else { + upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", 0L); + upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", 0L); + } + // 2. update primary_storage for all accounts + pstmt3 = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?" + + " AND (path is not NULL OR state in ('Allocated')) AND removed is NULL" + + " AND instance_id IN (SELECT id FROM `cloud`.`vm_instance` WHERE vm_type='User')"); + pstmt3.setLong(1, account_id); + rsCount = pstmt3.executeQuery(); + if (rsCount.next()) { + upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", rsCount.getLong(1)); + } else { + upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", 0L); + } + // 3. update secondary_storage for all accounts + long totalVolumesSize = 0; + long totalSnapshotsSize = 0; + long totalTemplatesSize = 0; + pstmt4 = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?" 
+ + " AND path is NULL AND state not in ('Allocated') AND removed is NULL"); + pstmt4.setLong(1, account_id); + rsCount = pstmt4.executeQuery(); + if (rsCount.next()) { + totalVolumesSize = rsCount.getLong(1); + } + pstmt4 = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`snapshots` WHERE account_id= ? AND removed is NULL"); + pstmt4.setLong(1, account_id); + rsCount = pstmt4.executeQuery(); + if (rsCount.next()) { + totalSnapshotsSize = rsCount.getLong(1); + } + pstmt4 = conn.prepareStatement("SELECT sum(template_store_ref.size) FROM `cloud`.`template_store_ref`,`cloud`.`vm_template` WHERE account_id = ?" + + " AND template_store_ref.template_id = vm_template.id AND download_state = 'DOWNLOADED' AND destroyed = false AND removed is NULL"); + pstmt4.setLong(1, account_id); + rsCount = pstmt4.executeQuery(); + if (rsCount.next()) { + totalTemplatesSize = rsCount.getLong(1); + } + upgradeResourceCountforAccount(conn, account_id, domain_id, "secondary_storage", totalVolumesSize + totalSnapshotsSize + totalTemplatesSize); + } + // 4. upgrade cpu,memory,primary_storage,secondary_storage for domains + String resource_types[] = {"cpu","memory", "primary_storage", "secondary_storage"}; + pstmt5 = conn.prepareStatement("select id FROM `cloud`.`domain`"); + rsAccount = pstmt5.executeQuery(); + while (rsAccount.next()) { + long domain_id = rsAccount.getLong(1); + for(int count=0; count < resource_types.length; count++) { + String resource_type = resource_types[count]; + upgradeResourceCountforDomain(conn, domain_id, resource_type, 0L); // reset value to 0 before statistics + } + } + for(int count= 0; count < resource_types.length; count++) { + String resource_type = resource_types[count]; + pstmt5 = conn.prepareStatement("select account.domain_id,sum(resource_count.count) from `cloud`.`account` left join `cloud`.`resource_count` on account.id=resource_count.account_id " + + "where resource_count.type=? 
group by account.domain_id;"); + pstmt5.setString(1, resource_type); + rsCount = pstmt5.executeQuery(); + while (rsCount.next()) { + long domain_id = rsCount.getLong(1); + long resource_count = rsCount.getLong(2); + upgradeResourceCountforDomain(conn, domain_id, resource_type, resource_count); + } + } + s_logger.debug("upgradeResourceCount finish"); + } catch (SQLException e) { + throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e); + } finally { + try { + if (rs != null) { + rs.close(); + } + if (rsAccount != null) { + rsAccount.close(); + } + if (rsCount != null) { + rsCount.close(); + } + if (pstmt1 != null) { + pstmt1.close(); + } + if (pstmt2 != null) { + pstmt2.close(); + } + if (pstmt3 != null) { + pstmt3.close(); + } + if (pstmt4 != null) { + pstmt4.close(); + } + if (pstmt5 != null) { + pstmt5.close(); + } + } catch (SQLException e) { + } + } + } + + private static void upgradeResourceCountforAccount(Connection conn, Long account_id, Long domain_id, String type, Long resource_count) throws SQLException { + //update or insert into resource_count table. + PreparedStatement pstmt = null; + pstmt = conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (account_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?"); + pstmt.setLong(1, account_id); + pstmt.setString(2, type); + pstmt.setLong(3, resource_count); + pstmt.setLong(4, resource_count); + pstmt.executeUpdate(); + pstmt.close(); + } + + private static void upgradeResourceCountforDomain(Connection conn, Long domain_id, String type, Long resource_count) throws SQLException { + //update or insert into resource_count table. + PreparedStatement pstmt = null; + pstmt = conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (domain_id, type, count) VALUES (?,?,?) 
ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?"); + pstmt.setLong(1, domain_id); + pstmt.setString(2, type); + pstmt.setLong(3, resource_count); + pstmt.setLong(4, resource_count); + pstmt.executeUpdate(); + pstmt.close(); + } +} diff --git a/engine/schema/src/com/cloud/upgrade/dao/Upgrade420to430.java b/engine/schema/src/com/cloud/upgrade/dao/Upgrade421to430.java similarity index 95% rename from engine/schema/src/com/cloud/upgrade/dao/Upgrade420to430.java rename to engine/schema/src/com/cloud/upgrade/dao/Upgrade421to430.java index 0e9785578b5..791297c4e43 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/Upgrade420to430.java +++ b/engine/schema/src/com/cloud/upgrade/dao/Upgrade421to430.java @@ -32,12 +32,12 @@ import com.cloud.utils.PropertiesUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; -public class Upgrade420to430 implements DbUpgrade { - final static Logger s_logger = Logger.getLogger(Upgrade420to430.class); +public class Upgrade421to430 implements DbUpgrade { + final static Logger s_logger = Logger.getLogger(Upgrade421to430.class); @Override public String[] getUpgradableVersionRange() { - return new String[] { "4.2.0", "4.3.0" }; + return new String[] {"4.2.1", "4.3.0"}; } @Override @@ -52,9 +52,9 @@ public class Upgrade420to430 implements DbUpgrade { @Override public File[] getPrepareScripts() { - String script = Script.findScript("", "db/schema-420to430.sql"); + String script = Script.findScript("", "db/schema-421to430.sql"); if (script == null) { - throw new CloudRuntimeException("Unable to find db/schema-420to430.sql"); + throw new CloudRuntimeException("Unable to find db/schema-421to430.sql"); } return new File[] { new File(script) }; @@ -189,9 +189,9 @@ public class Upgrade420to430 implements DbUpgrade { @Override public File[] getCleanupScripts() { - String script = Script.findScript("", "db/schema-420to430-cleanup.sql"); + String script = Script.findScript("", 
"db/schema-421to430-cleanup.sql"); if (script == null) { - throw new CloudRuntimeException("Unable to find db/schema-420to430-cleanup.sql"); + throw new CloudRuntimeException("Unable to find db/schema-421to430-cleanup.sql"); } return new File[] { new File(script) }; diff --git a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java index 3b2ed7ea63b..12c99b2b6c8 100644 --- a/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java +++ b/engine/schema/src/com/cloud/upgrade/dao/VersionDaoImpl.java @@ -35,12 +35,12 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @Local(value = VersionDao.class) -@DB(txn = false) +@DB() public class VersionDaoImpl extends GenericDaoBase implements VersionDao { private static final Logger s_logger = Logger.getLogger(VersionDaoImpl.class); @@ -51,7 +51,7 @@ public class VersionDaoImpl extends GenericDaoBase implements V super(); CurrentVersionSearch = createSearchBuilder(String.class); - CurrentVersionSearch.selectField(CurrentVersionSearch.entity().getVersion()); + CurrentVersionSearch.selectFields(CurrentVersionSearch.entity().getVersion()); CurrentVersionSearch.and("step", CurrentVersionSearch.entity().getStep(), Op.EQ); CurrentVersionSearch.done(); @@ -79,7 +79,7 @@ public class VersionDaoImpl extends GenericDaoBase implements V try { s_logger.debug("Checking to see if the database is at a version before it was the version table is created"); - conn = Transaction.getStandaloneConnection(); + conn = TransactionLegacy.getStandaloneConnection(); PreparedStatement pstmt = conn.prepareStatement("SHOW TABLES LIKE 'version'"); ResultSet rs = pstmt.executeQuery(); diff --git 
a/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java index ce6d9e4a477..e1e843e9ed5 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageDaoImpl.java @@ -38,7 +38,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -74,7 +74,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void deleteRecordsForAccount(Long accountId) { String sql = ((accountId == null) ? DELETE_ALL : DELETE_ALL_BY_ACCOUNTID); - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { txn.start(); @@ -99,7 +99,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void saveAccounts(List accounts) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = INSERT_ACCOUNT; @@ -133,7 +133,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void updateAccounts(List accounts) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = UPDATE_ACCOUNT; @@ -163,7 +163,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void saveUserStats(List userStats) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = INSERT_USER_STATS; @@ -204,7 +204,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void 
updateUserStats(List userStats) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = UPDATE_USER_STATS; @@ -231,7 +231,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public Long getLastAccountId() { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = GET_LAST_ACCOUNT; try { @@ -248,7 +248,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public Long getLastUserStatsId() { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = GET_LAST_USER_STATS; try { @@ -265,7 +265,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public List listPublicTemplatesByAccount(long accountId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = GET_PUBLIC_TEMPLATES_BY_ACCOUNTID; List templateList = new ArrayList(); @@ -284,7 +284,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public Long getLastVmDiskStatsId() { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = GET_LAST_VM_DISK_STATS; try { @@ -301,7 +301,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void updateVmDiskStats(List vmDiskStats) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = UPDATE_VM_DISK_STATS; @@ -335,7 +335,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void saveVmDiskStats(List vmDiskStats) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy 
txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = INSERT_VM_DISK_STATS; @@ -381,7 +381,7 @@ public class UsageDaoImpl extends GenericDaoBase implements Usage @Override public void saveUsageRecords(List usageRecords) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = INSERT_USGAE_RECORDS; diff --git a/engine/schema/src/com/cloud/usage/dao/UsageIPAddressDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageIPAddressDaoImpl.java index 9af4a267097..00145530a9c 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageIPAddressDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageIPAddressDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.usage.UsageIPAddressVO; import com.cloud.user.Account; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={UsageIPAddressDao.class}) @@ -56,7 +56,7 @@ public class UsageIPAddressDaoImpl extends GenericDaoBase implements @Override public long getLastJobSuccessDateMillis() { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = GET_LAST_JOB_SUCCESS_DATE_MILLIS; try { @@ -61,7 +61,7 @@ public class UsageJobDaoImpl extends GenericDaoBase implements @Override public void updateJobSuccess(Long jobId, long startMillis, long endMillis, long execTime, boolean success) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try { txn.start(); @@ -115,7 +115,7 @@ public class UsageJobDaoImpl extends GenericDaoBase implements @Override public UsageJobVO isOwner(String hostname, int pid) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try 
{ if ((hostname == null) || (pid <= 0)) { return null; diff --git a/engine/schema/src/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java index fa632236b79..70b1764498b 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageLoadBalancerPolicyDaoImpl.java @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component; import com.cloud.usage.UsageLoadBalancerPolicyVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={UsageLoadBalancerPolicyDao.class}) @@ -56,7 +56,7 @@ public class UsageLoadBalancerPolicyDaoImpl extends GenericDaoBase im @Override public Map getRecentNetworkStats() { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); String sql = SELECT_LATEST_STATS; PreparedStatement pstmt = null; try { @@ -85,7 +85,7 @@ public class UsageNetworkDaoImpl extends GenericDaoBase im @Override public void deleteOldStats(long maxEventTime) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); String sql = DELETE_OLD_STATS; PreparedStatement pstmt = null; try { @@ -102,7 +102,7 @@ public class UsageNetworkDaoImpl extends GenericDaoBase im @Override public void saveUsageNetworks (List usageNetworks) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = INSERT_USAGE_NETWORK; diff --git a/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java index 4ed7c27d491..6fb03e7252d 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java +++ 
b/engine/schema/src/com/cloud/usage/dao/UsageNetworkOfferingDaoImpl.java @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component; import com.cloud.usage.UsageNetworkOfferingVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={UsageNetworkOfferingDao.class}) @@ -55,7 +55,7 @@ public class UsageNetworkOfferingDaoImpl extends GenericDaoBase im } public void removeBy(long accountId, long volId, int storage_type) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { txn.start(); @@ -113,7 +113,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase im } public void update(UsageStorageVO usage) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { txn.start(); @@ -158,7 +158,7 @@ public class UsageStorageDaoImpl extends GenericDaoBase im sql += " LIMIT " + startIndex + ",500"; } - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java index fc827548781..ef313904956 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageVMInstanceDaoImpl.java @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component; import com.cloud.usage.UsageVMInstanceVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component 
@Local(value={UsageVMInstanceDao.class}) @@ -51,7 +51,7 @@ public class UsageVMInstanceDaoImpl extends GenericDaoBase getUsageRecords(long accountId, Date startDate, Date endDate) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; List usageInstances = new ArrayList(); try { diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVMSnapshotDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVMSnapshotDaoImpl.java index 9f98bbf1be5..b9f5f8ce358 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageVMSnapshotDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageVMSnapshotDaoImpl.java @@ -20,7 +20,7 @@ package com.cloud.usage.dao; import com.cloud.usage.UsageVMSnapshotVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -51,7 +51,7 @@ public class UsageVMSnapshotDaoImpl extends GenericDaoBase usageRecords = new ArrayList(); String sql = GET_USAGE_RECORDS_BY_ACCOUNT; - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { @@ -130,7 +130,7 @@ public class UsageVMSnapshotDaoImpl extends GenericDaoBase usageRecords = new ArrayList(); String sql = PREVIOUS_QUERY; - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { int i = 1; diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVPNUserDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVPNUserDaoImpl.java index d6bf13b41bf..d8d14552cca 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageVPNUserDaoImpl.java +++ 
b/engine/schema/src/com/cloud/usage/dao/UsageVPNUserDaoImpl.java @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component; import com.cloud.usage.UsageVPNUserVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={UsageVPNUserDao.class}) @@ -55,7 +55,7 @@ public class UsageVPNUserDaoImpl extends GenericDaoBase im public UsageVPNUserDaoImpl() {} public void update(UsageVPNUserVO usage) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { txn.start(); @@ -99,7 +99,7 @@ public class UsageVPNUserDaoImpl extends GenericDaoBase im sql += " LIMIT " + startIndex + ",500"; } - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java index 8436c5955c8..d2cb6730854 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageVmDiskDaoImpl.java @@ -29,7 +29,7 @@ import org.springframework.stereotype.Component; import com.cloud.usage.UsageVmDiskVO; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -51,7 +51,7 @@ public class UsageVmDiskDaoImpl extends GenericDaoBase impl @Override public Map getRecentVmDiskStats() { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); String sql = SELECT_LATEST_STATS; PreparedStatement pstmt = null; try { @@ -89,7 +89,7 @@ public class 
UsageVmDiskDaoImpl extends GenericDaoBase impl @Override public void deleteOldStats(long maxEventTime) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); String sql = DELETE_OLD_STATS; PreparedStatement pstmt = null; try { @@ -106,7 +106,7 @@ public class UsageVmDiskDaoImpl extends GenericDaoBase impl @Override public void saveUsageVmDisks(List usageVmDisks) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { txn.start(); String sql = INSERT_USAGE_VM_DISK; diff --git a/engine/schema/src/com/cloud/usage/dao/UsageVolumeDaoImpl.java b/engine/schema/src/com/cloud/usage/dao/UsageVolumeDaoImpl.java index 039d8f4be90..8bf95b63aa3 100644 --- a/engine/schema/src/com/cloud/usage/dao/UsageVolumeDaoImpl.java +++ b/engine/schema/src/com/cloud/usage/dao/UsageVolumeDaoImpl.java @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component; import com.cloud.usage.UsageVolumeVO; import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={UsageVolumeDao.class}) @@ -56,7 +56,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase impl public UsageVolumeDaoImpl() {} public void removeBy(long accountId, long volId) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { txn.start(); @@ -75,7 +75,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase impl } public void update(UsageVolumeVO usage) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { txn.start(); @@ -119,7 +119,7 @@ public class UsageVolumeDaoImpl extends GenericDaoBase impl sql += " LIMIT " + startIndex + ",500"; } - Transaction 
txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); PreparedStatement pstmt = null; try { diff --git a/engine/schema/src/com/cloud/user/AccountDetailsDaoImpl.java b/engine/schema/src/com/cloud/user/AccountDetailsDaoImpl.java index 36fd5b4e605..9bdaff60309 100755 --- a/engine/schema/src/com/cloud/user/AccountDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/user/AccountDetailsDaoImpl.java @@ -27,12 +27,11 @@ import org.apache.cloudstack.framework.config.ConfigKey.Scope; import org.apache.cloudstack.framework.config.ScopedConfigStorage; import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Local(value = {AccountDetailsDao.class}) public class AccountDetailsDaoImpl extends GenericDaoBase implements AccountDetailsDao, ScopedConfigStorage { @@ -46,8 +45,8 @@ public class AccountDetailsDaoImpl extends GenericDaoBase @Override public Map findDetails(long accountId) { - SearchCriteriaService sc = SearchCriteria2.create(AccountDetailVO.class); - sc.addAnd(sc.getEntity().getAccountId(), Op.EQ, accountId); + QueryBuilder sc = QueryBuilder.create(AccountDetailVO.class); + sc.and(sc.entity().getAccountId(), Op.EQ, accountId); List results = sc.list(); Map details = new HashMap(results.size()); for (AccountDetailVO r : results) { @@ -58,7 +57,7 @@ public class AccountDetailsDaoImpl extends GenericDaoBase @Override public void persist(long accountId, Map details) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = accountSearch.create(); sc.setParameters("accountId", accountId); @@ -72,9 
+71,9 @@ public class AccountDetailsDaoImpl extends GenericDaoBase @Override public AccountDetailVO findDetail(long accountId, String name) { - SearchCriteriaService sc = SearchCriteria2.create(AccountDetailVO.class); - sc.addAnd(sc.getEntity().getAccountId(), Op.EQ, accountId); - sc.addAnd(sc.getEntity().getName(), Op.EQ, name); + QueryBuilder sc = QueryBuilder.create(AccountDetailVO.class); + sc.and(sc.entity().getAccountId(), Op.EQ, accountId); + sc.and(sc.entity().getName(), Op.EQ, name); return sc.find(); } diff --git a/engine/schema/src/com/cloud/user/dao/AccountDaoImpl.java b/engine/schema/src/com/cloud/user/dao/AccountDaoImpl.java index aa67e86bf70..91226e79846 100755 --- a/engine/schema/src/com/cloud/user/dao/AccountDaoImpl.java +++ b/engine/schema/src/com/cloud/user/dao/AccountDaoImpl.java @@ -39,7 +39,7 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={AccountDao.class}) @@ -96,7 +96,7 @@ public class AccountDaoImpl extends GenericDaoBase implements A NonProjectAccountSearch.done(); AccountIdsSearch = createSearchBuilder(Long.class); - AccountIdsSearch.selectField(AccountIdsSearch.entity().getId()); + AccountIdsSearch.selectFields(AccountIdsSearch.entity().getId()); AccountIdsSearch.and("ids", AccountIdsSearch.entity().getDomainId(), Op.IN); AccountIdsSearch.done(); } @@ -124,7 +124,7 @@ public class AccountDaoImpl extends GenericDaoBase implements A @Override public Pair findUserAccountByApiKey(String apiKey) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; Pair userAcctPair = null; try { diff --git a/engine/schema/src/com/cloud/user/dao/UserStatisticsDaoImpl.java b/engine/schema/src/com/cloud/user/dao/UserStatisticsDaoImpl.java 
index 4a1a51c6d9f..ae21dfec16e 100644 --- a/engine/schema/src/com/cloud/user/dao/UserStatisticsDaoImpl.java +++ b/engine/schema/src/com/cloud/user/dao/UserStatisticsDaoImpl.java @@ -33,7 +33,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={UserStatisticsDao.class}) @@ -100,7 +100,7 @@ public class UserStatisticsDaoImpl extends GenericDaoBase userStats = new ArrayList(); if (minRemovedDate == null) return userStats; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { String sql = ACTIVE_AND_RECENTLY_DELETED_SEARCH + " LIMIT " + startIndex + "," + limit; PreparedStatement pstmt = null; @@ -120,7 +120,7 @@ public class UserStatisticsDaoImpl extends GenericDaoBase listUpdatedStats() { List userStats = new ArrayList(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement pstmt = null; pstmt = txn.prepareAutoCloseStatement(UPDATED_STATS_SEARCH); diff --git a/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java b/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java index 02f3406c497..2a3d132eeaa 100644 --- a/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java +++ b/engine/schema/src/com/cloud/user/dao/VmDiskStatisticsDaoImpl.java @@ -33,7 +33,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={VmDiskStatisticsDao.class}) @@ -97,7 +97,7 @@ public class VmDiskStatisticsDaoImpl extends GenericDaoBase vmDiskStats = new ArrayList(); if (minRemovedDate == null) return 
vmDiskStats; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { String sql = ACTIVE_AND_RECENTLY_DELETED_SEARCH + " LIMIT " + startIndex + "," + limit; PreparedStatement pstmt = null; @@ -117,7 +117,7 @@ public class VmDiskStatisticsDaoImpl extends GenericDaoBase listUpdatedStats() { List vmDiskStats = new ArrayList(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement pstmt = null; pstmt = txn.prepareAutoCloseStatement(UPDATED_VM_NETWORK_STATS_SEARCH); diff --git a/engine/schema/src/com/cloud/vm/NicDetailVO.java b/engine/schema/src/com/cloud/vm/NicDetailVO.java index 91499721e80..93d0d32fd5f 100644 --- a/engine/schema/src/com/cloud/vm/NicDetailVO.java +++ b/engine/schema/src/com/cloud/vm/NicDetailVO.java @@ -16,8 +16,6 @@ // under the License. package com.cloud.vm; -import org.apache.cloudstack.api.InternalIdentity; - import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; @@ -25,61 +23,58 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.api.ResourceDetail; + @Entity @Table(name="nic_details") -public class NicDetailVO implements InternalIdentity { +public class NicDetailVO implements ResourceDetail { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") private long id; @Column(name="nic_id") - private long nicId; + private long resourceId; @Column(name="name") private String name; @Column(name="value", length=1024) private String value; + + @Column(name="display") + private boolean display; public NicDetailVO() {} public NicDetailVO(long nicId, String name, String value) { - this.nicId = nicId; + this.resourceId = nicId; this.name = name; this.value = value; } + @Override public long getId() { return id; } - public long getNicId() { - return nicId; - } - + @Override public 
String getName() { return name; } + @Override public String getValue() { return value; } - public void setId(long id) { - this.id = id; + @Override + public long getResourceId() { + return resourceId; } - public void setNicId(long nicId) { - this.nicId = nicId; + @Override + public boolean isDisplay() { + return display; } - - public void setName(String name) { - this.name = name; - } - - public void setValue(String value) { - this.value = value; - } - } diff --git a/engine/schema/src/com/cloud/vm/UserVmDetailVO.java b/engine/schema/src/com/cloud/vm/UserVmDetailVO.java index 245b577bc39..82bf32d49a6 100644 --- a/engine/schema/src/com/cloud/vm/UserVmDetailVO.java +++ b/engine/schema/src/com/cloud/vm/UserVmDetailVO.java @@ -16,8 +16,6 @@ // under the License. package com.cloud.vm; -import org.apache.cloudstack.api.InternalIdentity; - import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.GeneratedValue; @@ -25,16 +23,18 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.api.ResourceDetail; + @Entity @Table(name="user_vm_details") -public class UserVmDetailVO implements InternalIdentity { +public class UserVmDetailVO implements ResourceDetail { @Id @GeneratedValue(strategy=GenerationType.IDENTITY) @Column(name="id") private long id; @Column(name="vm_id") - private long vmId; + private long resourceId; @Column(name="name") private String name; @@ -42,44 +42,40 @@ public class UserVmDetailVO implements InternalIdentity { @Column(name="value", length=1024) private String value; + @Column(name="display") + private boolean display; + public UserVmDetailVO() {} public UserVmDetailVO(long vmId, String name, String value) { - this.vmId = vmId; + this.resourceId = vmId; this.name = name; this.value = value; } + @Override public long getId() { return id; } - public long getVmId() { - return vmId; - } - + @Override public String getName() { return name; } + 
@Override public String getValue() { return value; } - public void setId(long id) { - this.id = id; - } - - public void setVmId(long vmId) { - this.vmId = vmId; - } - - public void setName(String name) { - this.name = name; - } - - public void setValue(String value) { - this.value = value; - } + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } } diff --git a/engine/schema/src/com/cloud/vm/dao/ConsoleProxyDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/ConsoleProxyDaoImpl.java index 9af371eb3c8..4238f6a38fd 100644 --- a/engine/schema/src/com/cloud/vm/dao/ConsoleProxyDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/ConsoleProxyDaoImpl.java @@ -34,7 +34,7 @@ import com.cloud.utils.db.Attribute; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.VirtualMachine.State; @@ -162,7 +162,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im @Override public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); ConsoleProxyVO proxy = createForUpdate(); proxy.setPublicIpAddress(null); @@ -227,7 +227,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im public List> getProxyLoadMatrix() { ArrayList> l = new ArrayList>(); - Transaction txn = Transaction.currentTxn();; + TransactionLegacy txn = TransactionLegacy.currentTxn();; PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(PROXY_ASSIGNMENT_MATRIX); @@ -245,7 +245,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im public List> getDatacenterStoragePoolHostInfo(long dcId, boolean countAllPoolTypes) { ArrayList> l = new ArrayList>(); - 
Transaction txn = Transaction.currentTxn();; + TransactionLegacy txn = TransactionLegacy.currentTxn();; PreparedStatement pstmt = null; try { if(countAllPoolTypes) { @@ -267,7 +267,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im @Override public int getProxyStaticLoad(long proxyVmId) { - Transaction txn = Transaction.currentTxn();; + TransactionLegacy txn = TransactionLegacy.currentTxn();; PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(GET_PROXY_LOAD); @@ -285,7 +285,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im @Override public int getProxyActiveLoad(long proxyVmId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(GET_PROXY_ACTIVE_LOAD); @@ -304,7 +304,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im private List getDatacenterLoadMatrix(String sql) { ArrayList l = new ArrayList(); - Transaction txn = Transaction.currentTxn();; + TransactionLegacy txn = TransactionLegacy.currentTxn();; PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); @@ -325,7 +325,7 @@ public class ConsoleProxyDaoImpl extends GenericDaoBase im @Override public List getRunningProxyListByMsid(long msid) { List l = new ArrayList(); - Transaction txn = Transaction.currentTxn();; + TransactionLegacy txn = TransactionLegacy.currentTxn();; PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement( diff --git a/engine/schema/src/com/cloud/vm/dao/DomainRouterDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/DomainRouterDaoImpl.java index 65b9d3b27c7..7676e2daf8d 100755 --- a/engine/schema/src/com/cloud/vm/dao/DomainRouterDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/DomainRouterDaoImpl.java @@ -47,7 +47,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; 
import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.VirtualMachine; @@ -143,7 +143,7 @@ public class DomainRouterDaoImpl extends GenericDaoBase im @Override public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); DomainRouterVO router = createForUpdate(); router.setPublicIpAddress(null); @@ -307,7 +307,7 @@ public class DomainRouterDaoImpl extends GenericDaoBase im @Override @DB public DomainRouterVO persist(DomainRouterVO router, List guestNetworks) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); // 1) create network @@ -330,7 +330,7 @@ public class DomainRouterDaoImpl extends GenericDaoBase im if (_routerNetworkDao.findByRouterAndNetwork(router.getId(), guestNetwork.getId()) == null) { NetworkOffering off = _offDao.findById(guestNetwork.getNetworkOfferingId()); if (!(off.getName().equalsIgnoreCase(NetworkOffering.SystemPrivateGatewayNetworkOffering))) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); //1) add router to network RouterNetworkVO routerNtwkMap = new RouterNetworkVO(router.getId(), guestNetwork.getId(), guestNetwork.getGuestType()); diff --git a/engine/schema/src/com/cloud/vm/dao/NicDetailDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/NicDetailDaoImpl.java deleted file mode 100644 index e1668915245..00000000000 --- a/engine/schema/src/com/cloud/vm/dao/NicDetailDaoImpl.java +++ /dev/null @@ -1,110 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. 
The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.vm.dao; - -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; -import com.cloud.vm.NicDetailVO; -import org.springframework.stereotype.Component; - -import javax.ejb.Local; -import java.util.List; -import java.util.Map; - -@Component -@Local (value={NicDetailDao.class}) -public class NicDetailDaoImpl extends GenericDaoBase implements NicDetailDao { - protected final SearchBuilder NicSearch; - protected final SearchBuilder DetailSearch; - - public NicDetailDaoImpl() { - NicSearch = createSearchBuilder(); - NicSearch.and("nicId", NicSearch.entity().getNicId(), SearchCriteria.Op.EQ); - NicSearch.done(); - - DetailSearch = createSearchBuilder(); - DetailSearch.and("nicId", DetailSearch.entity().getNicId(), SearchCriteria.Op.EQ); - DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); - DetailSearch.done(); - } - - @Override - public void deleteDetails(long nicId) { - SearchCriteria sc = NicSearch.create(); - sc.setParameters("nicId", nicId); - - List results = search(sc, null); - for (NicDetailVO result : results) { - remove(result.getId()); - } - } - - @Override - public NicDetailVO findDetail(long nicId, String name) { - SearchCriteria sc = DetailSearch.create(); - sc.setParameters("nicId", nicId); - sc.setParameters("name", 
name); - - return findOneBy(sc); - } - - @Override - public List findDetails(long nicId) { - SearchCriteria sc = NicSearch.create(); - sc.setParameters("nicId", nicId); - - List results = search(sc, null); - /*Map details = new HashMap(results.size()); - for (NicDetailVO result : results) { - details.put(result.getName(), result.getValue()); - } */ - - return results; - } - - @Override - public void persist(long nicId, Map details) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - SearchCriteria sc = NicSearch.create(); - sc.setParameters("nicId", nicId); - expunge(sc); - - for (Map.Entry detail : details.entrySet()) { - NicDetailVO vo = new NicDetailVO(nicId, detail.getKey(), detail.getValue()); - persist(vo); - } - txn.commit(); - } - - @Override - public void removeDetails(Long nicId, String key) { - - if(key != null){ - NicDetailVO detail = findDetail(nicId, key); - if(detail != null){ - remove(detail.getId()); - } - }else { - deleteDetails(nicId); - } - - } - -} diff --git a/engine/schema/src/com/cloud/vm/dao/NicDetailDao.java b/engine/schema/src/com/cloud/vm/dao/NicDetailsDao.java similarity index 71% rename from engine/schema/src/com/cloud/vm/dao/NicDetailDao.java rename to engine/schema/src/com/cloud/vm/dao/NicDetailsDao.java index 38eb2f2a3b1..2ca901fa3e5 100644 --- a/engine/schema/src/com/cloud/vm/dao/NicDetailDao.java +++ b/engine/schema/src/com/cloud/vm/dao/NicDetailsDao.java @@ -16,20 +16,10 @@ // under the License. 
package com.cloud.vm.dao; -import java.util.List; -import java.util.Map; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; import com.cloud.utils.db.GenericDao; import com.cloud.vm.NicDetailVO; -public interface NicDetailDao extends GenericDao { - List findDetails(long nicId); - - void persist(long nicId, Map details); - - NicDetailVO findDetail(long nicId, String name); - - void deleteDetails(long nicId); - - void removeDetails(Long id, String key); +public interface NicDetailsDao extends GenericDao, ResourceDetailsDao { } diff --git a/engine/schema/src/com/cloud/vm/dao/NicDetailsDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/NicDetailsDaoImpl.java new file mode 100644 index 00000000000..013c87b23d4 --- /dev/null +++ b/engine/schema/src/com/cloud/vm/dao/NicDetailsDaoImpl.java @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.vm.dao; + +import javax.ejb.Local; + +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import org.springframework.stereotype.Component; + +import com.cloud.vm.NicDetailVO; + +@Component +@Local (value={NicDetailsDao.class}) +public class NicDetailsDaoImpl extends ResourceDetailsDaoBase implements NicDetailsDao { + + @Override + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new NicDetailVO(resourceId, key, value)); + } +} diff --git a/engine/schema/src/com/cloud/vm/dao/SecondaryStorageVmDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/SecondaryStorageVmDaoImpl.java index f802a90d39f..2fcc3e7809d 100644 --- a/engine/schema/src/com/cloud/vm/dao/SecondaryStorageVmDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/SecondaryStorageVmDaoImpl.java @@ -31,7 +31,7 @@ import com.cloud.utils.db.Attribute; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.vm.SecondaryStorageVm; import com.cloud.vm.SecondaryStorageVmVO; @@ -105,7 +105,7 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase getRunningSecStorageVmListByMsid(SecondaryStorageVm.Role role, long msid) { List l = new ArrayList(); - Transaction txn = Transaction.currentTxn();; + TransactionLegacy txn = TransactionLegacy.currentTxn();; PreparedStatement pstmt = null; try { String sql; @@ -237,7 +237,7 @@ public class SecondaryStorageVmDaoImpl extends GenericDaoBase listRunningSecStorageOrderByLoad(SecondaryStorageVm.Role role, long zoneId) { List l = new ArrayList(); - Transaction txn = Transaction.currentTxn();; + TransactionLegacy txn = TransactionLegacy.currentTxn();; PreparedStatement pstmt = null; try { String sql; diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java 
b/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java index 174f28350d1..9d54b1ea3d8 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmCloneSettingDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.utils.db.DB; @Component @Local(value= { UserVmCloneSettingDao.class }) -@DB(txn = false) +@DB() public class UserVmCloneSettingDaoImpl extends GenericDaoBase implements UserVmCloneSettingDao { public static final Logger s_logger = Logger.getLogger(UserVmCloneSettingDaoImpl.class); diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java index 8afce099394..2108069569f 100755 --- a/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmDaoImpl.java @@ -31,14 +31,10 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; -import com.cloud.configuration.Resource; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; -import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.user.Account; - import com.cloud.utils.db.Attribute; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; @@ -46,9 +42,10 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.NicVO; +import com.cloud.vm.UserVmDetailVO; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; @@ -335,22 +332,27 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use 
@Override public void loadDetails(UserVmVO vm) { - Map details = _detailsDao.findDetails(vm.getId()); + Map details = _detailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); } @Override public void saveDetails(UserVmVO vm) { - Map details = vm.getDetails(); - if (details == null) { + Map detailsStr = vm.getDetails(); + if (detailsStr == null) { return; } - _detailsDao.persist(vm.getId(), details); + List details = new ArrayList(); + for (String key : detailsStr.keySet()) { + details.add(new UserVmDetailVO(vm.getId(), key, detailsStr.get(key))); + } + + _detailsDao.saveDetails(details); } @Override public List listPodIdsHavingVmsforAccount(long zoneId, long accountId){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); @@ -374,7 +376,7 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use @Override public Hashtable listVmDetails(Hashtable userVmDataHash){ - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { @@ -575,9 +577,9 @@ public class UserVmDaoImpl extends GenericDaoBase implements Use @Override public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); - _tagsDao.removeByIdAndType(id, TaggedResourceType.UserVm); + _tagsDao.removeByIdAndType(id, ResourceObjectType.UserVm); boolean result = super.remove(id); txn.commit(); return result; diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmDetailsDao.java b/engine/schema/src/com/cloud/vm/dao/UserVmDetailsDao.java index bdccec94ef0..c22da6b4ff5 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmDetailsDao.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmDetailsDao.java @@ -16,17 +16,10 @@ // under the License. 
package com.cloud.vm.dao; -import java.util.Map; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; import com.cloud.utils.db.GenericDao; import com.cloud.vm.UserVmDetailVO; -public interface UserVmDetailsDao extends GenericDao { - Map findDetails(long vmId); - - void persist(long vmId, Map details); - - UserVmDetailVO findDetail(long vmId, String name); - - void deleteDetails(long vmId); +public interface UserVmDetailsDao extends GenericDao, ResourceDetailsDao { } diff --git a/engine/schema/src/com/cloud/vm/dao/UserVmDetailsDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/UserVmDetailsDaoImpl.java index 6ec6f68ada6..fad9ace7ff0 100644 --- a/engine/schema/src/com/cloud/vm/dao/UserVmDetailsDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/UserVmDetailsDaoImpl.java @@ -16,84 +16,20 @@ // under the License. package com.cloud.vm.dao; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.springframework.stereotype.Component; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; import com.cloud.vm.UserVmDetailVO; @Component @Local(value=UserVmDetailsDao.class) -public class UserVmDetailsDaoImpl extends GenericDaoBase implements UserVmDetailsDao { - protected final SearchBuilder VmSearch; - protected final SearchBuilder DetailSearch; +public class UserVmDetailsDaoImpl extends ResourceDetailsDaoBase implements UserVmDetailsDao { - public UserVmDetailsDaoImpl() { - VmSearch = createSearchBuilder(); - VmSearch.and("vmId", VmSearch.entity().getVmId(), SearchCriteria.Op.EQ); - VmSearch.done(); - - DetailSearch = createSearchBuilder(); - DetailSearch.and("vmId", DetailSearch.entity().getVmId(), SearchCriteria.Op.EQ); - DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); - DetailSearch.done(); - 
} + @Override + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new UserVmDetailVO(resourceId, key, value)); + } - @Override - public void deleteDetails(long vmId) { - SearchCriteria sc = VmSearch.create(); - sc.setParameters("vmId", vmId); - - List results = search(sc, null); - for (UserVmDetailVO result : results) { - remove(result.getId()); - } - } - - @Override - public UserVmDetailVO findDetail(long vmId, String name) { - SearchCriteria sc = DetailSearch.create(); - sc.setParameters("vmId", vmId); - sc.setParameters("name", name); - - return findOneBy(sc); - } - - @Override - public Map findDetails(long vmId) { - SearchCriteria sc = VmSearch.create(); - sc.setParameters("vmId", vmId); - - List results = search(sc, null); - Map details = new HashMap(results.size()); - for (UserVmDetailVO result : results) { - details.put(result.getName(), result.getValue()); - } - - return details; - } - - @Override - public void persist(long vmId, Map details) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - SearchCriteria sc = VmSearch.create(); - sc.setParameters("vmId", vmId); - expunge(sc); - - for (Map.Entry detail : details.entrySet()) { - UserVmDetailVO vo = new UserVmDetailVO(vmId, detail.getKey(), detail.getValue()); - persist(vo); - } - txn.commit(); - } - } diff --git a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java index 3a7dde78a6d..2c62376d642 100644 --- a/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -35,7 +35,7 @@ import org.springframework.stereotype.Component; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.utils.Pair; import com.cloud.utils.db.Attribute; @@ -48,7 +48,7 @@ 
import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.NicVO; @@ -198,7 +198,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem HostNameSearch.done(); FindIdsOfVirtualRoutersByAccount = createSearchBuilder(Long.class); - FindIdsOfVirtualRoutersByAccount.selectField(FindIdsOfVirtualRoutersByAccount.entity().getId()); + FindIdsOfVirtualRoutersByAccount.selectFields(FindIdsOfVirtualRoutersByAccount.entity().getId()); FindIdsOfVirtualRoutersByAccount.and("account", FindIdsOfVirtualRoutersByAccount.entity().getAccountId(), SearchCriteria.Op.EQ); FindIdsOfVirtualRoutersByAccount.and("type", FindIdsOfVirtualRoutersByAccount.entity().getType(), SearchCriteria.Op.EQ); FindIdsOfVirtualRoutersByAccount.and("state", FindIdsOfVirtualRoutersByAccount.entity().getState(), SearchCriteria.Op.NIN); @@ -233,7 +233,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem nicSearch.and("networkId", nicSearch.entity().getNetworkId(), SearchCriteria.Op.EQ); DistinctHostNameSearch = createSearchBuilder(String.class); - DistinctHostNameSearch.selectField(DistinctHostNameSearch.entity().getHostName()); + DistinctHostNameSearch.selectFields(DistinctHostNameSearch.entity().getHostName()); DistinctHostNameSearch.and("types", DistinctHostNameSearch.entity().getType(), SearchCriteria.Op.IN); DistinctHostNameSearch.and("removed", DistinctHostNameSearch.entity().getRemoved(), SearchCriteria.Op.NULL); @@ -484,7 +484,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Override public Pair, Map> listClusterIdsInZoneByVmCount(long zoneId, long accountId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = 
TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map clusterVmCountMap = new HashMap(); @@ -513,7 +513,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Override public Pair, Map> listClusterIdsInPodByVmCount(long podId, long accountId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map clusterVmCountMap = new HashMap(); @@ -543,7 +543,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Override public Pair, Map> listPodIdsInZoneByVmCount(long dataCenterId, long accountId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); Map podVmCountMap = new HashMap(); @@ -569,7 +569,7 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Override public List listHostIdsByVmCount(long dcId, Long podId, Long clusterId, long accountId) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; List result = new ArrayList(); try { @@ -654,11 +654,11 @@ public class VMInstanceDaoImpl extends GenericDaoBase implem @Override @DB public boolean remove(Long id) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VMInstanceVO vm = findById(id); if (vm != null && vm.getType() == Type.User) { - _tagsDao.removeByIdAndType(id, TaggedResourceType.UserVm); + _tagsDao.removeByIdAndType(id, ResourceObjectType.UserVm); } boolean result = super.remove(id); txn.commit(); diff --git a/engine/schema/src/com/cloud/vm/snapshot/VMSnapshotDetailsVO.java b/engine/schema/src/com/cloud/vm/snapshot/VMSnapshotDetailsVO.java new file mode 100644 index 00000000000..934dd92cca3 --- /dev/null +++ 
b/engine/schema/src/com/cloud/vm/snapshot/VMSnapshotDetailsVO.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.vm.snapshot; + +import org.apache.cloudstack.api.InternalIdentity; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; +import javax.persistence.TableGenerator; + +@Entity +@Table(name = "vm_snapshot_details") +public class VMSnapshotDetailsVO implements InternalIdentity { + @Id + @TableGenerator(name = "vm_snapshot_details_seq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_snapshot_details_seq", allocationSize = 1) + @GeneratedValue(strategy = GenerationType.TABLE) + @Column(name = "id") + private long id; + + @Column(name = "vm_snapshot_id") + Long vmSnapshotId; + + @Column(name = "name") + String name; + + @Column(name = "value") + String value; + + public VMSnapshotDetailsVO() { + + } + + public VMSnapshotDetailsVO(Long vmSnapshotId, String name, String value) { + this.vmSnapshotId = vmSnapshotId; + this.name = name; + this.value = value; + } + + 
public Long getVmSnapshotId() { + return this.vmSnapshotId; + } + + public void setVmSnapshotId(Long vmSnapshotId) { + this.vmSnapshotId = vmSnapshotId; + } + + public String getName() { + return this.name; + } + + public void setName(String name) { + this.name = name; + } + + public String getValue() { + return this.value; + } + + public void setValue(String value) { + this.value = value; + } + + @Override + public long getId() { + return id; + } +} diff --git a/engine/schema/src/com/cloud/vm/snapshot/VMSnapshotVO.java b/engine/schema/src/com/cloud/vm/snapshot/VMSnapshotVO.java index 03d4945fda0..477148cfa3b 100644 --- a/engine/schema/src/com/cloud/vm/snapshot/VMSnapshotVO.java +++ b/engine/schema/src/com/cloud/vm/snapshot/VMSnapshotVO.java @@ -41,7 +41,7 @@ public class VMSnapshotVO implements VMSnapshot { @TableGenerator(name = "vm_snapshots_sq", table = "sequence", pkColumnName = "name", valueColumnName = "value", pkColumnValue = "vm_snapshots_seq", allocationSize = 1) @GeneratedValue(strategy = GenerationType.TABLE) @Column(name = "id") - long id; + Long id; @Column(name = "uuid") String uuid = UUID.randomUUID().toString(); diff --git a/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDetailsDao.java b/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDetailsDao.java new file mode 100644 index 00000000000..e84178cc4fd --- /dev/null +++ b/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDetailsDao.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.vm.snapshot.dao; + +import com.cloud.utils.db.GenericDao; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; + +import java.util.Map; + +public interface VMSnapshotDetailsDao extends GenericDao { + Map getDetails(Long vmSnapshotId); +} diff --git a/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDetailsDaoImpl.java b/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDetailsDaoImpl.java new file mode 100644 index 00000000000..b528b39290f --- /dev/null +++ b/engine/schema/src/com/cloud/vm/snapshot/dao/VMSnapshotDetailsDaoImpl.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.vm.snapshot.dao; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class VMSnapshotDetailsDaoImpl extends GenericDaoBase implements VMSnapshotDetailsDao { + protected final SearchBuilder searchDetails; + + protected VMSnapshotDetailsDaoImpl() { + super(); + searchDetails = createSearchBuilder(); + searchDetails.and("vmsnapshotId", searchDetails.entity().getVmSnapshotId(), SearchCriteria.Op.EQ); + searchDetails.done(); + } + @Override + public Map getDetails(Long vmSnapshotId) { + SearchCriteria sc = searchDetails.create(); + sc.setParameters("vmsnapshotId", vmSnapshotId); + + List details = listBy(sc); + Map detailsMap = new HashMap(); + for (VMSnapshotDetailsVO detail : details) { + detailsMap.put(detail.getName(), detail.getValue()); + } + + return detailsMap; + } +} diff --git a/engine/schema/src/org/apache/cloudstack/affinity/dao/AffinityGroupVMMapDaoImpl.java b/engine/schema/src/org/apache/cloudstack/affinity/dao/AffinityGroupVMMapDaoImpl.java index 89dfa5aff45..c2d48d42366 100644 --- a/engine/schema/src/org/apache/cloudstack/affinity/dao/AffinityGroupVMMapDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/affinity/dao/AffinityGroupVMMapDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Local(value = { AffinityGroupVMMapDao.class }) public class AffinityGroupVMMapDaoImpl extends GenericDaoBase implements @@ -56,7 +56,7 @@ public class AffinityGroupVMMapDaoImpl extends GenericDaoBase affinityGroupIds) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = 
TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = createSearchCriteria(); diff --git a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMComputeTagDaoImpl.java b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMComputeTagDaoImpl.java index 6f70b353176..54a557bfcb7 100644 --- a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMComputeTagDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMComputeTagDaoImpl.java @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = { VMComputeTagDao.class }) @@ -52,7 +52,7 @@ public class VMComputeTagDaoImpl extends GenericDaoBase im @Override public void persist(long vmId, List computeTags) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = VmIdSearch.create(); diff --git a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java index 3082d21c1ee..fa2ce81a657 100644 --- a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMEntityDaoImpl.java @@ -37,7 +37,7 @@ import com.cloud.network.dao.NetworkVO; import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.vm.NicProfile; @@ -78,7 +78,7 @@ public class VMEntityDaoImpl extends GenericDaoBase implements @Override @DB public VMEntityVO 
persist(VMEntityVO vm) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VMEntityVO dbVO = super.persist(vm); diff --git a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMNetworkMapDaoImpl.java b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMNetworkMapDaoImpl.java index 0f2c4ccb77e..f86e788ea1a 100644 --- a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMNetworkMapDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMNetworkMapDaoImpl.java @@ -28,7 +28,7 @@ import com.cloud.network.dao.NetworkDao; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = { VMNetworkMapDao.class }) @@ -52,7 +52,7 @@ public class VMNetworkMapDaoImpl extends GenericDaoBase im @Override public void persist(long vmId, List networks) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = VmIdSearch.create(); diff --git a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMReservationDaoImpl.java b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMReservationDaoImpl.java index 73b4dd2194c..31ed5b8650b 100644 --- a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMReservationDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMReservationDaoImpl.java @@ -35,7 +35,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = { 
VMReservationDao.class }) @@ -80,7 +80,7 @@ public class VMReservationDaoImpl extends GenericDaoBase @Override @DB public VMReservationVO persist(VMReservationVO reservation) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); VMReservationVO dbVO = super.persist(reservation); diff --git a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMRootDiskTagDaoImpl.java b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMRootDiskTagDaoImpl.java index be194bbfcaa..cd06b202b7b 100644 --- a/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMRootDiskTagDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/engine/cloud/entity/api/db/dao/VMRootDiskTagDaoImpl.java @@ -31,7 +31,7 @@ import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = { VMRootDiskTagDao.class }) @@ -52,7 +52,7 @@ public class VMRootDiskTagDaoImpl extends GenericDaoBase @Override public void persist(long vmId, List rootDiskTags) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); SearchCriteria sc = VmIdSearch.create(); diff --git a/engine/schema/src/org/apache/cloudstack/region/dao/RegionDao.java b/engine/schema/src/org/apache/cloudstack/region/dao/RegionDao.java index 91b51d3763e..c20735e1e33 100644 --- a/engine/schema/src/org/apache/cloudstack/region/dao/RegionDao.java +++ b/engine/schema/src/org/apache/cloudstack/region/dao/RegionDao.java @@ -23,5 +23,5 @@ import com.cloud.utils.db.GenericDao; public interface RegionDao extends GenericDao { RegionVO findByName(String name); - + int getRegionId(); } diff --git a/engine/schema/src/org/apache/cloudstack/region/dao/RegionDaoImpl.java 
b/engine/schema/src/org/apache/cloudstack/region/dao/RegionDaoImpl.java index 8f50f939e41..4a636a67d06 100644 --- a/engine/schema/src/org/apache/cloudstack/region/dao/RegionDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/region/dao/RegionDaoImpl.java @@ -45,4 +45,9 @@ public class RegionDaoImpl extends GenericDaoBase implements sc.setParameters("name", name); return findOneBy(sc); } + + @Override + public int getRegionId(){ + return 1; + } } diff --git a/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerLbRuleMapDaoImpl.java b/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerLbRuleMapDaoImpl.java index 2a6e72b6913..421343cbd5b 100644 --- a/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerLbRuleMapDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancerLbRuleMapDaoImpl.java @@ -28,7 +28,7 @@ import java.util.List; @Component @Local(value={GlobalLoadBalancerLbRuleMapDao.class}) -@DB(txn = false) +@DB() public class GlobalLoadBalancerLbRuleMapDaoImpl extends GenericDaoBase implements GlobalLoadBalancerLbRuleMapDao { private final SearchBuilder listByGslbRuleId; diff --git a/engine/schema/src/org/apache/cloudstack/resourcedetail/FirewallRuleDetailVO.java b/engine/schema/src/org/apache/cloudstack/resourcedetail/FirewallRuleDetailVO.java new file mode 100644 index 00000000000..88d2b0096bb --- /dev/null +++ b/engine/schema/src/org/apache/cloudstack/resourcedetail/FirewallRuleDetailVO.java @@ -0,0 +1,80 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.resourcedetail; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; + +@Entity +@Table(name="firewall_rule_details") +public class FirewallRuleDetailVO implements ResourceDetail{ + @Id + @GeneratedValue(strategy= GenerationType.IDENTITY) + @Column(name="id") + private long id; + + @Column(name="firewall_rule_id") + private long resourceId; + + @Column(name="name") + private String name; + + @Column(name="value", length=1024) + private String value; + + @Column(name="display") + private boolean display; + + public FirewallRuleDetailVO() {} + + public FirewallRuleDetailVO(long networkId, String name, String value) { + this.resourceId = networkId; + this.name = name; + this.value = value; + } + + @Override + public long getId() { + return id; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public long getResourceId() { + return resourceId; + } + + @Override + public boolean isDisplay() { + return display; + } +} diff --git a/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java b/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java new file mode 100644 index 00000000000..46895439fde --- /dev/null +++ 
b/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDao.java @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.resourcedetail; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.api.ResourceDetail; + +import com.cloud.utils.db.GenericDao; + +public interface ResourceDetailsDao extends GenericDao{ + /** + * Finds detail by resourceId and key + * @param resourceId + * @param name + * @return + */ + public R findDetail(long resourceId, String name); + + /** + * Removes all details for the resource specified + * @param resourceId + */ + public void removeDetails(long resourceId); + + /** + * Removes detail having resourceId and key specified (unique combination) + * @param resourceId + * @param key + */ + public void removeDetail(long resourceId, String key); + + /** + * Lists all details for the resourceId + * @param resourceId + * @return list of details each implementing ResourceDetail interface + */ + public List listDetails(long resourceId); + + /** + * List details for resourceId having display field = forDisplay value passed in + * @param resourceId + * @param forDisplay + * @return + */ + public List listDetails(long 
resourceId, boolean forDisplay); + + public Map listDetailsKeyPairs(long resourceId); + + public Map listDetailsKeyPairs(long resourceId, boolean forDisplay); + + public void saveDetails(List details); + + public void addDetail(long resourceId, String key, String value); + +} diff --git a/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java b/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java new file mode 100644 index 00000000000..4ecebf85f03 --- /dev/null +++ b/engine/schema/src/org/apache/cloudstack/resourcedetail/ResourceDetailsDaoBase.java @@ -0,0 +1,139 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.resourcedetail; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.api.ResourceDetail; + +import com.cloud.utils.db.GenericDaoBase; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionLegacy; + + +public abstract class ResourceDetailsDaoBase extends GenericDaoBase{ + private SearchBuilder AllFieldsSearch; + + public ResourceDetailsDaoBase() { + AllFieldsSearch = createSearchBuilder(); + AllFieldsSearch.and("resourceId", AllFieldsSearch.entity().getResourceId(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("name", AllFieldsSearch.entity().getName(), SearchCriteria.Op.EQ); + AllFieldsSearch.and("display", AllFieldsSearch.entity().isDisplay(), SearchCriteria.Op.EQ); + AllFieldsSearch.done(); + } + + public R findDetail(long resourceId, String name) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("resourceId", resourceId); + sc.setParameters("name", name); + + return findOneBy(sc); + } + + + public Map listDetailsKeyPairs(long resourceId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("resourceId", resourceId); + + List results = search(sc, null); + Map details = new HashMap(results.size()); + for (R result : results) { + details.put(result.getName(), result.getValue()); + } + return details; + } + + public List listDetails(long resourceId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("resourceId", resourceId); + + List results = search(sc, null); + return results; + } + + + public void removeDetails(long resourceId) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("resourceId", resourceId); + remove(sc); + } + + + public void removeDetail(long resourceId, String key) { + if (key != null){ + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("name", key); + remove(sc); + } + } + + + public void saveDetails(List 
details) { + if (details.isEmpty()) { + return; + } + TransactionLegacy txn = TransactionLegacy.currentTxn(); + txn.start(); + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("resourceId", details.get(0).getResourceId()); + expunge(sc); + + for (R detail : details) { + persist(detail); + } + + txn.commit(); + } + + + protected void addDetail(R detail) { + if (detail == null) { + return; + } + R existingDetail = findDetail(detail.getResourceId(), detail.getName()); + if (existingDetail != null) { + remove(existingDetail.getId()); + } + persist(detail); + } + + public Map listDetailsKeyPairs(long resourceId, boolean forDisplay) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("resourceId", resourceId); + sc.setParameters("display", forDisplay); + + List results = search(sc, null); + Map details = new HashMap(results.size()); + for (R result : results) { + details.put(result.getName(), result.getValue()); + } + return details; + } + + + public List listDetails(long resourceId, boolean forDisplay) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("resourceId", resourceId); + sc.setParameters("display", forDisplay); + + List results = search(sc, null); + return results; + } +} diff --git a/engine/schema/src/org/apache/cloudstack/resourcedetail/dao/FirewallRuleDetailsDao.java b/engine/schema/src/org/apache/cloudstack/resourcedetail/dao/FirewallRuleDetailsDao.java new file mode 100644 index 00000000000..9931aaa1a19 --- /dev/null +++ b/engine/schema/src/org/apache/cloudstack/resourcedetail/dao/FirewallRuleDetailsDao.java @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.resourcedetail.dao; + +import org.apache.cloudstack.resourcedetail.FirewallRuleDetailVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; + +import com.cloud.utils.db.GenericDao; + +public interface FirewallRuleDetailsDao extends GenericDao, ResourceDetailsDao{ + +} diff --git a/engine/schema/src/org/apache/cloudstack/resourcedetail/dao/FirewallRuleDetailsDaoImpl.java b/engine/schema/src/org/apache/cloudstack/resourcedetail/dao/FirewallRuleDetailsDaoImpl.java new file mode 100644 index 00000000000..35613d6a52c --- /dev/null +++ b/engine/schema/src/org/apache/cloudstack/resourcedetail/dao/FirewallRuleDetailsDaoImpl.java @@ -0,0 +1,34 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.resourcedetail.dao; + +import javax.ejb.Local; + +import org.apache.cloudstack.resourcedetail.FirewallRuleDetailVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import org.springframework.stereotype.Component; + + +@Component +@Local (value={FirewallRuleDetailsDao.class}) +public class FirewallRuleDetailsDaoImpl extends ResourceDetailsDaoBase implements FirewallRuleDetailsDao { + + @Override + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new FirewallRuleDetailVO(resourceId, key, value)); + } +} diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java index f95e66cd498..d0f8fe87850 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java @@ -36,4 +36,6 @@ public interface ImageStoreDao extends GenericDao { List findImageCacheByScope(ZoneScope scope); List listImageStores(); + + List listImageCacheStores(); } diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java index b39f8444c35..d35aa440f74 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDaoImpl.java @@ -28,6 +28,8 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.springframework.stereotype.Component; + import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.ScopeType; @@ -35,17 +37,17 @@ import com.cloud.storage.StoragePoolStatus; import com.cloud.utils.db.DB; import 
com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; + @Local(value = { PrimaryDataStoreDao.class }) -@DB(txn = false) +@DB() public class PrimaryDataStoreDaoImpl extends GenericDaoBase implements PrimaryDataStoreDao { protected final SearchBuilder AllFieldSearch; protected final SearchBuilder DcPodSearch; @@ -53,8 +55,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase protected final SearchBuilder DeleteLvmSearch; protected final GenericSearchBuilder StatusCountSearch; - @Inject - protected StoragePoolDetailsDao _detailsDao; + @Inject protected StoragePoolDetailsDao _detailsDao; private final String DetailsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? 
and ("; private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?"; @@ -227,7 +228,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase @Override @DB public StoragePoolVO persist(StoragePoolVO pool, Map details) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); pool = super.persist(pool); if (details != null) { @@ -255,7 +256,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase } sql.delete(sql.length() - 4, sql.length()); sql.append(DetailsSqlSuffix); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); @@ -316,10 +317,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase public List findZoneWideStoragePoolsByTags(long dcId, String[] tags) { List storagePools = null; if (tags == null || tags.length == 0) { - SearchCriteriaService sc = SearchCriteria2.create(StoragePoolVO.class); - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); - sc.addAnd(sc.getEntity().getScope(), Op.EQ, ScopeType.ZONE); + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); + sc.and(sc.entity().getStatus(), Op.EQ,Status.Up); + sc.and(sc.entity().getScope(), Op.EQ,ScopeType.ZONE); return sc.list(); } else { Map details = tagsToDetails(tags); @@ -332,7 +333,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase } sql.delete(sql.length() - 4, sql.length()); sql.append(ZoneWideDetailsSqlSuffix); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); @@ -358,7 +359,7 @@ public class PrimaryDataStoreDaoImpl 
extends GenericDaoBase StringBuilder sql = new StringBuilder(FindPoolTagDetails); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); @@ -381,13 +382,17 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase @Override public void updateDetails(long poolId, Map details) { if (details != null) { - _detailsDao.update(poolId, details); + List detailsVO = new ArrayList(); + for (String key : details.keySet()) { + detailsVO.add(new StoragePoolDetailVO(poolId, key, details.get(key))); + } + _detailsDao.saveDetails(detailsVO); } } @Override public Map getDetails(long poolId) { - return _detailsDao.getDetails(poolId); + return _detailsDao.listDetailsKeyPairs(poolId); } @Override @@ -421,11 +426,11 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase @Override public List findZoneWideStoragePoolsByHypervisor(long dataCenterId, HypervisorType hypervisorType) { - SearchCriteriaService sc = SearchCriteria2.create(StoragePoolVO.class); - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dataCenterId); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); - sc.addAnd(sc.getEntity().getScope(), Op.EQ, ScopeType.ZONE); - sc.addAnd(sc.getEntity().getHypervisor(), Op.EQ, hypervisorType); + QueryBuilder sc = QueryBuilder.create(StoragePoolVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dataCenterId); + sc.and(sc.entity().getStatus(), Op.EQ,Status.Up); + sc.and(sc.entity().getScope(), Op.EQ,ScopeType.ZONE); + sc.and(sc.entity().getHypervisor(), Op.EQ,hypervisorType); return sc.list(); } } diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java index 0d9af4b5fb3..68da2e222b1 100644 --- 
a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailVO.java @@ -23,57 +23,59 @@ import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.api.ResourceDetail; + @Entity @Table(name = "storage_pool_details") -public class PrimaryDataStoreDetailVO { +public class PrimaryDataStoreDetailVO implements ResourceDetail{ @Id @GeneratedValue(strategy = GenerationType.IDENTITY) @Column(name = "id") long id; @Column(name = "pool_id") - long poolId; + long resourceId; @Column(name = "name") String name; @Column(name = "value") String value; + + @Column(name="display") + private boolean display; public PrimaryDataStoreDetailVO(long poolId, String name, String value) { - this.poolId = poolId; + this.resourceId = poolId; this.name = name; this.value = value; } + + protected PrimaryDataStoreDetailVO() { + } + @Override public long getId() { return id; } - public long getPoolId() { - return poolId; - } - - public void setPoolId(long poolId) { - this.poolId = poolId; + @Override + public long getResourceId() { + return resourceId; } + @Override public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - + @Override public String getValue() { return value; } - - public void setValue(String value) { - this.value = value; - } - - protected PrimaryDataStoreDetailVO() { + + @Override + public boolean isDisplay() { + return display; } } diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java index 18e2f1c7018..5bab4602624 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java +++ 
b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/PrimaryDataStoreDetailsDao.java @@ -16,13 +16,7 @@ // under the License. package org.apache.cloudstack.storage.datastore.db; -import java.util.Map; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; -import com.cloud.utils.db.GenericDao; - -public interface PrimaryDataStoreDetailsDao extends GenericDao { - - void update(long poolId, Map details); - - Map getDetails(long poolId); +public interface PrimaryDataStoreDetailsDao extends ResourceDetailsDao { } \ No newline at end of file diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java index f9037150c93..dfa03adf29b 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java @@ -40,4 +40,14 @@ StateDao listDestroyed(long storeId); + List findBySnapshotId(long snapshotId); + + void duplicateCacheRecordsOnRegionStore(long storeId); + + // delete the snapshot entry on primary data store to make sure that next snapshot will be full snapshot + void deleteSnapshotRecordsOnPrimary(); + + List listOnCache(long snapshotId); + + void updateStoreRoleToCache(long storeId); } diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java index 0fe5e088043..db86c3f24b7 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreVO.java @@ -34,7 +34,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import 
com.cloud.storage.DataStoreRole; -import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.fsm.StateObject; @@ -98,6 +97,7 @@ public class SnapshotDataStoreVO implements StateObject { - - void update(long poolId, Map details); - - Map getDetails(long poolId); - - StoragePoolDetailVO findDetail(long poolId, String name); +public interface StoragePoolDetailsDao extends GenericDao, ResourceDetailsDao { } diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java index 9350751ec63..93adaaf5c8b 100644 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java @@ -18,9 +18,9 @@ package org.apache.cloudstack.storage.datastore.db; import java.util.List; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import com.cloud.storage.DataStoreRole; import com.cloud.storage.VMTemplateStorageResourceAssoc; @@ -62,4 +62,12 @@ StateDao listByTemplate(long templateId); + + void duplicateCacheRecordsOnRegionStore(long storeId); + + TemplateDataStoreVO findReadyOnCache(long templateId); + + List listOnCache(long templateId); + + void updateStoreRoleToCachce(long storeId); } diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreVO.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreVO.java index a890e4b337b..a3696d8d75f 100755 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreVO.java 
+++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreVO.java @@ -35,7 +35,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState import com.cloud.storage.DataStoreRole; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; - import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.fsm.StateObject; @@ -117,17 +116,17 @@ public class TemplateDataStoreVO implements StateObject listDestroyed(long storeId); + + void duplicateCacheRecordsOnRegionStore(long storeId); } diff --git a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java index e11071b702e..e34d4a60998 100755 --- a/engine/schema/src/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java +++ b/engine/schema/src/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java @@ -33,8 +33,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; -import com.cloud.storage.Storage; -import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.fsm.StateObject; @@ -117,6 +115,7 @@ public class VolumeDataStoreVO implements StateObject - +--> - - +--> org.springframework.web.util.Log4jConfigListener + CXF Servlet CXFServlet - CXF Servlet org.apache.cxf.transport.servlet.CXFServlet diff --git a/engine/storage/cache/resources/META-INF/cloudstack/core/spring-engine-storage-cache-core-context.xml b/engine/storage/cache/resources/META-INF/cloudstack/core/spring-engine-storage-cache-core-context.xml new file mode 100644 index 00000000000..7b8e2a4ab2c --- /dev/null +++ 
b/engine/storage/cache/resources/META-INF/cloudstack/core/spring-engine-storage-cache-core-context.xml @@ -0,0 +1,38 @@ + + + + + + + + + diff --git a/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java b/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java index 083b7c1bf15..bb8d67d321f 100644 --- a/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java +++ b/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheManagerImpl.java @@ -18,36 +18,48 @@ */ package org.apache.cloudstack.storage.cache.manager; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; + +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.cloudstack.storage.cache.allocator.StorageCacheAllocator; +import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; + import com.cloud.configuration.Config; import com.cloud.storage.DataStoreRole; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.Manager; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.exception.CloudRuntimeException; -import org.apache.cloudstack.engine.subsystem.api.storage.*; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.framework.async.AsyncCallFuture; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.cache.allocator.StorageCacheAllocator; -import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; - -import org.apache.log4j.Logger; - -import javax.inject.Inject; -import javax.naming.ConfigurationException; - -import java.util.*; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - public class StorageCacheManagerImpl implements StorageCacheManager, Manager { private static final Logger s_logger = Logger.getLogger(StorageCacheManagerImpl.class); @Inject @@ -79,8 +91,8 @@ public class StorageCacheManagerImpl implements StorageCacheManager, Manager { } protected List getCacheStores() { - SearchCriteriaService sc = 
SearchCriteria2.create(ImageStoreVO.class); - sc.addAnd(sc.getEntity().getRole(), SearchCriteria.Op.EQ, DataStoreRole.ImageCache); + QueryBuilder sc = QueryBuilder.create(ImageStoreVO.class); + sc.and(sc.entity().getRole(), SearchCriteria.Op.EQ,DataStoreRole.ImageCache); List imageStoreVOs = sc.list(); List stores = new ArrayList(); for (ImageStoreVO vo : imageStoreVOs) { @@ -134,10 +146,10 @@ public class StorageCacheManagerImpl implements StorageCacheManager, Manager { return true; } - protected class CacheReplacementRunner implements Runnable { + protected class CacheReplacementRunner extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { GlobalLock replacementLock = null; try { replacementLock = GlobalLock.getInternLock("storageCacheMgr.replacement"); @@ -232,7 +244,7 @@ public class StorageCacheManagerImpl implements StorageCacheManager, Manager { @Override public DataObject createCacheObject(DataObject data, Scope scope) { - DataStore cacheStore = this.getCacheStorage(scope); + DataStore cacheStore = getCacheStorage(scope); if (cacheStore == null) { @@ -244,7 +256,7 @@ public class StorageCacheManagerImpl implements StorageCacheManager, Manager { @Override public DataObject getCacheObject(DataObject data, Scope scope) { - DataStore cacheStore = this.getCacheStorage(scope); + DataStore cacheStore = getCacheStorage(scope); DataObject objOnCacheStore = cacheStore.create(data); objOnCacheStore.incRefCount(); return objOnCacheStore; @@ -260,4 +272,4 @@ public class StorageCacheManagerImpl implements StorageCacheManager, Manager { public boolean deleteCacheObject(DataObject data) { return data.getDataStore().delete(data); } -} \ No newline at end of file +} diff --git a/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheReplacementAlgorithmLRU.java b/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheReplacementAlgorithmLRU.java index 424a8fb73e7..ed1fc1aa9aa 
100644 --- a/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheReplacementAlgorithmLRU.java +++ b/engine/storage/cache/src/org/apache/cloudstack/storage/cache/manager/StorageCacheReplacementAlgorithmLRU.java @@ -17,27 +17,29 @@ * under the License. */ package org.apache.cloudstack.storage.cache.manager; -import com.cloud.configuration.Config; -import com.cloud.utils.DateUtil; -import com.cloud.utils.NumbersUtil; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; - -import org.apache.cloudstack.engine.subsystem.api.storage.*; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; -import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; - -import org.apache.commons.lang.math.NumberUtils; - import java.util.Calendar; import java.util.Date; import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; + +import com.cloud.configuration.Config; +import com.cloud.utils.DateUtil; +import com.cloud.utils.NumbersUtil; +import 
com.cloud.utils.db.QueryBuilder; +import com.cloud.utils.db.SearchCriteria; + public class StorageCacheReplacementAlgorithmLRU implements StorageCacheReplacementAlgorithm { @@ -58,7 +60,10 @@ public class StorageCacheReplacementAlgorithmLRU implements StorageCacheReplacem @PostConstruct public void initialize() { - unusedTimeInterval = NumbersUtil.parseInt(configDao.getValue(Config.StorageCacheReplacementLRUTimeInterval.key()), 30); + /* Avoid using configDao at this time, we can't be sure that the database is already upgraded + * and there might be fatal errors when using a dao. + */ + //unusedTimeInterval = NumbersUtil.parseInt(configDao.getValue(Config.StorageCacheReplacementLRUTimeInterval.key()), 30); } public void setUnusedTimeInterval(Integer interval) { @@ -67,38 +72,41 @@ public class StorageCacheReplacementAlgorithmLRU implements StorageCacheReplacem @Override public DataObject chooseOneToBeReplaced(DataStore store) { + if (unusedTimeInterval == null) { + unusedTimeInterval = NumbersUtil.parseInt(configDao.getValue(Config.StorageCacheReplacementLRUTimeInterval.key()), 30); + } Calendar cal = Calendar.getInstance(); cal.setTime(DateUtil.now()); cal.add(Calendar.DAY_OF_MONTH, -unusedTimeInterval.intValue()); Date bef = cal.getTime(); - SearchCriteriaService sc = SearchCriteria2.create(TemplateDataStoreVO.class); - sc.addAnd(sc.getEntity().getLastUpdated(), SearchCriteria.Op.LT, bef); - sc.addAnd(sc.getEntity().getState(), SearchCriteria.Op.EQ, ObjectInDataStoreStateMachine.State.Ready); - sc.addAnd(sc.getEntity().getDataStoreId(), SearchCriteria.Op.EQ, store.getId()); - sc.addAnd(sc.getEntity().getDataStoreRole(), SearchCriteria.Op.EQ, store.getRole()); - sc.addAnd(sc.getEntity().getRefCnt(), SearchCriteria.Op.EQ, 0); + QueryBuilder sc = QueryBuilder.create(TemplateDataStoreVO.class); + sc.and(sc.entity().getLastUpdated(), SearchCriteria.Op.LT, bef); + sc.and(sc.entity().getState(), SearchCriteria.Op.EQ,ObjectInDataStoreStateMachine.State.Ready); + 
sc.and(sc.entity().getDataStoreId(), SearchCriteria.Op.EQ,store.getId()); + sc.and(sc.entity().getDataStoreRole(), SearchCriteria.Op.EQ,store.getRole()); + sc.and(sc.entity().getRefCnt(), SearchCriteria.Op.EQ,0); TemplateDataStoreVO template = sc.find(); if (template != null) { return templateFactory.getTemplate(template.getTemplateId(), store); } - SearchCriteriaService volSc = SearchCriteria2.create(VolumeDataStoreVO.class); - volSc.addAnd(volSc.getEntity().getLastUpdated(), SearchCriteria.Op.LT, bef); - volSc.addAnd(volSc.getEntity().getState(), SearchCriteria.Op.EQ, ObjectInDataStoreStateMachine.State.Ready); - volSc.addAnd(volSc.getEntity().getDataStoreId(), SearchCriteria.Op.EQ, store.getId()); - volSc.addAnd(volSc.getEntity().getRefCnt(), SearchCriteria.Op.EQ, 0); + QueryBuilder volSc = QueryBuilder.create(VolumeDataStoreVO.class); + volSc.and(volSc.entity().getLastUpdated(), SearchCriteria.Op.LT, bef); + volSc.and(volSc.entity().getState(), SearchCriteria.Op.EQ,ObjectInDataStoreStateMachine.State.Ready); + volSc.and(volSc.entity().getDataStoreId(), SearchCriteria.Op.EQ,store.getId()); + volSc.and(volSc.entity().getRefCnt(), SearchCriteria.Op.EQ,0); VolumeDataStoreVO volume = volSc.find(); if (volume != null) { return volumeFactory.getVolume(volume.getVolumeId(), store); } - SearchCriteriaService snapshotSc = SearchCriteria2.create(SnapshotDataStoreVO.class); - snapshotSc.addAnd(snapshotSc.getEntity().getLastUpdated(), SearchCriteria.Op.LT, bef); - snapshotSc.addAnd(snapshotSc.getEntity().getState(), SearchCriteria.Op.EQ, ObjectInDataStoreStateMachine.State.Ready); - snapshotSc.addAnd(snapshotSc.getEntity().getDataStoreId(), SearchCriteria.Op.EQ, store.getId()); - snapshotSc.addAnd(snapshotSc.getEntity().getRole(), SearchCriteria.Op.EQ, store.getRole()); - snapshotSc.addAnd(snapshotSc.getEntity().getRefCnt(), SearchCriteria.Op.EQ, 0); + QueryBuilder snapshotSc = QueryBuilder.create(SnapshotDataStoreVO.class); + 
snapshotSc.and(snapshotSc.entity().getLastUpdated(), SearchCriteria.Op.LT, bef); + snapshotSc.and(snapshotSc.entity().getState(), SearchCriteria.Op.EQ,ObjectInDataStoreStateMachine.State.Ready); + snapshotSc.and(snapshotSc.entity().getDataStoreId(), SearchCriteria.Op.EQ,store.getId()); + snapshotSc.and(snapshotSc.entity().getRole(), SearchCriteria.Op.EQ,store.getRole()); + snapshotSc.and(snapshotSc.entity().getRefCnt(), SearchCriteria.Op.EQ,0); SnapshotDataStoreVO snapshot = snapshotSc.find(); if (snapshot != null) { return snapshotFactory.getSnapshot(snapshot.getSnapshotId(), store); diff --git a/engine/storage/datamotion/resources/META-INF/cloudstack/core/spring-engine-storage-datamotion-core-context.xml b/engine/storage/datamotion/resources/META-INF/cloudstack/core/spring-engine-storage-datamotion-core-context.xml new file mode 100644 index 00000000000..3bde7686848 --- /dev/null +++ b/engine/storage/datamotion/resources/META-INF/cloudstack/core/spring-engine-storage-datamotion-core-context.xml @@ -0,0 +1,34 @@ + + + + + + + diff --git a/engine/storage/datamotion/resources/META-INF/cloudstack/storage/spring-engine-storage-datamotion-storage-context.xml b/engine/storage/datamotion/resources/META-INF/cloudstack/storage/spring-engine-storage-datamotion-storage-context.xml new file mode 100644 index 00000000000..725f7d35be5 --- /dev/null +++ b/engine/storage/datamotion/resources/META-INF/cloudstack/storage/spring-engine-storage-datamotion-storage-context.xml @@ -0,0 +1,34 @@ + + + + + + diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java index 96d1f5ab785..a451ca47288 100644 --- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java +++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/AncientDataMotionStrategy.java @@ -22,6 +22,9 @@ import 
java.util.Map; import javax.inject.Inject; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; @@ -36,6 +39,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreState import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; @@ -48,9 +52,6 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.MigrateVolumeAnswer; import com.cloud.agent.api.storage.MigrateVolumeCommand; @@ -62,7 +63,6 @@ import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.configuration.Config; import com.cloud.host.Host; import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.server.ManagementService; import com.cloud.storage.DataStoreRole; import com.cloud.storage.StorageManager; @@ -81,56 +81,29 @@ import com.cloud.utils.exception.CloudRuntimeException; @Component public class - AncientDataMotionStrategy implements DataMotionStrategy { +AncientDataMotionStrategy implements DataMotionStrategy { 
private static final Logger s_logger = Logger.getLogger(AncientDataMotionStrategy.class); @Inject EndPointSelector selector; @Inject - TemplateManager templateMgr; - @Inject - VolumeDataStoreDao volumeStoreDao; - @Inject - HostDao hostDao; - @Inject ConfigurationDao configDao; @Inject - StorageManager storageMgr; - @Inject VolumeDao volDao; @Inject - VMTemplateDao templateDao; - @Inject - SnapshotManager snapshotMgr; - @Inject - SnapshotDao snapshotDao; - @Inject - SnapshotDataStoreDao _snapshotStoreDao; - @Inject - PrimaryDataStoreDao primaryDataStoreDao; - @Inject DataStoreManager dataStoreMgr; @Inject - TemplateDataStoreDao templateStoreDao; - @Inject - DiskOfferingDao diskOfferingDao; - @Inject - VMTemplatePoolDao templatePoolDao; - @Inject - VolumeOrchestrationService volumeMgr; - @Inject StorageCacheManager cacheMgr; @Inject ManagementService _mgmtServer; @Override - public boolean canHandle(DataObject srcData, DataObject destData) { - // TODO Auto-generated method stub - return true; + public StrategyPriority canHandle(DataObject srcData, DataObject destData) { + return StrategyPriority.DEFAULT; } @Override - public boolean canHandle(Map volumeMap, Host srcHost, Host destHost) { - return false; + public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { + return StrategyPriority.CANT_HANDLE; } protected boolean needCacheStorage(DataObject srcData, DataObject destData) { @@ -139,6 +112,8 @@ public class DataStoreTO srcStoreTO = srcTO.getDataStore(); DataStoreTO destStoreTO = destTO.getDataStore(); if (srcStoreTO instanceof NfsTO || srcStoreTO.getRole() == DataStoreRole.ImageCache) { + //|| + // (srcStoreTO instanceof PrimaryDataStoreTO && ((PrimaryDataStoreTO)srcStoreTO).getPoolType() == StoragePoolType.NetworkFilesystem)) { return false; } @@ -210,7 +185,8 @@ public class cacheMgr.deleteCacheObject(srcForCopy); } else { // for template, we want to leave it on cache for performance reason - if (answer == null || !answer.getResult()) { + 
if ((answer == null || !answer.getResult()) && srcForCopy.getRefCount() < 2) { + // cache object created by this copy, not already there cacheMgr.deleteCacheObject(srcForCopy); } else { cacheMgr.releaseCacheObject(srcForCopy); @@ -247,6 +223,13 @@ public class } } + protected void releaseSnapshotCacheChain(SnapshotInfo snapshot) { + while (snapshot != null) { + cacheMgr.releaseCacheObject(snapshot); + snapshot = snapshot.getParent(); + } + } + protected Answer copyVolumeFromSnapshot(DataObject snapObj, DataObject volObj) { SnapshotInfo snapshot = (SnapshotInfo) snapObj; StoragePool pool = (StoragePool) volObj.getDataStore(); @@ -264,8 +247,14 @@ public class int _createVolumeFromSnapshotWait = NumbersUtil.parseInt(value, Integer.parseInt(Config.CreateVolumeFromSnapshotWait.getDefaultValue())); + EndPoint ep = null; + if (srcData.getDataStore().getRole() == DataStoreRole.Primary) { + ep = selector.select(volObj); + } else { + ep = selector.select(snapObj, volObj); + } + CopyCommand cmd = new CopyCommand(srcData.getTO(), volObj.getTO(), _createVolumeFromSnapshotWait, _mgmtServer.getExecuteInSequence()); - EndPoint ep = selector.select(snapObj, volObj); Answer answer = ep.sendMessage(cmd); return answer; @@ -274,7 +263,8 @@ public class throw new CloudRuntimeException(basicErrMsg); } finally { if (!(storTO instanceof NfsTO)) { - deleteSnapshotCacheChain((SnapshotInfo) srcData); + // still keep snapshot on cache which may be migrated from previous secondary storage + releaseSnapshotCacheChain((SnapshotInfo)srcData); } } } @@ -300,7 +290,7 @@ public class if (cacheStore == null) { // need to find a nfs or cifs image store, assuming that can't copy volume // directly to s3 - ImageStoreEntity imageStore = (ImageStoreEntity) this.dataStoreMgr.getImageStore(destScope.getScopeId()); + ImageStoreEntity imageStore = (ImageStoreEntity) dataStoreMgr.getImageStore(destScope.getScopeId()); if (!imageStore.getProtocol().equalsIgnoreCase("nfs") && 
!imageStore.getProtocol().equalsIgnoreCase("cifs")) { s_logger.debug("can't find a nfs (or cifs) image store to satisfy the need for a staging store"); return null; @@ -309,7 +299,7 @@ public class DataObject objOnImageStore = imageStore.create(srcData); objOnImageStore.processEvent(Event.CreateOnlyRequested); - Answer answer = this.copyObject(srcData, objOnImageStore); + Answer answer = copyObject(srcData, objOnImageStore); if (answer == null || !answer.getResult()) { if (answer != null) { s_logger.debug("copy to image store failed: " + answer.getDetails()); @@ -355,7 +345,7 @@ public class protected Answer migrateVolumeToPool(DataObject srcData, DataObject destData) { VolumeInfo volume = (VolumeInfo)srcData; - StoragePool destPool = (StoragePool)this.dataStoreMgr.getDataStore(destData.getDataStore().getId(), DataStoreRole.Primary); + StoragePool destPool = (StoragePool)dataStoreMgr.getDataStore(destData.getDataStore().getId(), DataStoreRole.Primary); MigrateVolumeCommand command = new MigrateVolumeCommand(volume.getId(), volume.getPath(), destPool); EndPoint ep = selector.select(volume.getDataStore()); MigrateVolumeAnswer answer = (MigrateVolumeAnswer) ep.sendMessage(command); @@ -364,14 +354,14 @@ public class throw new CloudRuntimeException("Failed to migrate volume " + volume + " to storage pool " + destPool); } else { // Update the volume details after migration. 
- VolumeVO volumeVo = this.volDao.findById(volume.getId()); + VolumeVO volumeVo = volDao.findById(volume.getId()); Long oldPoolId = volume.getPoolId(); volumeVo.setPath(answer.getVolumePath()); volumeVo.setFolder(destPool.getPath()); volumeVo.setPodId(destPool.getPodId()); volumeVo.setPoolId(destPool.getId()); volumeVo.setLastPoolId(oldPoolId); - this.volDao.update(volume.getId(), volumeVo); + volDao.update(volume.getId(), volumeVo); } return answer; @@ -433,13 +423,19 @@ public class srcData = cacheSnapshotChain(snapshot); } + EndPoint ep = null; + if (srcData.getDataStore().getRole() == DataStoreRole.Primary) { + ep = selector.select(destData); + } else { + ep = selector.select(srcData, destData); + } + CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _createprivatetemplatefromsnapshotwait, _mgmtServer.getExecuteInSequence()); - EndPoint ep = selector.select(srcData, destData); Answer answer = ep.sendMessage(cmd); - - // clean up snapshot copied to staging + + // clean up snapshot copied to staging if (needCache && srcData != null) { - cacheMgr.deleteCacheObject(srcData); + cacheMgr.releaseCacheObject(srcData); // reduce ref count, but keep it there on cache which is converted from previous secondary storage } return answer; } diff --git a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java index c1cbdc772cc..a1f05e1bb65 100644 --- a/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ b/engine/storage/datamotion/src/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java @@ -18,29 +18,39 @@ */ package org.apache.cloudstack.storage.motion; -import com.cloud.agent.api.to.VirtualMachineTO; -import com.cloud.host.Host; -import com.cloud.utils.exception.CloudRuntimeException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + 
+import javax.inject.Inject; + import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.springframework.stereotype.Component; -import javax.inject.Inject; -import java.util.List; -import java.util.Map; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.host.Host; +import com.cloud.utils.StringUtils; +import com.cloud.utils.exception.CloudRuntimeException; @Component public class DataMotionServiceImpl implements DataMotionService { @Inject - List strategies; + StorageStrategyFactory storageStrategyFactory; @Override public void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { + if (srcData.getDataStore() == null || destData.getDataStore() == null) { + throw new CloudRuntimeException("can't find data store"); + } if (srcData.getDataStore().getDriver().canCopy(srcData, destData)) { srcData.getDataStore().getDriver().copyAsync(srcData, destData, callback); @@ -50,28 +60,32 @@ public class DataMotionServiceImpl implements DataMotionService { return; } - for (DataMotionStrategy strategy : strategies) { - if (strategy.canHandle(srcData, destData)) { - strategy.copyAsync(srcData, destData, callback); - return; - } + DataMotionStrategy strategy = storageStrategyFactory.getDataMotionStrategy(srcData, destData); + if (strategy == 
null) { + throw new CloudRuntimeException("Can't find strategy to move data. "+ + "Source: "+srcData.getType().name()+" '"+srcData.getUuid()+ + ", Destination: "+destData.getType().name()+" '"+destData.getUuid()+"'"); } - throw new CloudRuntimeException("can't find strategy to move data"); + + strategy.copyAsync(srcData, destData, callback); } @Override public void copyAsync(Map volumeMap, VirtualMachineTO vmTo, Host srcHost, Host destHost, AsyncCompletionCallback callback) { - for (DataMotionStrategy strategy : strategies) { - if (strategy.canHandle(volumeMap, srcHost, destHost)) { - strategy.copyAsync(volumeMap, vmTo, srcHost, destHost, callback); - return; - } - } - throw new CloudRuntimeException("can't find strategy to move data"); - } - public void setStrategies(List strategies) { - this.strategies = strategies; + DataMotionStrategy strategy = storageStrategyFactory.getDataMotionStrategy(volumeMap, srcHost, destHost); + if (strategy == null) { + List volumeIds = new LinkedList(); + for (final VolumeInfo volumeInfo : volumeMap.keySet()) { + volumeIds.add(volumeInfo.getUuid()); + } + + throw new CloudRuntimeException("Can't find strategy to move data. 
"+ + "Source Host: "+srcHost.getName()+", Destination Host: "+destHost.getName()+ + ", Volume UUIDs: "+StringUtils.join(volumeIds, ",")); + } + + strategy.copyAsync(volumeMap, vmTo, srcHost, destHost, callback); } } diff --git a/engine/storage/image/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml b/engine/storage/image/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml new file mode 100644 index 00000000000..db517dbd863 --- /dev/null +++ b/engine/storage/image/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index be0ce4e6b99..63693112cac 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -18,8 +18,14 @@ */ package org.apache.cloudstack.storage.image; +import java.util.ArrayList; +import java.util.List; + import javax.inject.Inject; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -28,8 +34,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.store.TemplateObject; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.storage.DataStoreRole; import com.cloud.storage.VMTemplateStoragePoolVO; @@ 
-70,8 +74,12 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { } } - if (!found) { - s_logger.debug("template " + templateId + " is not in store:" + store.getId() + ", type:" + store.getRole()); + if (s_logger.isDebugEnabled()) { + if (!found) { + s_logger.debug("template " + templateId + " is not in store:" + store.getId() + ", type:" + store.getRole()); + } else { + s_logger.debug("template " + templateId + " is already in store:" + store.getId() + ", type:" + store.getRole()); + } } TemplateObject tmpl = TemplateObject.getTemplate(templ, store); @@ -83,7 +91,7 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { TemplateDataStoreVO tmplStore = templateStoreDao.findByTemplate(templateId, storeRole); DataStore store = null; if (tmplStore != null) { - store = this.storeMgr.getDataStore(tmplStore.getDataStoreId(), storeRole); + store = storeMgr.getDataStore(tmplStore.getDataStoreId(), storeRole); } return this.getTemplate(templateId, store); } @@ -93,7 +101,7 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { TemplateDataStoreVO tmplStore = templateStoreDao.findByTemplateZone(templateId, zoneId, storeRole); DataStore store = null; if (tmplStore != null) { - store = this.storeMgr.getDataStore(tmplStore.getDataStoreId(), storeRole); + store = storeMgr.getDataStore(tmplStore.getDataStoreId(), storeRole); } return this.getTemplate(templateId, store); } @@ -109,4 +117,30 @@ public class TemplateDataFactoryImpl implements TemplateDataFactory { tmpObj.setUrl(origTmpl.getUrl()); return tmpObj; } + + @Override + public TemplateInfo getReadyTemplateOnCache(long templateId) { + TemplateDataStoreVO tmplStore = templateStoreDao.findReadyOnCache(templateId); + if (tmplStore != null) { + DataStore store = storeMgr.getDataStore(tmplStore.getDataStoreId(), DataStoreRole.ImageCache); + return getTemplate(templateId, store); + } else { + return null; + } + + } + + @Override + public List listTemplateOnCache(long 
templateId) { + List cacheTmpls = templateStoreDao.listOnCache(templateId); + List tmplObjs = new ArrayList(); + for (TemplateDataStoreVO cacheTmpl : cacheTmpls) { + long storeId = cacheTmpl.getDataStoreId(); + DataStore store = storeMgr.getDataStore(storeId, DataStoreRole.ImageCache); + TemplateInfo tmplObj = getTemplate(templateId, store); + tmplObjs.add(tmplObj); + } + return tmplObjs; + } + } diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index a68e40c63f3..ce6198dc21e 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -28,6 +28,9 @@ import java.util.Set; import javax.inject.Inject; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; @@ -39,6 +42,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageCacheManager; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; @@ -59,9 +63,6 @@ import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import 
org.apache.cloudstack.storage.image.store.TemplateObject; import org.apache.cloudstack.storage.to.TemplateObjectTO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.ListTemplateAnswer; import com.cloud.agent.api.storage.ListTemplateCommand; @@ -73,8 +74,9 @@ import com.cloud.dc.dao.DataCenterDao; import com.cloud.exception.ResourceAllocationException; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.DataStoreRole; -import com.cloud.storage.StoragePool; +import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.TemplateType; +import com.cloud.storage.StoragePool; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; @@ -128,6 +130,8 @@ public class TemplateServiceImpl implements TemplateService { TemplateManager _tmpltMgr; @Inject ConfigurationDao _configDao; + @Inject + StorageCacheManager _cacheMgr; class TemplateOpContext extends AsyncRpcContext { final TemplateObject template; @@ -466,7 +470,8 @@ public class TemplateServiceImpl implements TemplateService { // persist entry in template_zone_ref table. zoneId can be empty for // region-wide image store, in that case, // we will associate the template to all the zones. 
- private void associateTemplateToZone(long templateId, Long zoneId) { + @Override + public void associateTemplateToZone(long templateId, Long zoneId) { List dcs = new ArrayList(); if (zoneId != null) { dcs.add(zoneId); @@ -608,6 +613,86 @@ public class TemplateServiceImpl implements TemplateService { return copyAsync(volume, template, store); } + private AsyncCallFuture syncToRegionStoreAsync(TemplateInfo template, DataStore store) { + AsyncCallFuture future = new AsyncCallFuture(); + // no need to create entry on template_store_ref here, since entries are already created when prepareSecondaryStorageForMigration is invoked. + // But we need to set default install path so that sync can be done in the right s3 path + TemplateInfo templateOnStore = _templateFactory.getTemplate(template, store); + String installPath = TemplateConstants.DEFAULT_TMPLT_ROOT_DIR + "/" + + TemplateConstants.DEFAULT_TMPLT_FIRST_LEVEL_DIR + + template.getAccountId() + "/" + template.getId() + "/" + template.getUniqueName(); + ((TemplateObject)templateOnStore).setInstallPath(installPath); + TemplateOpContext context = new TemplateOpContext(null, + (TemplateObject)templateOnStore, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().syncTemplateCallBack(null, null)).setContext(context); + _motionSrv.copyAsync(template, templateOnStore, caller); + return future; + } + + protected Void syncTemplateCallBack(AsyncCallbackDispatcher callback, + TemplateOpContext context) { + TemplateInfo destTemplate = context.getTemplate(); + CopyCommandResult result = callback.getResult(); + AsyncCallFuture future = context.getFuture(); + TemplateApiResult res = new TemplateApiResult(destTemplate); + try { + if (result.isFailed()) { + res.setResult(result.getResult()); + // no change to existing template_store_ref, will try to re-sync later if other call triggers this sync operation, like copy template + } else { + // this will update install 
path properly, next time it will not sync anymore. + destTemplate.processEvent(Event.OperationSuccessed, result.getAnswer()); + } + future.complete(res); + } catch (Exception e) { + s_logger.debug("Failed to process sync template callback", e); + res.setResult(e.toString()); + future.complete(res); + } + + return null; + } + + private boolean isRegionStore(DataStore store) { + if (store.getScope().getScopeType() == ScopeType.ZONE && store.getScope().getScopeId() == null) + return true; + else + return false; + } + + // This routine is used to push templates currently on cache store, but not in region store to region store. + // used in migrating existing NFS secondary storage to S3. + @Override + public void syncTemplateToRegionStore(long templateId, DataStore store) { + if (isRegionStore(store)) { + // if template is on region wide object store, check if it is really downloaded there (by checking install_path). Sync template to region + // wide store if it is not there physically. + TemplateInfo tmplOnStore = _templateFactory.getTemplate(templateId, store); + if (tmplOnStore == null) { + throw new CloudRuntimeException("Cannot find an entry in template_store_ref for template " + templateId + " on region store: " + store.getName()); + } + if (tmplOnStore.getInstallPath() == null || tmplOnStore.getInstallPath().length() == 0) { + // template is not on region store yet, sync to region store + TemplateInfo srcTemplate = _templateFactory.getReadyTemplateOnCache(templateId); + if (srcTemplate == null) { + throw new CloudRuntimeException("Cannot find template " + templateId + " on cache store"); + } + AsyncCallFuture future = syncToRegionStoreAsync(srcTemplate, store); + try { + TemplateApiResult result = future.get(); + if (result.isFailed()) { + throw new CloudRuntimeException("sync template from cache to region wide store failed for image store " + store.getName() + ":" + + result.getResult()); + } + _cacheMgr.releaseCacheObject(srcTemplate); // reduce reference count 
for template on cache, so it can recycled by schedule + } catch (Exception ex) { + throw new CloudRuntimeException("sync template from cache to region wide store failed for image store " + store.getName()); + } + } + } + } + @Override public AsyncCallFuture copyTemplate(TemplateInfo srcTemplate, DataStore destStore) { // generate a URL from source template ssvm to download to destination data store diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index 64ef78f8e09..0991860c4d5 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@ -26,6 +26,9 @@ import java.util.Map; import javax.annotation.PostConstruct; import javax.inject.Inject; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider; @@ -37,8 +40,6 @@ import org.apache.cloudstack.storage.image.ImageStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager; import org.apache.cloudstack.storage.image.store.ImageStoreImpl; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.storage.ScopeType; import com.cloud.storage.dao.VMTemplateDao; @@ -94,6 +95,16 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager return imageStores; } + @Override + public List listImageCacheStores() { + List stores = dataStoreDao.listImageCacheStores(); + List 
imageStores = new ArrayList(); + for (ImageStoreVO store : stores) { + imageStores.add(getImageStore(store.getId())); + } + return imageStores; + } + @Override public List listImageStoresByScope(ZoneScope scope) { List stores = dataStoreDao.findByScope(scope); diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java index 855d8cbfe0f..d77658b4314 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/ImageStoreImpl.java @@ -24,7 +24,6 @@ import java.util.concurrent.ExecutionException; import javax.inject.Inject; -import com.cloud.capacity.dao.CapacityDao; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider; @@ -39,9 +38,11 @@ import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.image.ImageStoreDriver; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; +import org.apache.cloudstack.storage.to.ImageStoreTO; import org.apache.log4j.Logger; import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.capacity.dao.CapacityDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.dao.VMTemplateDao; @@ -181,7 +182,16 @@ public class ImageStoreImpl implements ImageStoreEntity { @Override public DataStoreTO getTO() { - return getDriver().getStoreTO(this); + DataStoreTO to = getDriver().getStoreTO(this); + if (to == null) { + ImageStoreTO primaryTO = new ImageStoreTO(); + primaryTO.setProviderName(getProviderName()); + primaryTO.setRole(getRole()); + 
primaryTO.setType(getProtocol()); + primaryTO.setUri(getUri()); + return primaryTO; + } + return to; } @Override diff --git a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java index f0675f3ee27..0a5b60880e5 100644 --- a/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -58,6 +58,7 @@ public class TemplateObject implements TemplateInfo { private VMTemplateVO imageVO; private DataStore dataStore; private String url; + private String installPath; // temporarily set installPath before passing to resource for entries with empty installPath for object store migration case @Inject VMTemplateDao imageDao; @Inject @@ -293,6 +294,9 @@ public class TemplateObject implements TemplateInfo { @Override public String getInstallPath() { + if (installPath != null) + return installPath; + if (dataStore == null) { return null; } @@ -300,6 +304,10 @@ public class TemplateObject implements TemplateInfo { return obj.getInstallPath(); } + public void setInstallPath(String installPath) { + this.installPath = installPath; + } + @Override public long getAccountId() { return imageVO.getAccountId(); diff --git a/engine/storage/integration-test/pom.xml b/engine/storage/integration-test/pom.xml index 46c316116ca..dc4065e5988 100644 --- a/engine/storage/integration-test/pom.xml +++ b/engine/storage/integration-test/pom.xml @@ -119,13 +119,13 @@ org.apache.httpcomponents httpclient + 4.2.2 compile mysql mysql-connector-java - ${cs.mysql.version} provided @@ -150,7 +150,6 @@
maven-antrun-plugin - 1.7 generate-resource diff --git a/engine/storage/integration-test/test/com/cloud/vm/snapshot/dao/VmSnapshotDaoTest.java b/engine/storage/integration-test/test/com/cloud/vm/snapshot/dao/VmSnapshotDaoTest.java new file mode 100644 index 00000000000..fc52f89f5d0 --- /dev/null +++ b/engine/storage/integration-test/test/com/cloud/vm/snapshot/dao/VmSnapshotDaoTest.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.vm.snapshot.dao; + +import com.cloud.vm.snapshot.VMSnapshotDetailsVO; +import junit.framework.Assert; +import org.apache.cloudstack.storage.test.CloudStackTestNGBase; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; + +import javax.inject.Inject; +import java.util.Map; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(locations = "classpath:/storageContext.xml") +public class VmSnapshotDaoTest extends CloudStackTestNGBase { + @Inject + VMSnapshotDetailsDao vmsnapshotDetailsDao; + + @Test + public void testVmSnapshotDetails() { + VMSnapshotDetailsVO detailsVO = new VMSnapshotDetailsVO(1L, "test", "foo"); + vmsnapshotDetailsDao.persist(detailsVO); + Map details = vmsnapshotDetailsDao.getDetails(1L); + Assert.assertTrue(details.containsKey("test")); + } + +} diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTestAdvice.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTestAdvice.java index 902f5953eb1..e51155d9d13 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTestAdvice.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/AopTestAdvice.java @@ -19,10 +19,11 @@ package org.apache.cloudstack.storage.test; import org.aspectj.lang.ProceedingJoinPoint; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class AopTestAdvice { public Object AopTestMethod(ProceedingJoinPoint call) throws Throwable { - Transaction txn = Transaction.open(call.getSignature().getName()); + TransactionLegacy txn = TransactionLegacy.open(call.getSignature().getName()); Object ret = null; try { ret = call.proceed(); diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java 
b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java index 0f97f311dcd..a1f2c07c1f5 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/ChildTestConfiguration.java @@ -16,28 +16,6 @@ // under the License. package org.apache.cloudstack.storage.test; -import java.io.IOException; - -import org.apache.cloudstack.acl.APIChecker; -import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; -import org.apache.cloudstack.engine.service.api.OrchestrationService; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; -import org.apache.cloudstack.framework.config.dao.ConfigurationDaoImpl; -import org.apache.cloudstack.framework.rpc.RpcProvider; -import org.apache.cloudstack.storage.cache.manager.StorageCacheManagerImpl; -import org.apache.cloudstack.storage.test.ChildTestConfiguration.Library; -import org.apache.cloudstack.test.utils.SpringUtils; - -import org.mockito.Mockito; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.ComponentScan; -import org.springframework.context.annotation.ComponentScan.Filter; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.FilterType; -import org.springframework.core.type.classreading.MetadataReader; -import org.springframework.core.type.classreading.MetadataReaderFactory; -import org.springframework.core.type.filter.TypeFilter; - import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; import com.cloud.capacity.dao.CapacityDaoImpl; @@ -49,10 +27,12 @@ import com.cloud.dc.dao.DataCenterDaoImpl; import com.cloud.dc.dao.DataCenterIpAddressDaoImpl; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDaoImpl; import com.cloud.dc.dao.DataCenterVnetDaoImpl; -import 
com.cloud.dc.dao.DcDetailsDaoImpl; +import com.cloud.dc.dao.DataCenterDetailsDaoImpl; import com.cloud.dc.dao.HostPodDaoImpl; import com.cloud.dc.dao.PodVlanDaoImpl; import com.cloud.domain.dao.DomainDaoImpl; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.dao.EventDaoImpl; import com.cloud.host.dao.HostDao; import com.cloud.host.dao.HostDaoImpl; import com.cloud.host.dao.HostDetailsDaoImpl; @@ -78,7 +58,6 @@ import com.cloud.storage.dao.VolumeDaoImpl; import com.cloud.storage.dao.VolumeHostDaoImpl; import com.cloud.storage.download.DownloadMonitorImpl; import com.cloud.storage.secondary.SecondaryStorageVmManager; -import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.tags.dao.ResourceTagsDaoImpl; import com.cloud.template.TemplateManager; import com.cloud.user.AccountManager; @@ -94,6 +73,26 @@ import com.cloud.vm.dao.UserVmDaoImpl; import com.cloud.vm.dao.UserVmDetailsDaoImpl; import com.cloud.vm.dao.VMInstanceDaoImpl; import com.cloud.vm.snapshot.dao.VMSnapshotDaoImpl; +import org.apache.cloudstack.acl.APIChecker; +import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; +import org.apache.cloudstack.engine.service.api.OrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.framework.config.dao.ConfigurationDaoImpl; +import org.apache.cloudstack.framework.rpc.RpcProvider; +import org.apache.cloudstack.storage.cache.manager.StorageCacheManagerImpl; +import org.apache.cloudstack.storage.test.ChildTestConfiguration.Library; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.ComponentScan.Filter; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import 
org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; + +import java.io.IOException; @Configuration @ComponentScan(basePackageClasses = { NicDaoImpl.class, VMInstanceDaoImpl.class, VMTemplateHostDaoImpl.class, @@ -102,12 +101,12 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDaoImpl; HostPodDaoImpl.class, VMTemplateZoneDaoImpl.class, VMTemplateDetailsDaoImpl.class, HostDetailsDaoImpl.class, HostTagsDaoImpl.class, HostTransferMapDaoImpl.class, DataCenterIpAddressDaoImpl.class, DataCenterLinkLocalIpAddressDaoImpl.class, DataCenterVnetDaoImpl.class, PodVlanDaoImpl.class, - DcDetailsDaoImpl.class, DiskOfferingDaoImpl.class, StoragePoolHostDaoImpl.class, UserVmDaoImpl.class, + DataCenterDetailsDaoImpl.class, DiskOfferingDaoImpl.class, StoragePoolHostDaoImpl.class, UserVmDaoImpl.class, UserVmDetailsDaoImpl.class, ServiceOfferingDaoImpl.class, CapacityDaoImpl.class, SnapshotDaoImpl.class, VMSnapshotDaoImpl.class, OCFS2ManagerImpl.class, ClusterDetailsDaoImpl.class, SecondaryStorageVmDaoImpl.class, ConsoleProxyDaoImpl.class, StoragePoolWorkDaoImpl.class, StorageCacheManagerImpl.class, UserDaoImpl.class, DataCenterDaoImpl.class, StoragePoolDetailsDaoImpl.class, DomainDaoImpl.class, DownloadMonitorImpl.class, - AccountDaoImpl.class }, includeFilters = { @Filter(value = Library.class, type = FilterType.CUSTOM) }, + AccountDaoImpl.class, ActionEventUtils.class, EventDaoImpl.class}, includeFilters = { @Filter(value = Library.class, type = FilterType.CUSTOM) }, useDefaultFilters = false) public class ChildTestConfiguration extends TestConfiguration { @@ -186,12 +185,6 @@ public class ChildTestConfiguration extends TestConfiguration { return Mockito.mock(VirtualMachineManager.class); } - - @Bean - public SnapshotManager snapshotMgr() { - return Mockito.mock(SnapshotManager.class); - } - @Bean public ResourceManager resourceMgr() { return 
Mockito.mock(ResourceManager.class); diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/CloudStackTestNGBase.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/CloudStackTestNGBase.java index 26f4c5da0e9..c2b9466f90b 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/CloudStackTestNGBase.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/CloudStackTestNGBase.java @@ -26,6 +26,7 @@ import org.testng.annotations.Test; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class CloudStackTestNGBase extends AbstractTestNGSpringContextTests { private String hostGateway; @@ -39,7 +40,7 @@ public class CloudStackTestNGBase extends AbstractTestNGSpringContextTests { private String imageInstallPath; private String scriptPath; private HypervisorType hypervisor; - private Transaction txn; + private TransactionLegacy txn; private String s3AccessKey; private String s3SecretKey; @@ -54,7 +55,7 @@ public class CloudStackTestNGBase extends AbstractTestNGSpringContextTests { @BeforeMethod(alwaysRun = true) protected void injectDB(Method testMethod) throws Exception { - txn = Transaction.open(testMethod.getName()); + txn = TransactionLegacy.open(testMethod.getName()); } @Test diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java index ec681192aae..fc926af558d 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/DirectAgentManagerSimpleImpl.java @@ -26,6 +26,10 @@ import java.util.Map; import javax.inject.Inject; import javax.naming.ConfigurationException; 
+import com.cloud.host.Host; +import com.cloud.host.Status; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -246,6 +250,7 @@ public class DirectAgentManagerSimpleImpl extends ManagerBase implements AgentMa e1.printStackTrace(); //To change body of catch statement use File | Settings | File Templates. } return true; + } @Override diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/EndpointSelectorTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/EndpointSelectorTest.java index d645deb1b4e..7b8dd3ab642 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/EndpointSelectorTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/EndpointSelectorTest.java @@ -18,6 +18,7 @@ */ package org.apache.cloudstack.storage.test; + import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -28,6 +29,7 @@ import java.util.UUID; import javax.inject.Inject; +import com.cloud.server.LockMasterListener; import junit.framework.Assert; import org.junit.Before; @@ -58,6 +60,9 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import com.cloud.agent.AgentManager; + +import com.cloud.agent.AgentManager; + import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; @@ -88,6 +93,7 @@ import com.cloud.user.User; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.Merovingian2; + @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = { "classpath:/fakeDriverTestContext.xml" }) public class EndpointSelectorTest { @@ -121,6 +127,8 @@ public class EndpointSelectorTest { ImageStoreVO imageStore; @Inject AccountManager accountManager; + + LockMasterListener 
lockMasterListener; VolumeInfo vol = null; FakePrimaryDataStoreDriver driver = new FakePrimaryDataStoreDriver(); @Inject diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakeDriverTestConfiguration.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakeDriverTestConfiguration.java index 75eda90c864..9022d9703ab 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakeDriverTestConfiguration.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakeDriverTestConfiguration.java @@ -18,10 +18,12 @@ */ package org.apache.cloudstack.storage.test; + import com.cloud.storage.snapshot.SnapshotScheduler; import com.cloud.storage.snapshot.SnapshotSchedulerImpl; import com.cloud.user.DomainManager; import com.cloud.utils.component.ComponentContext; + import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; @@ -63,5 +65,5 @@ public class FakeDriverTestConfiguration extends ChildTestConfiguration{ public EndPointSelector selector() { return ComponentContext.inject(DefaultEndPointSelector.class); } - } + diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakePrimaryDataStoreDriver.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakePrimaryDataStoreDriver.java index 810afd11577..de64b8f5ca2 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakePrimaryDataStoreDriver.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/FakePrimaryDataStoreDriver.java @@ -18,8 +18,8 @@ */ package org.apache.cloudstack.storage.test; -import com.cloud.agent.api.to.DataStoreTO; -import com.cloud.agent.api.to.DataTO; +import java.util.UUID; + import 
org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; @@ -33,7 +33,8 @@ import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import java.util.UUID; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; public class FakePrimaryDataStoreDriver implements PrimaryDataStoreDriver { boolean snapshotResult = true; diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java index 52ccf410c8d..2e77ac33ed8 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/MockStorageMotionStrategy.java @@ -19,33 +19,71 @@ package org.apache.cloudstack.storage.test; import java.util.Map; +import java.util.UUID; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; +import 
com.cloud.agent.api.to.DataObjectType; +import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.host.Host; +import com.cloud.storage.Storage; public class MockStorageMotionStrategy implements DataMotionStrategy { + boolean success = true; @Override - public boolean canHandle(DataObject srcData, DataObject destData) { - // TODO Auto-generated method stub - return true; + public StrategyPriority canHandle(DataObject srcData, DataObject destData) { + return StrategyPriority.HIGHEST; + } + + public void makeBackupSnapshotSucceed(boolean success) { + this.success = success; } @Override - public boolean canHandle(Map volumeMap, Host srcHost, Host destHost) { - return true; + public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { + return StrategyPriority.HIGHEST; } @Override public Void copyAsync(DataObject srcData, DataObject destData, AsyncCompletionCallback callback) { - CopyCommandResult result = new CopyCommandResult("something", null); + CopyCmdAnswer answer = null; + DataTO data = null; + if (!success) { + CopyCommandResult result = new CopyCommandResult(null, null); + result.setResult("Failed"); + callback.complete(result); + } + if (destData.getType() == DataObjectType.SNAPSHOT) { + SnapshotInfo srcSnapshot = (SnapshotInfo)srcData; + + SnapshotObjectTO newSnapshot = new SnapshotObjectTO(); + newSnapshot.setPath(UUID.randomUUID().toString()); + if (srcSnapshot.getParent() != null) { + newSnapshot.setParentSnapshotPath(srcSnapshot.getParent().getPath()); + } + data = newSnapshot; + } else if (destData.getType() == DataObjectType.TEMPLATE) { + TemplateObjectTO newTemplate = new TemplateObjectTO(); + newTemplate.setPath(UUID.randomUUID().toString()); + newTemplate.setFormat(Storage.ImageFormat.QCOW2); + newTemplate.setSize(10L); + newTemplate.setPhysicalSize(10L); + data = newTemplate; + } + answer = new CopyCmdAnswer(data); + CopyCommandResult result = new CopyCommandResult("something", 
answer); callback.complete(result); return null; } diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java index f1eed3a4b62..550a6bfc7b5 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTest.java @@ -18,52 +18,6 @@ */ package org.apache.cloudstack.storage.test; -import java.net.URI; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutionException; - -import javax.inject.Inject; - -import junit.framework.Assert; - -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; 
-import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; -import org.apache.cloudstack.framework.async.AsyncCallFuture; -import org.apache.cloudstack.storage.LocalHostEndpoint; -import org.apache.cloudstack.storage.MockLocalNfsSecondaryStorageResource; -import org.apache.cloudstack.storage.RemoteHostEndPoint; -import org.apache.cloudstack.storage.command.CopyCmdAnswer; -import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; -import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; -import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; -import org.apache.cloudstack.storage.to.TemplateObjectTO; -import org.mockito.Matchers; -import org.mockito.Mockito; -import org.springframework.test.context.ContextConfiguration; -import org.testng.AssertJUnit; -import org.testng.annotations.Test; - import com.cloud.agent.AgentManager; import com.cloud.agent.api.Command; import com.cloud.dc.ClusterVO; @@ -99,6 +53,52 @@ import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; +import junit.framework.Assert; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.LocalHostEndpoint; +import org.apache.cloudstack.storage.MockLocalNfsSecondaryStorageResource; +import org.apache.cloudstack.storage.RemoteHostEndPoint; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import 
org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.mockito.Matchers; +import org.mockito.Mockito; +import org.springframework.test.context.ContextConfiguration; +import org.testng.AssertJUnit; +import org.testng.annotations.Test; + +import javax.inject.Inject; +import java.net.URI; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; @ContextConfiguration(locations = { "classpath:/storageContext.xml" }) public class SnapshotTest extends CloudStackTestNGBase { @@ -148,6 +148,8 @@ public class SnapshotTest extends CloudStackTestNGBase { @Inject SnapshotDataFactory snapshotFactory; @Inject + StorageStrategyFactory storageStrategyFactory; + @Inject List snapshotStrategies; @Inject SnapshotService snapshotSvr; @@ -281,7 +283,7 @@ public class SnapshotTest extends CloudStackTestNGBase { Mockito.when(epSelector.select(Matchers.any(DataObject.class))).thenReturn(remoteEp); Mockito.when(epSelector.select(Matchers.any(DataStore.class))).thenReturn(remoteEp); Mockito.when(hyGuruMgr.getGuruProcessedCommandTargetHost(Matchers.anyLong(), Matchers.any(Command.class))) - .thenReturn(this.host.getId()); + .thenReturn(this.host.getId()); } @@ -367,10 +369,10 @@ public class SnapshotTest extends CloudStackTestNGBase { result = future.get(); Assert.assertTrue(result.isSuccess()); return result.getVolume(); - + } - + private VMTemplateVO createTemplateInDb() { VMTemplateVO image = new VMTemplateVO(); @@ -400,11 +402,12 @@ public class SnapshotTest extends CloudStackTestNGBase { SnapshotVO snapshotVO = createSnapshotInDb(vol); SnapshotInfo snapshot = this.snapshotFactory.getSnapshot(snapshotVO.getId(), vol.getDataStore()); boolean result = false; - for (SnapshotStrategy strategy : this.snapshotStrategies) { - if (strategy.canHandle(snapshot)) { - snapshot = strategy.takeSnapshot(snapshot); - result = true; - } + + + SnapshotStrategy snapshotStrategy = storageStrategyFactory.getSnapshotStrategy(snapshot, 
SnapshotOperation.TAKE); + if (snapshotStrategy != null) { + snapshot = snapshotStrategy.takeSnapshot(snapshot); + result = true; } AssertJUnit.assertTrue(result); @@ -422,16 +425,18 @@ public class SnapshotTest extends CloudStackTestNGBase { SnapshotVO snapshotVO = createSnapshotInDb(vol); SnapshotInfo snapshot = this.snapshotFactory.getSnapshot(snapshotVO.getId(), vol.getDataStore()); SnapshotInfo newSnapshot = null; - for (SnapshotStrategy strategy : this.snapshotStrategies) { - if (strategy.canHandle(snapshot)) { - newSnapshot = strategy.takeSnapshot(snapshot); - } + + + SnapshotStrategy snapshotStrategy = storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.TAKE); + if (snapshotStrategy != null) { + newSnapshot = snapshotStrategy.takeSnapshot(snapshot); + } AssertJUnit.assertNotNull(newSnapshot); // create another snapshot for (SnapshotStrategy strategy : this.snapshotStrategies) { - if (strategy.canHandle(snapshot)) { + if (strategy.canHandle(snapshot, SnapshotOperation.DELETE) != StrategyPriority.CANT_HANDLE) { strategy.deleteSnapshot(newSnapshot.getId()); } } @@ -444,11 +449,11 @@ public class SnapshotTest extends CloudStackTestNGBase { SnapshotVO snapshotVO = createSnapshotInDb(vol); SnapshotInfo snapshot = this.snapshotFactory.getSnapshot(snapshotVO.getId(), vol.getDataStore()); boolean result = false; - for (SnapshotStrategy strategy : this.snapshotStrategies) { - if (strategy.canHandle(snapshot)) { - snapshot = strategy.takeSnapshot(snapshot); - result = true; - } + + SnapshotStrategy snapshotStrategy = storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.TAKE); + if (snapshotStrategy != null) { + snapshot = snapshotStrategy.takeSnapshot(snapshot); + result = true; } AssertJUnit.assertTrue(result); @@ -467,18 +472,20 @@ public class SnapshotTest extends CloudStackTestNGBase { Mockito.when(epSelector.select(Matchers.any(DataObject.class), Matchers.any(DataObject.class))).thenReturn(remoteEp); } } - + @Test public void 
createSnapshot() throws InterruptedException, ExecutionException { VolumeInfo vol = createCopyBaseImage(); SnapshotVO snapshotVO = createSnapshotInDb(vol); SnapshotInfo snapshot = this.snapshotFactory.getSnapshot(snapshotVO.getId(), vol.getDataStore()); SnapshotInfo newSnapshot = null; - for (SnapshotStrategy strategy : this.snapshotStrategies) { - if (strategy.canHandle(snapshot)) { - newSnapshot = strategy.takeSnapshot(snapshot); - } + + + SnapshotStrategy snapshotStrategy = storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.TAKE); + if (snapshotStrategy != null) { + newSnapshot = snapshotStrategy.takeSnapshot(snapshot); } + AssertJUnit.assertNotNull(newSnapshot); LocalHostEndpoint ep = new MockLocalHostEndPoint(); @@ -487,7 +494,7 @@ public class SnapshotTest extends CloudStackTestNGBase { try { for (SnapshotStrategy strategy : this.snapshotStrategies) { - if (strategy.canHandle(snapshot)) { + if (strategy.canHandle(snapshot, SnapshotOperation.DELETE) != StrategyPriority.CANT_HANDLE) { boolean res = strategy.deleteSnapshot(newSnapshot.getId()); Assert.assertTrue(res); } diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java index c98f7056662..c73d1672d27 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/SnapshotTestWithFakeData.java @@ -18,23 +18,27 @@ */ package org.apache.cloudstack.storage.test; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.storage.ScopeType; -import com.cloud.storage.Snapshot; -import com.cloud.storage.SnapshotVO; -import com.cloud.storage.Storage; -import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.Volume; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.SnapshotDao; 
-import com.cloud.storage.dao.VolumeDao; -import com.cloud.utils.component.ComponentContext; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +import javax.inject.Inject; + import junit.framework.Assert; + import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; @@ -43,11 +47,14 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import 
org.apache.cloudstack.storage.volume.VolumeObject; +import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -55,16 +62,43 @@ import org.mockito.Mockito; import org.springframework.test.context.ContextConfiguration; import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; -import javax.inject.Inject; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; +import com.cloud.dc.ClusterVO; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.HostPodVO; +import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.HostPodDao; +import com.cloud.hypervisor.Hypervisor; +import com.cloud.org.Cluster; +import com.cloud.org.Managed; +import com.cloud.server.LockMasterListener; +import com.cloud.storage.CreateSnapshotPayload; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Snapshot; +import com.cloud.storage.SnapshotPolicyVO; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.SnapshotPolicyDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.User; +import com.cloud.utils.DateUtil; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.db.Merovingian2; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(locations = { "classpath:/fakeDriverTestContext.xml" }) -public class SnapshotTestWithFakeData { +public class SnapshotTestWithFakeData { @Inject SnapshotService snapshotService; @Inject @@ -85,30 
+119,106 @@ public class SnapshotTestWithFakeData { VolumeService volumeService; @Inject VolumeDataFactory volumeDataFactory; + @Inject + DataCenterDao dcDao; + Long dcId; + @Inject + HostPodDao podDao; + Long podId; + @Inject + ClusterDao clusterDao; + Long clusterId; + @Inject + ImageStoreDao imageStoreDao; + ImageStoreVO imageStore; + @Inject + AccountManager accountManager; + LockMasterListener lockMasterListener; + VolumeInfo vol = null; FakePrimaryDataStoreDriver driver = new FakePrimaryDataStoreDriver(); + @Inject + MockStorageMotionStrategy mockStorageMotionStrategy; + Merovingian2 _lockMaster; + @Inject + SnapshotPolicyDao snapshotPolicyDao; @Before public void setUp() { - Mockito.when(primaryDataStoreProvider.configure(Mockito.anyMap())).thenReturn(true); + // create data center + + DataCenterVO dc = new DataCenterVO(UUID.randomUUID().toString(), "test", "8.8.8.8", null, "10.0.0.1", null, + "10.0.0.1/24", null, null, DataCenter.NetworkType.Basic, null, null, true, true, null, null); + dc = dcDao.persist(dc); + dcId = dc.getId(); + // create pod + + HostPodVO pod = new HostPodVO(UUID.randomUUID().toString(), dc.getId(), "10.223.0.1", + "10.233.2.2/25", 8, "test"); + pod = podDao.persist(pod); + podId = pod.getId(); + // create xen cluster + ClusterVO cluster = new ClusterVO(dc.getId(), pod.getId(), "devcloud cluster"); + cluster.setHypervisorType(Hypervisor.HypervisorType.XenServer.toString()); + cluster.setClusterType(Cluster.ClusterType.CloudManaged); + cluster.setManagedState(Managed.ManagedState.Managed); + cluster = clusterDao.persist(cluster); + clusterId = cluster.getId(); + + imageStore = new ImageStoreVO(); + imageStore.setName(UUID.randomUUID().toString()); + imageStore.setDataCenterId(dcId); + imageStore.setProviderName(DataStoreProvider.NFS_IMAGE); + imageStore.setRole(DataStoreRole.Image); + imageStore.setUrl(UUID.randomUUID().toString()); + imageStore.setUuid(UUID.randomUUID().toString()); + imageStore.setProtocol("nfs"); + imageStore = 
imageStoreDao.persist(imageStore); + + when(primaryDataStoreProvider.configure(Mockito.anyMap())).thenReturn(true); Set types = new HashSet(); types.add(DataStoreProvider.DataStoreProviderType.PRIMARY); - Mockito.when(primaryDataStoreProvider.getTypes()).thenReturn(types); - Mockito.when(primaryDataStoreProvider.getName()).thenReturn(DataStoreProvider.DEFAULT_PRIMARY); - Mockito.when(primaryDataStoreProvider.getDataStoreDriver()).thenReturn(driver); + when(primaryDataStoreProvider.getTypes()).thenReturn(types); + when(primaryDataStoreProvider.getName()).thenReturn(DataStoreProvider.DEFAULT_PRIMARY); + when(primaryDataStoreProvider.getDataStoreDriver()).thenReturn(driver); + User user = mock(User.class); + when(user.getId()).thenReturn(1L); + Account account = mock(Account.class); + when(account.getId()).thenReturn(1L); + when(accountManager.getSystemAccount()).thenReturn(account); + when(accountManager.getSystemUser()).thenReturn(user); + if(Merovingian2.getLockMaster() == null) { + _lockMaster = Merovingian2.createLockMaster(1234); + } else { + _lockMaster = Merovingian2.getLockMaster(); + } + _lockMaster.cleanupThisServer(); ComponentContext.initComponentsLifeCycle(); } + + @After + public void tearDown() throws Exception { + _lockMaster.cleanupThisServer(); + } private SnapshotVO createSnapshotInDb() { - Snapshot.Type snapshotType = Snapshot.Type.MANUAL; - SnapshotVO snapshotVO = new SnapshotVO(1, 2, 1, 1L, 1L, UUID.randomUUID() + Snapshot.Type snapshotType = Snapshot.Type.RECURRING; + SnapshotVO snapshotVO = new SnapshotVO(dcId, 2, 1, 1L, 1L, UUID.randomUUID() + .toString(), (short) snapshotType.ordinal(), snapshotType.name(), 100, + Hypervisor.HypervisorType.XenServer); + return this.snapshotDao.persist(snapshotVO); + } + + private SnapshotVO createSnapshotInDb(Long volumeId) { + Snapshot.Type snapshotType = Snapshot.Type.DAILY; + SnapshotVO snapshotVO = new SnapshotVO(dcId, 2, 1, volumeId, 1L, UUID.randomUUID() .toString(), (short) snapshotType.ordinal(), 
snapshotType.name(), 100, Hypervisor.HypervisorType.XenServer); return this.snapshotDao.persist(snapshotVO); } private VolumeInfo createVolume(Long templateId, DataStore store) { - VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), 1L, 1L, 1L, 1L, 1000, 0L, 0L, ""); + VolumeVO volume = new VolumeVO(Volume.Type.DATADISK, UUID.randomUUID().toString(), dcId, 1L, 1L, 1L, 1000, 0L, 0L, ""); ; volume.setPoolId(store.getId()); @@ -120,8 +230,8 @@ public class SnapshotTestWithFakeData { } private DataStore createDataStore() throws URISyntaxException { StoragePoolVO pool = new StoragePoolVO(); - pool.setClusterId(1L); - pool.setDataCenterId(1); + pool.setClusterId(clusterId); + pool.setDataCenterId(dcId); URI uri = new URI("nfs://jfkdkf/fjdkfj"); pool.setHostAddress(uri.getHost()); pool.setPath(uri.getPath()); @@ -130,14 +240,14 @@ public class SnapshotTestWithFakeData { pool.setUuid(UUID.randomUUID().toString()); pool.setStatus(StoragePoolStatus.Up); pool.setPoolType(Storage.StoragePoolType.NetworkFilesystem); - pool.setPodId(1L); + pool.setPodId(podId); pool.setScope(ScopeType.CLUSTER); pool.setStorageProviderName(DataStoreProvider.DEFAULT_PRIMARY); pool = this.primaryDataStoreDao.persist(pool); DataStore store = this.dataStoreManager.getPrimaryDataStore(pool.getId()); return store; } - @Test + //@Test public void testTakeSnapshot() throws URISyntaxException { SnapshotVO snapshotVO = createSnapshotInDb(); DataStore store = createDataStore(); @@ -158,7 +268,7 @@ public class SnapshotTestWithFakeData { } } - @Test + //@Test public void testTakeSnapshotWithFailed() throws URISyntaxException { SnapshotVO snapshotVO = createSnapshotInDb(); DataStore store = null; @@ -179,7 +289,7 @@ public class SnapshotTestWithFakeData { } } - @Test + //@Test public void testTakeSnapshotFromVolume() throws URISyntaxException { DataStore store = createDataStore(); FakePrimaryDataStoreDriver dataStoreDriver = (FakePrimaryDataStoreDriver)store.getDriver(); @@ 
-191,4 +301,57 @@ public class SnapshotTestWithFakeData { Assert.assertTrue(result == null); } + protected SnapshotPolicyVO createSnapshotPolicy(Long volId) { + SnapshotPolicyVO policyVO = new SnapshotPolicyVO(volId, "jfkd", "fdfd", DateUtil.IntervalType.DAILY, 8); + policyVO = snapshotPolicyDao.persist(policyVO); + return policyVO; + } + + @Test + public void testConcurrentSnapshot() throws URISyntaxException, InterruptedException, ExecutionException { + DataStore store = createDataStore(); + final FakePrimaryDataStoreDriver dataStoreDriver = (FakePrimaryDataStoreDriver)store.getDriver(); + dataStoreDriver.makeTakeSnapshotSucceed(true); + final VolumeInfo volumeInfo = createVolume(1L, store); + Assert.assertTrue(volumeInfo.getState() == Volume.State.Ready); + vol = volumeInfo; + // final SnapshotPolicyVO policyVO = createSnapshotPolicy(vol.getId()); + + + ExecutorService pool = Executors.newFixedThreadPool(2); + boolean result = false; + List> future = new ArrayList>(); + for(int i = 0; i < 12; i++) { + final int cnt = i; + Future task = pool.submit(new Callable() { + @Override + public Boolean call() throws Exception { + boolean r = true; + try { + SnapshotVO snapshotVO = createSnapshotInDb(vol.getId()); + VolumeObject volumeObject = (VolumeObject)vol; + Account account = mock(Account.class); + when(account.getId()).thenReturn(1L); + CreateSnapshotPayload createSnapshotPayload = mock(CreateSnapshotPayload.class); + when(createSnapshotPayload.getAccount()).thenReturn(account); + when(createSnapshotPayload.getSnapshotId()).thenReturn(snapshotVO.getId()); + when(createSnapshotPayload.getSnapshotPolicyId()).thenReturn(0L); + volumeObject.addPayload(createSnapshotPayload); + if (cnt > 8) { + mockStorageMotionStrategy.makeBackupSnapshotSucceed(false); + } + SnapshotInfo newSnapshot = volumeService.takeSnapshot(vol); + if (newSnapshot == null) { + r = false; + } + } catch (Exception e) { + r = false; + } + return r; + } + }); + Assert.assertTrue(task.get()); + } + + } } 
diff --git a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNGAop.java b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNGAop.java index 515c5c80532..44ead367f64 100644 --- a/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNGAop.java +++ b/engine/storage/integration-test/test/org/apache/cloudstack/storage/test/TestNGAop.java @@ -26,7 +26,7 @@ import org.testng.ITestNGMethod; import org.testng.internal.ConstructorOrMethod; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class TestNGAop implements IMethodInterceptor { @@ -39,7 +39,7 @@ public class TestNGAop implements IMethodInterceptor { if (m != null) { DB db = m.getAnnotation(DB.class); if (db != null) { - Transaction txn = Transaction.open(m.getName()); + TransactionLegacy txn = TransactionLegacy.open(m.getName()); } } } diff --git a/engine/storage/integration-test/test/resource/fakeDriverTestContext.xml b/engine/storage/integration-test/test/resource/fakeDriverTestContext.xml index 3abcf08090b..b7ef363ff04 100644 --- a/engine/storage/integration-test/test/resource/fakeDriverTestContext.xml +++ b/engine/storage/integration-test/test/resource/fakeDriverTestContext.xml @@ -48,7 +48,6 @@ - @@ -84,4 +83,9 @@ + + + + + diff --git a/engine/storage/integration-test/test/resources/storageContext.xml b/engine/storage/integration-test/test/resources/storageContext.xml index 664f1e3a290..0dcd6a83407 100644 --- a/engine/storage/integration-test/test/resources/storageContext.xml +++ b/engine/storage/integration-test/test/resources/storageContext.xml @@ -85,4 +85,10 @@ +<<<<<<< HEAD + +======= + + +>>>>>>> pluggable_vm_snapshot diff --git a/engine/storage/resources/META-INF/cloudstack/core/spring-engine-storage-core-context.xml b/engine/storage/resources/META-INF/cloudstack/core/spring-engine-storage-core-context.xml new file mode 100644 index 00000000000..76ec23e37d1 
--- /dev/null +++ b/engine/storage/resources/META-INF/cloudstack/core/spring-engine-storage-core-context.xml @@ -0,0 +1,70 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine/storage/resources/META-INF/cloudstack/storage-allocator/module.properties b/engine/storage/resources/META-INF/cloudstack/storage-allocator/module.properties new file mode 100644 index 00000000000..6c96e91a22d --- /dev/null +++ b/engine/storage/resources/META-INF/cloudstack/storage-allocator/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=storage-allocator +parent=storage \ No newline at end of file diff --git a/engine/storage/resources/META-INF/cloudstack/storage-allocator/spring-engine-storage-storage-allocator-context.xml b/engine/storage/resources/META-INF/cloudstack/storage-allocator/spring-engine-storage-storage-allocator-context.xml new file mode 100644 index 00000000000..e4e02aa210e --- /dev/null +++ b/engine/storage/resources/META-INF/cloudstack/storage-allocator/spring-engine-storage-storage-allocator-context.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + + diff --git a/engine/storage/snapshot/pom.xml b/engine/storage/snapshot/pom.xml index 8a847040cd8..808d0c207a6 100644 --- a/engine/storage/snapshot/pom.xml +++ b/engine/storage/snapshot/pom.xml @@ -30,5 +30,37 @@ cloud-engine-api ${project.version} + + org.apache.cloudstack + cloud-api + ${project.version} + test-jar + test + + + + + maven-compiler-plugin + + + + testCompile + + + + + + maven-surefire-plugin + + + integration-test + + test + + + + + +
diff --git a/engine/storage/snapshot/resources/META-INF/cloudstack/core/spring-engine-storage-snapshot-core-context.xml b/engine/storage/snapshot/resources/META-INF/cloudstack/core/spring-engine-storage-snapshot-core-context.xml new file mode 100644 index 00000000000..308899280dc --- /dev/null +++ b/engine/storage/snapshot/resources/META-INF/cloudstack/core/spring-engine-storage-snapshot-core-context.xml @@ -0,0 +1,41 @@ + + + + + + + + + + \ No newline at end of file diff --git a/engine/storage/snapshot/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml b/engine/storage/snapshot/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml new file mode 100644 index 00000000000..d25aeea0250 --- /dev/null +++ b/engine/storage/snapshot/resources/META-INF/cloudstack/storage/spring-engine-storage-snapshot-storage-context.xml @@ -0,0 +1,38 @@ + + + + + + + + + diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java index aafdad05ff0..6205fe40deb 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotDataFactoryImpl.java @@ -18,8 +18,13 @@ */ package org.apache.cloudstack.storage.snapshot; +import java.util.ArrayList; +import java.util.List; + import javax.inject.Inject; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -28,7 +33,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import 
org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import org.springframework.stereotype.Component; import com.cloud.storage.DataStoreRole; import com.cloud.storage.SnapshotVO; @@ -70,8 +74,22 @@ public class SnapshotDataFactoryImpl implements SnapshotDataFactory { if (snapshotStore == null) { return null; } - DataStore store = this.storeMgr.getDataStore(snapshotStore.getDataStoreId(), role); + DataStore store = storeMgr.getDataStore(snapshotStore.getDataStoreId(), role); SnapshotObject so = SnapshotObject.getSnapshotObject(snapshot, store); return so; } + + @Override + public List listSnapshotOnCache(long snapshotId) { + List cacheSnapshots = snapshotStoreDao.listOnCache(snapshotId); + List snapObjs = new ArrayList(); + for (SnapshotDataStoreVO cacheSnap : cacheSnapshots) { + long storeId = cacheSnap.getDataStoreId(); + DataStore store = storeMgr.getDataStore(storeId, DataStoreRole.ImageCache); + SnapshotInfo tmplObj = getSnapshot(snapshotId, store); + snapObjs.add(tmplObj); + } + return snapObjs; + } + } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index e69881c6006..daf6477be02 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -19,10 +19,21 @@ package org.apache.cloudstack.storage.snapshot; import java.util.Date; +import java.util.List; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.*; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import 
org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; @@ -42,9 +53,8 @@ import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; @@ -66,6 +76,8 @@ public class SnapshotObject implements SnapshotInfo { ObjectInDataStoreManager objectInStoreMgr; @Inject SnapshotDataStoreDao snapshotStoreDao; + @Inject + StorageStrategyFactory storageStrategyFactory; public SnapshotObject() { @@ -83,19 +95,19 @@ public class SnapshotObject implements SnapshotInfo { } public DataStore getStore() { - return this.store; + return store; } @Override public SnapshotInfo getParent() { - SnapshotDataStoreVO snapStoreVO = this.snapshotStoreDao.findByStoreSnapshot(this.store.getRole(), - this.store.getId(), this.snapshot.getId()); + SnapshotDataStoreVO snapStoreVO = snapshotStoreDao.findByStoreSnapshot(store.getRole(), + store.getId(), 
snapshot.getId()); Long parentId = null; if (snapStoreVO != null) { parentId = snapStoreVO.getParentSnapshotId(); if (parentId != null && parentId != 0) { - return this.snapshotFactory.getSnapshot(parentId, store); + return snapshotFactory.getSnapshot(parentId, store); } } @@ -104,42 +116,51 @@ public class SnapshotObject implements SnapshotInfo { @Override public SnapshotInfo getChild() { - SearchCriteriaService sc = SearchCriteria2 - .create(SnapshotDataStoreVO.class); - sc.addAnd(sc.getEntity().getDataStoreId(), Op.EQ, this.store.getId()); - sc.addAnd(sc.getEntity().getRole(), Op.EQ, this.store.getRole()); - sc.addAnd(sc.getEntity().getState(), Op.NIN, State.Destroying, State.Destroyed, State.Error); - sc.addAnd(sc.getEntity().getParentSnapshotId(), Op.EQ, this.getId()); + QueryBuilder sc = QueryBuilder.create(SnapshotDataStoreVO.class); + sc.and(sc.entity().getDataStoreId(), Op.EQ,store.getId()); + sc.and(sc.entity().getRole(), Op.EQ,store.getRole()); + sc.and(sc.entity().getState(), Op.NIN, State.Destroying, State.Destroyed, State.Error); + sc.and(sc.entity().getParentSnapshotId(), Op.EQ,getId()); SnapshotDataStoreVO vo = sc.find(); if (vo == null) { return null; } - return this.snapshotFactory.getSnapshot(vo.getId(), store); + return snapshotFactory.getSnapshot(vo.getId(), store); + } + + @Override + public boolean isRevertable() { + SnapshotStrategy snapshotStrategy = storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.REVERT); + if (snapshotStrategy != null) { + return true; + } + + return false; } @Override public VolumeInfo getBaseVolume() { - return volFactory.getVolume(this.snapshot.getVolumeId()); + return volFactory.getVolume(snapshot.getVolumeId()); } @Override public long getId() { - return this.snapshot.getId(); + return snapshot.getId(); } @Override public String getUri() { - return this.snapshot.getUuid(); + return snapshot.getUuid(); } @Override public DataStore getDataStore() { - return this.store; + return store; } @Override 
public Long getSize() { - return this.snapshot.getSize(); + return snapshot.getSize(); } @Override @@ -149,7 +170,7 @@ public class SnapshotObject implements SnapshotInfo { @Override public String getUuid() { - return this.snapshot.getUuid(); + return snapshot.getUuid(); } @Override @@ -168,17 +189,17 @@ public class SnapshotObject implements SnapshotInfo { @Override public long getAccountId() { - return this.snapshot.getAccountId(); + return snapshot.getAccountId(); } @Override public long getVolumeId() { - return this.snapshot.getVolumeId(); + return snapshot.getVolumeId(); } @Override public String getPath() { - DataObjectInStore objectInStore = this.objectInStoreMgr.findObject(this, getDataStore()); + DataObjectInStore objectInStore = objectInStoreMgr.findObject(this, getDataStore()); if (objectInStore != null) { return objectInStore.getInstallPath(); } @@ -187,60 +208,60 @@ public class SnapshotObject implements SnapshotInfo { @Override public String getName() { - return this.snapshot.getName(); + return snapshot.getName(); } @Override public Date getCreated() { - return this.snapshot.getCreated(); + return snapshot.getCreated(); } @Override public Type getRecurringType() { - return this.snapshot.getRecurringType(); + return snapshot.getRecurringType(); } @Override public State getState() { - return this.snapshot.getState(); + return snapshot.getState(); } @Override public HypervisorType getHypervisorType() { - return this.snapshot.getHypervisorType(); + return snapshot.getHypervisorType(); } @Override public boolean isRecursive() { - return this.snapshot.isRecursive(); + return snapshot.isRecursive(); } @Override public short getsnapshotType() { - return this.snapshot.getsnapshotType(); + return snapshot.getsnapshotType(); } @Override public long getDomainId() { - return this.snapshot.getDomainId(); + return snapshot.getDomainId(); } @Override public Long getDataCenterId() { - return this.snapshot.getDataCenterId(); + return snapshot.getDataCenterId(); } 
public void processEvent(Snapshot.Event event) throws NoTransitionException { - stateMachineMgr.processEvent(this.snapshot, event); + stateMachineMgr.processEvent(snapshot, event); } public SnapshotVO getSnapshotVO() { - return this.snapshot; + return snapshot; } @Override public DataTO getTO() { - DataTO to = this.store.getDriver().getTO(this); + DataTO to = store.getDriver().getTO(this); if (to == null) { return new SnapshotObjectTO(this); } @@ -250,31 +271,31 @@ public class SnapshotObject implements SnapshotInfo { @Override public void processEvent(ObjectInDataStoreStateMachine.Event event, Answer answer) { try { - SnapshotDataStoreVO snapshotStore = this.snapshotStoreDao.findByStoreSnapshot( - this.getDataStore().getRole(), this.getDataStore().getId(), this.getId()); + SnapshotDataStoreVO snapshotStore = snapshotStoreDao.findByStoreSnapshot( + getDataStore().getRole(), getDataStore().getId(), getId()); if (answer instanceof CreateObjectAnswer) { SnapshotObjectTO snapshotTO = (SnapshotObjectTO) ((CreateObjectAnswer) answer).getData(); snapshotStore.setInstallPath(snapshotTO.getPath()); - this.snapshotStoreDao.update(snapshotStore.getId(), snapshotStore); + snapshotStoreDao.update(snapshotStore.getId(), snapshotStore); } else if (answer instanceof CopyCmdAnswer) { SnapshotObjectTO snapshotTO = (SnapshotObjectTO) ((CopyCmdAnswer) answer).getNewData(); snapshotStore.setInstallPath(snapshotTO.getPath()); if (snapshotTO.getParentSnapshotPath() == null) { snapshotStore.setParentSnapshotId(0L); } - this.snapshotStoreDao.update(snapshotStore.getId(), snapshotStore); - + snapshotStoreDao.update(snapshotStore.getId(), snapshotStore); + // update side-effect of snapshot operation - if(snapshotTO.getVolume().getPath() != null) { - VolumeVO vol = this.volumeDao.findByUuid(snapshotTO.getVolume().getUuid()); - if(vol != null) { - s_logger.info("Update volume path change due to snapshot operation, volume " + vol.getId() + " path: " - + vol.getPath() + "->" + 
snapshotTO.getVolume().getPath()); - vol.setPath(snapshotTO.getVolume().getPath()); - this.volumeDao.update(vol.getId(), vol); - } else { - s_logger.error("Cound't find the original volume with uuid: " + snapshotTO.getVolume().getUuid()); - } + if(snapshotTO.getVolume() != null && snapshotTO.getVolume().getPath() != null) { + VolumeVO vol = volumeDao.findByUuid(snapshotTO.getVolume().getUuid()); + if(vol != null) { + s_logger.info("Update volume path change due to snapshot operation, volume " + vol.getId() + " path: " + + vol.getPath() + "->" + snapshotTO.getVolume().getPath()); + vol.setPath(snapshotTO.getVolume().getPath()); + volumeDao.update(vol.getId(), vol); + } else { + s_logger.error("Cound't find the original volume with uuid: " + snapshotTO.getVolume().getUuid()); + } } } else { throw new CloudRuntimeException("Unknown answer: " + answer.getClass()); @@ -290,13 +311,13 @@ public class SnapshotObject implements SnapshotInfo { @Override public void incRefCount() { - if (this.store == null) { + if (store == null) { return; } - if (this.store.getRole() == DataStoreRole.Image || this.store.getRole() == DataStoreRole.ImageCache) { + if (store.getRole() == DataStoreRole.Image || store.getRole() == DataStoreRole.ImageCache) { SnapshotDataStoreVO store = snapshotStoreDao.findByStoreSnapshot(this.store.getRole(), this.store.getId(), - this.getId()); + getId()); store.incrRefCnt(); store.setLastUpdated(new Date()); snapshotStoreDao.update(store.getId(), store); @@ -305,12 +326,12 @@ public class SnapshotObject implements SnapshotInfo { @Override public void decRefCount() { - if (this.store == null) { + if (store == null) { return; } - if (this.store.getRole() == DataStoreRole.Image || this.store.getRole() == DataStoreRole.ImageCache) { + if (store.getRole() == DataStoreRole.Image || store.getRole() == DataStoreRole.ImageCache) { SnapshotDataStoreVO store = snapshotStoreDao.findByStoreSnapshot(this.store.getRole(), this.store.getId(), - this.getId()); + getId()); 
store.decrRefCnt(); store.setLastUpdated(new Date()); snapshotStoreDao.update(store.getId(), store); @@ -319,12 +340,12 @@ public class SnapshotObject implements SnapshotInfo { @Override public Long getRefCount() { - if (this.store == null) { + if (store == null) { return null; } - if (this.store.getRole() == DataStoreRole.Image || this.store.getRole() == DataStoreRole.ImageCache) { + if (store.getRole() == DataStoreRole.Image || store.getRole() == DataStoreRole.ImageCache) { SnapshotDataStoreVO store = snapshotStoreDao.findByStoreSnapshot(this.store.getRole(), this.store.getId(), - this.getId()); + getId()); return store.getRefCnt(); } return null; @@ -332,7 +353,7 @@ public class SnapshotObject implements SnapshotInfo { @Override public ObjectInDataStoreStateMachine.State getStatus() { - return this.objectInStoreMgr.findObject(this, store).getObjectInStoreState(); + return objectInStoreMgr.findObject(this, store).getObjectInStoreState(); } @Override diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java index 3ead93f9c5f..0799721312d 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotServiceImpl.java @@ -17,70 +17,49 @@ package org.apache.cloudstack.storage.snapshot; -import com.cloud.dc.dao.ClusterDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Snapshot; -import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.snapshot.dao.VMSnapshotDao; - -import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; 
-import org.apache.cloudstack.engine.subsystem.api.storage.*; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.CopyCmdAnswer; -import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import javax.inject.Inject; - import java.util.concurrent.ExecutionException; @Component 
public class SnapshotServiceImpl implements SnapshotService { private static final Logger s_logger = Logger.getLogger(SnapshotServiceImpl.class); @Inject - protected VolumeDao _volsDao; - @Inject - protected UserVmDao _vmDao; - @Inject - protected PrimaryDataStoreDao _storagePoolDao; - @Inject - protected ClusterDao _clusterDao; - @Inject - protected SnapshotDao _snapshotDao; - @Inject protected SnapshotDataStoreDao _snapshotStoreDao; - - @Inject - protected SnapshotManager snapshotMgr; - @Inject - protected VolumeOrchestrationService volumeMgr; - @Inject - protected SnapshotStateMachineManager stateMachineManager; @Inject SnapshotDataFactory snapshotfactory; @Inject DataStoreManager dataStoreMgr; @Inject DataMotionService motionSrv; - @Inject - ObjectInDataStoreManager objInStoreMgr; - @Inject - VMSnapshotDao _vmSnapshotDao; static private class CreateSnapshotContext extends AsyncRpcContext { final SnapshotInfo snapshot; @@ -122,6 +101,19 @@ public class SnapshotServiceImpl implements SnapshotService { } + static private class RevertSnapshotContext extends AsyncRpcContext { + final SnapshotInfo snapshot; + final AsyncCallFuture future; + + public RevertSnapshotContext(AsyncCompletionCallback callback, SnapshotInfo snapshot, + AsyncCallFuture future) { + super(callback); + this.snapshot = snapshot; + this.future = future; + } + + } + protected Void createSnapshotAsyncCallback(AsyncCallbackDispatcher callback, CreateSnapshotContext context) { CreateCmdResult result = callback.getResult(); @@ -354,6 +346,28 @@ public class SnapshotServiceImpl implements SnapshotService { return null; } + protected Void revertSnapshotCallback(AsyncCallbackDispatcher callback, + RevertSnapshotContext context) { + + CommandResult result = callback.getResult(); + AsyncCallFuture future = context.future; + SnapshotResult res = null; + try { + if (result.isFailed()) { + s_logger.debug("revert snapshot failed" + result.getResult()); + res = new SnapshotResult(context.snapshot, null); + 
res.setResult(result.getResult()); + } else { + res = new SnapshotResult(context.snapshot, null); + } + } catch (Exception e) { + s_logger.debug("Failed to in revertSnapshotCallback", e); + res.setResult(e.toString()); + } + future.complete(res); + return null; + } + @Override public boolean deleteSnapshot(SnapshotInfo snapInfo) { snapInfo.processEvent(ObjectInDataStoreStateMachine.Event.DestroyRequested); @@ -383,7 +397,30 @@ public class SnapshotServiceImpl implements SnapshotService { } @Override - public boolean revertSnapshot(SnapshotInfo snapshot) { + public boolean revertSnapshot(Long snapshotId) { + SnapshotInfo snapshot = snapshotfactory.getSnapshot(snapshotId, DataStoreRole.Primary); + PrimaryDataStore store = (PrimaryDataStore)snapshot.getDataStore(); + + AsyncCallFuture future = new AsyncCallFuture(); + RevertSnapshotContext context = new RevertSnapshotContext(null, snapshot, future); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().revertSnapshotCallback(null, null)).setContext(context); + + ((PrimaryDataStoreDriver)store.getDriver()).revertSnapshot(snapshot, caller); + + SnapshotResult result = null; + try { + result = future.get(); + if (result.isFailed()) { + throw new CloudRuntimeException(result.getResult()); + } + return true; + } catch (InterruptedException e) { + s_logger.debug("revert snapshot is failed: " + e.toString()); + } catch (ExecutionException e) { + s_logger.debug("revert snapshot is failed: " + e.toString()); + } + return false; } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java index 1b579227f84..6db8343214b 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/SnapshotStrategyBase.java @@ -11,7 
+11,7 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.apache.cloudstack.storage.snapshot; @@ -35,4 +35,9 @@ public abstract class SnapshotStrategyBase implements SnapshotStrategy { public SnapshotInfo backupSnapshot(SnapshotInfo snapshot) { return snapshotSvr.backupSnapshot(snapshot); } + + @Override + public boolean revertSnapshot(Long snapshotId) { + return snapshotSvr.revertSnapshot(snapshotId); + } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java index 5653ab4d16c..7eec5ffbb23 100644 --- a/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/snapshot/XenserverSnapshotStrategy.java @@ -11,32 +11,44 @@ // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 
package org.apache.cloudstack.storage.snapshot; + +import java.util.List; + import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.*; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotResult; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import com.cloud.exception.InvalidParameterValueException; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Volume; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.snapshot.SnapshotManager; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.DB; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; @@ -44,8 +56,6 @@ import com.cloud.utils.fsm.NoTransitionException; 
public class XenserverSnapshotStrategy extends SnapshotStrategyBase { private static final Logger s_logger = Logger.getLogger(XenserverSnapshotStrategy.class); - @Inject - SnapshotManager snapshotMgr; @Inject SnapshotService snapshotSvr; @Inject @@ -66,7 +76,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { if (parentSnapshot != null && snapshot.getPath().equalsIgnoreCase(parentSnapshot.getPath())) { s_logger.debug("backup an empty snapshot"); // don't need to backup this snapshot - SnapshotDataStoreVO parentSnapshotOnBackupStore = this.snapshotStoreDao.findBySnapshot( + SnapshotDataStoreVO parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot( parentSnapshot.getId(), DataStoreRole.Image); if (parentSnapshotOnBackupStore != null && parentSnapshotOnBackupStore.getState() == State.Ready) { DataStore store = dataStoreMgr.getDataStore(parentSnapshotOnBackupStore.getDataStoreId(), @@ -88,7 +98,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { s_logger.debug("Failed to change state: " + snapshot.getId() + ": " + e.toString()); throw new CloudRuntimeException(e.toString()); } - return this.snapshotDataFactory.getSnapshot(snapObj.getId(), store); + return snapshotDataFactory.getSnapshot(snapObj.getId(), store); } else { s_logger.debug("parent snapshot hasn't been backed up yet"); } @@ -106,7 +116,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { int i; SnapshotDataStoreVO parentSnapshotOnBackupStore = null; for (i = 1; i < deltaSnap; i++) { - parentSnapshotOnBackupStore = this.snapshotStoreDao.findBySnapshot(parentSnapshot.getId(), + parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot(parentSnapshot.getId(), DataStoreRole.Image); if (parentSnapshotOnBackupStore == null) { break; @@ -117,7 +127,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { break; } - parentSnapshotOnBackupStore = this.snapshotStoreDao.findBySnapshot(prevBackupId, DataStoreRole.Image); + 
parentSnapshotOnBackupStore = snapshotStoreDao.findBySnapshot(prevBackupId, DataStoreRole.Image); } if (i >= deltaSnap) { fullBackup = true; @@ -125,7 +135,7 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { } snapshot.addPayload(fullBackup); - return this.snapshotSvr.backupSnapshot(snapshot); + return snapshotSvr.backupSnapshot(snapshot); } protected boolean deleteSnapshotChain(SnapshotInfo snapshot) { @@ -159,7 +169,15 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { } } if (!deleted) { - boolean r = this.snapshotSvr.deleteSnapshot(snapshot); + boolean r = snapshotSvr.deleteSnapshot(snapshot); + if (r) { + // delete snapshot in cache if there is + List cacheSnaps = snapshotDataFactory.listSnapshotOnCache(snapshot.getId()); + for (SnapshotInfo cacheSnap : cacheSnaps) { + s_logger.debug("Delete snapshot " + snapshot.getId() + " from image cache store: " + cacheSnap.getDataStore().getName()); + cacheSnap.delete(); + } + } if (!resultIsSet) { result = r; resultIsSet = true; @@ -189,13 +207,13 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { if (!Snapshot.State.BackedUp.equals(snapshotVO.getState())) { throw new InvalidParameterValueException("Can't delete snapshotshot " + snapshotId - + " due to it is not in BackedUp Status"); + + " due to it is in " + snapshotVO.getState() + " Status"); } // first mark the snapshot as destroyed, so that ui can't see it, but we // may not destroy the snapshot on the storage, as other snapshots may // depend on it. 
- SnapshotInfo snapshotOnImage = this.snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Image); + SnapshotInfo snapshotOnImage = snapshotDataFactory.getSnapshot(snapshotId, DataStoreRole.Image); if (snapshotOnImage == null) { s_logger.debug("Can't find snapshot on backup storage, delete it in db"); snapshotDao.remove(snapshotId); @@ -235,44 +253,78 @@ public class XenserverSnapshotStrategy extends SnapshotStrategyBase { } @Override - public SnapshotInfo takeSnapshot(SnapshotInfo snapshot) { - SnapshotResult result = snapshotSvr.takeSnapshot(snapshot); - if (result.isFailed()) { - s_logger.debug("Failed to take snapshot: " + result.getResult()); - throw new CloudRuntimeException(result.getResult()); - } - snapshot = result.getSnashot(); - DataStore primaryStore = snapshot.getDataStore(); - - SnapshotInfo backupedSnapshot = this.backupSnapshot(snapshot); - try { - SnapshotInfo parent = snapshot.getParent(); - if (backupedSnapshot != null && parent != null) { - Long parentSnapshotId = parent.getId(); - while (parentSnapshotId != null && parentSnapshotId != 0L) { - SnapshotDataStoreVO snapshotDataStoreVO = snapshotStoreDao.findByStoreSnapshot(primaryStore.getRole(),primaryStore.getId(), parentSnapshotId); - if (snapshotDataStoreVO != null) { - parentSnapshotId = snapshotDataStoreVO.getParentSnapshotId(); - snapshotStoreDao.remove(snapshotDataStoreVO.getId()); - } else { - parentSnapshotId = null; - } - } - SnapshotDataStoreVO snapshotDataStoreVO = snapshotStoreDao.findByStoreSnapshot(primaryStore.getRole(), primaryStore.getId(), - snapshot.getId()); - if (snapshotDataStoreVO != null) { - snapshotDataStoreVO.setParentSnapshotId(0L); - snapshotStoreDao.update(snapshotDataStoreVO.getId(), snapshotDataStoreVO); - } - } - } catch (Exception e) { - s_logger.debug("Failed to clean up snapshots on primary storage", e); - } - return backupedSnapshot; + public boolean revertSnapshot(Long snapshotId) { + throw new CloudRuntimeException("revert Snapshot is not supported"); 
} @Override - public boolean canHandle(Snapshot snapshot) { - return true; + @DB + public SnapshotInfo takeSnapshot(SnapshotInfo snapshot) { + SnapshotVO snapshotVO = snapshotDao.acquireInLockTable(snapshot.getId()); + if (snapshotVO == null) { + throw new CloudRuntimeException("Failed to get lock on snapshot:" + snapshot.getId()); + } + + try { + VolumeInfo volumeInfo = snapshot.getBaseVolume(); + volumeInfo.stateTransit(Volume.Event.SnapshotRequested); + SnapshotResult result = null; + try { + result = snapshotSvr.takeSnapshot(snapshot); + if (result.isFailed()) { + s_logger.debug("Failed to take snapshot: " + result.getResult()); + throw new CloudRuntimeException(result.getResult()); + } + } finally { + if (result != null && result.isSuccess()) { + volumeInfo.stateTransit(Volume.Event.OperationSucceeded); + } else { + volumeInfo.stateTransit(Volume.Event.OperationFailed); + } + } + + snapshot = result.getSnashot(); + DataStore primaryStore = snapshot.getDataStore(); + + SnapshotInfo backupedSnapshot = backupSnapshot(snapshot); + + try { + SnapshotInfo parent = snapshot.getParent(); + if (backupedSnapshot != null && parent != null) { + Long parentSnapshotId = parent.getId(); + while (parentSnapshotId != null && parentSnapshotId != 0L) { + SnapshotDataStoreVO snapshotDataStoreVO = snapshotStoreDao.findByStoreSnapshot(primaryStore.getRole(),primaryStore.getId(), parentSnapshotId); + if (snapshotDataStoreVO != null) { + parentSnapshotId = snapshotDataStoreVO.getParentSnapshotId(); + snapshotStoreDao.remove(snapshotDataStoreVO.getId()); + } else { + parentSnapshotId = null; + } + } + SnapshotDataStoreVO snapshotDataStoreVO = snapshotStoreDao.findByStoreSnapshot(primaryStore.getRole(), primaryStore.getId(), + snapshot.getId()); + if (snapshotDataStoreVO != null) { + snapshotDataStoreVO.setParentSnapshotId(0L); + snapshotStoreDao.update(snapshotDataStoreVO.getId(), snapshotDataStoreVO); + } + } + } catch (Exception e) { + s_logger.debug("Failed to clean up snapshots on 
primary storage", e); + } + return backupedSnapshot; + } finally { + if (snapshotVO != null) { + snapshotDao.releaseFromLockTable(snapshot.getId()); + } + } + } + + @Override + public StrategyPriority canHandle(Snapshot snapshot, SnapshotOperation op) { + if (op == SnapshotOperation.REVERT) { + return StrategyPriority.CANT_HANDLE; + } + + return StrategyPriority.DEFAULT; } } diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java new file mode 100644 index 00000000000..be3cce94da9 --- /dev/null +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/DefaultVMSnapshotStrategy.java @@ -0,0 +1,371 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.vmsnapshot; + +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.CreateVMSnapshotAnswer; +import com.cloud.agent.api.CreateVMSnapshotCommand; +import com.cloud.agent.api.DeleteVMSnapshotAnswer; +import com.cloud.agent.api.DeleteVMSnapshotCommand; +import com.cloud.agent.api.RevertToVMSnapshotAnswer; +import com.cloud.agent.api.RevertToVMSnapshotCommand; +import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.event.EventTypes; +import com.cloud.event.UsageEventUtils; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.GuestOSVO; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.uservm.UserVm; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.DB; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; + +public class 
DefaultVMSnapshotStrategy extends ManagerBase implements VMSnapshotStrategy { + private static final Logger s_logger = Logger.getLogger(DefaultVMSnapshotStrategy.class); + @Inject + VMSnapshotHelper vmSnapshotHelper; + @Inject + GuestOSDao guestOSDao; + @Inject + UserVmDao userVmDao; + @Inject + VMSnapshotDao vmSnapshotDao; + int _wait; + @Inject + ConfigurationDao configurationDao; + @Inject + AgentManager agentMgr; + @Inject + VolumeDao volumeDao; + @Inject + DiskOfferingDao diskOfferingDao; + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + String value = configurationDao.getValue("vmsnapshot.create.wait"); + _wait = NumbersUtil.parseInt(value, 1800); + return true; + } + + public VMSnapshot takeVMSnapshot(VMSnapshot vmSnapshot) { + Long hostId = vmSnapshotHelper.pickRunningHost(vmSnapshot.getVmId()); + UserVm userVm = userVmDao.findById(vmSnapshot.getVmId()); + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.CreateRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + CreateVMSnapshotAnswer answer = null; + boolean result = false; + try { + GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId()); + + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + + VMSnapshotTO current = null; + VMSnapshotVO currentSnapshot = vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId()); + if (currentSnapshot != null) + current = vmSnapshotHelper.getSnapshotWithParents(currentSnapshot); + VMSnapshotTO target = new VMSnapshotTO(vmSnapshot.getId(), vmSnapshot.getName(), vmSnapshot.getType(), null, vmSnapshot.getDescription(), false, + current); + if (current == null) + vmSnapshotVO.setParent(null); + else + vmSnapshotVO.setParent(current.getId()); + + CreateVMSnapshotCommand ccmd = new CreateVMSnapshotCommand(userVm.getInstanceName(),target ,volumeTOs, 
guestOS.getDisplayName(),userVm.getState()); + ccmd.setWait(_wait); + + answer = (CreateVMSnapshotAnswer)agentMgr.send(hostId, ccmd); + if (answer != null && answer.getResult()) { + processAnswer(vmSnapshotVO, userVm, answer, hostId); + s_logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); + result = true; + + for (VolumeObjectTO volumeTo : answer.getVolumeTOs()){ + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE,vmSnapshot,userVm,volumeTo); + } + return vmSnapshot; + } else { + String errMsg = "Creating VM snapshot: " + vmSnapshot.getName() + " failed"; + if(answer != null && answer.getDetails() != null) + errMsg = errMsg + " due to " + answer.getDetails(); + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } catch (OperationTimedoutException e) { + s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); + throw new CloudRuntimeException("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); + } catch (AgentUnavailableException e) { + s_logger.debug("Creating VM snapshot: " + vmSnapshot.getName() + " failed", e); + throw new CloudRuntimeException("Creating VM snapshot: " + vmSnapshot.getName() + " failed: " + e.toString()); + } finally{ + if (!result) { + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + } + } + } + } + + @Override + public boolean deleteVMSnapshot(VMSnapshot vmSnapshot) { + UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId()); + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot,VMSnapshot.Event.ExpungeRequested); + } catch (NoTransitionException e) { + s_logger.debug("Failed to change vm snapshot state with event ExpungeRequested"); + throw new 
CloudRuntimeException("Failed to change vm snapshot state with event ExpungeRequested: " + e.getMessage()); + } + + try { + Long hostId = vmSnapshotHelper.pickRunningHost(vmSnapshot.getVmId()); + + List volumeTOs = vmSnapshotHelper.getVolumeTOList(vmSnapshot.getVmId()); + + String vmInstanceName = userVm.getInstanceName(); + VMSnapshotTO parent = vmSnapshotHelper.getSnapshotWithParents(vmSnapshotVO).getParent(); + VMSnapshotTO vmSnapshotTO = new VMSnapshotTO(vmSnapshot.getId(), vmSnapshot.getName(), vmSnapshot.getType(), + vmSnapshot.getCreated().getTime(), vmSnapshot.getDescription(), vmSnapshot.getCurrent(), parent); + GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId()); + DeleteVMSnapshotCommand deleteSnapshotCommand = new DeleteVMSnapshotCommand(vmInstanceName, vmSnapshotTO, volumeTOs,guestOS.getDisplayName()); + + Answer answer = agentMgr.send(hostId, deleteSnapshotCommand); + + if (answer != null && answer.getResult()) { + DeleteVMSnapshotAnswer deleteVMSnapshotAnswer = (DeleteVMSnapshotAnswer)answer; + processAnswer(vmSnapshotVO, userVm, answer, hostId); + for (VolumeObjectTO volumeTo : deleteVMSnapshotAnswer.getVolumeTOs()){ + publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_DELETE,vmSnapshot,userVm,volumeTo); + } + return true; + } else { + String errMsg = (answer == null) ? 
null : answer.getDetails(); + s_logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg); + throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + errMsg); + } + } catch (OperationTimedoutException e) { + throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + e.getMessage()); + } catch (AgentUnavailableException e) { + throw new CloudRuntimeException("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + e.getMessage()); + } + } + + @DB + protected void processAnswer(final VMSnapshotVO vmSnapshot, UserVm userVm, final Answer as, Long hostId) { + try { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws NoTransitionException { + if (as instanceof CreateVMSnapshotAnswer) { + CreateVMSnapshotAnswer answer = (CreateVMSnapshotAnswer) as; + finalizeCreate(vmSnapshot, answer.getVolumeTOs()); + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); + } else if (as instanceof RevertToVMSnapshotAnswer) { + RevertToVMSnapshotAnswer answer = (RevertToVMSnapshotAnswer) as; + finalizeRevert(vmSnapshot, answer.getVolumeTOs()); + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); + } else if (as instanceof DeleteVMSnapshotAnswer) { + DeleteVMSnapshotAnswer answer = (DeleteVMSnapshotAnswer) as; + finalizeDelete(vmSnapshot, answer.getVolumeTOs()); + vmSnapshotDao.remove(vmSnapshot.getId()); + } + } + }); + } catch (Exception e) { + String errMsg = "Error while process answer: " + as.getClass() + " due to " + e.getMessage(); + s_logger.error(errMsg, e); + throw new CloudRuntimeException(errMsg); + } + } + + protected 
void finalizeDelete(VMSnapshotVO vmSnapshot, List VolumeTOs) { + // update volumes path + updateVolumePath(VolumeTOs); + + // update children's parent snapshots + List children= vmSnapshotDao.listByParent(vmSnapshot.getId()); + for (VMSnapshotVO child : children) { + child.setParent(vmSnapshot.getParent()); + vmSnapshotDao.persist(child); + } + + // update current snapshot + VMSnapshotVO current = vmSnapshotDao.findCurrentSnapshotByVmId(vmSnapshot.getVmId()); + if(current != null && current.getId() == vmSnapshot.getId() && vmSnapshot.getParent() != null){ + VMSnapshotVO parent = vmSnapshotDao.findById(vmSnapshot.getParent()); + parent.setCurrent(true); + vmSnapshotDao.persist(parent); + } + vmSnapshot.setCurrent(false); + vmSnapshotDao.persist(vmSnapshot); + } + + protected void finalizeCreate(VMSnapshotVO vmSnapshot, List VolumeTOs) { + // update volumes path + updateVolumePath(VolumeTOs); + + vmSnapshot.setCurrent(true); + + // change current snapshot + if (vmSnapshot.getParent() != null) { + VMSnapshotVO previousCurrent = vmSnapshotDao.findById(vmSnapshot.getParent()); + previousCurrent.setCurrent(false); + vmSnapshotDao.persist(previousCurrent); + } + vmSnapshotDao.persist(vmSnapshot); + } + + protected void finalizeRevert(VMSnapshotVO vmSnapshot, List volumeToList) { + // update volumes path + updateVolumePath(volumeToList); + + // update current snapshot, current snapshot is the one reverted to + VMSnapshotVO previousCurrent = vmSnapshotDao.findCurrentSnapshotByVmId(vmSnapshot.getVmId()); + if(previousCurrent != null){ + previousCurrent.setCurrent(false); + vmSnapshotDao.persist(previousCurrent); + } + vmSnapshot.setCurrent(true); + vmSnapshotDao.persist(vmSnapshot); + } + + private void updateVolumePath(List volumeTOs) { + for (VolumeObjectTO volume : volumeTOs) { + if (volume.getPath() != null) { + VolumeVO volumeVO = volumeDao.findById(volume.getId()); + volumeVO.setPath(volume.getPath()); + volumeVO.setVmSnapshotChainSize(volume.getSize()); + 
volumeDao.persist(volumeVO); + } + } + } + + private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm, VolumeObjectTO volumeTo){ + VolumeVO volume = volumeDao.findById(volumeTo.getId()); + Long diskOfferingId = volume.getDiskOfferingId(); + Long offeringId = null; + if (diskOfferingId != null) { + DiskOfferingVO offering = diskOfferingDao.findById(diskOfferingId); + if (offering != null + && (offering.getType() == DiskOfferingVO.Type.Disk)) { + offeringId = offering.getId(); + } + } + UsageEventUtils.publishUsageEvent( + type, + vmSnapshot.getAccountId(), + userVm.getDataCenterId(), + userVm.getId(), + vmSnapshot.getName(), + offeringId, + volume.getId(), // save volume's id into templateId field + volumeTo.getSize(), + VMSnapshot.class.getName(), vmSnapshot.getUuid()); + } + + @Override + public boolean revertVMSnapshot(VMSnapshot vmSnapshot) { + VMSnapshotVO vmSnapshotVO = (VMSnapshotVO)vmSnapshot; + UserVmVO userVm = userVmDao.findById(vmSnapshot.getVmId()); + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshotVO, VMSnapshot.Event.RevertRequested); + } catch (NoTransitionException e) { + throw new CloudRuntimeException(e.getMessage()); + } + + boolean result = false; + try { + VMSnapshotVO snapshot = vmSnapshotDao.findById(vmSnapshotVO.getId()); + List volumeTOs = vmSnapshotHelper.getVolumeTOList(userVm.getId()); + String vmInstanceName = userVm.getInstanceName(); + VMSnapshotTO parent = vmSnapshotHelper.getSnapshotWithParents(snapshot).getParent(); + + VMSnapshotTO vmSnapshotTO = new VMSnapshotTO(snapshot.getId(), snapshot.getName(), snapshot.getType(), + snapshot.getCreated().getTime(), snapshot.getDescription(), snapshot.getCurrent(), parent); + Long hostId = vmSnapshotHelper.pickRunningHost(vmSnapshot.getVmId()); + GuestOSVO guestOS = guestOSDao.findById(userVm.getGuestOSId()); + RevertToVMSnapshotCommand revertToSnapshotCommand = new RevertToVMSnapshotCommand(vmInstanceName, vmSnapshotTO, volumeTOs, 
guestOS.getDisplayName()); + + RevertToVMSnapshotAnswer answer = (RevertToVMSnapshotAnswer) agentMgr.send(hostId, revertToSnapshotCommand); + if (answer != null && answer.getResult()) { + processAnswer(vmSnapshotVO, userVm, answer, hostId); + result = true; + } else { + String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: "+ vmSnapshotVO.getName() + " failed"; + if(answer != null && answer.getDetails() != null) + errMsg = errMsg + " due to " + answer.getDetails(); + s_logger.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + } catch (OperationTimedoutException e) { + s_logger.debug("Failed to revert vm snapshot", e); + throw new CloudRuntimeException(e.getMessage()); + } catch (AgentUnavailableException e) { + s_logger.debug("Failed to revert vm snapshot", e); + throw new CloudRuntimeException(e.getMessage()); + } finally { + if (!result) { + try { + vmSnapshotHelper.vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); + } catch (NoTransitionException e1) { + s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); + } + } + } + return result; + } + + @Override + public StrategyPriority canHandle(VMSnapshot vmSnapshot) { + return StrategyPriority.DEFAULT; + } +} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java new file mode 100644 index 00000000000..1437f800c21 --- /dev/null +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelper.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.vmsnapshot; + +import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotVO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; + +import java.util.List; + +public interface VMSnapshotHelper { + boolean vmSnapshotStateTransitTo(VMSnapshot vsnp, VMSnapshot.Event event) throws NoTransitionException; + + Long pickRunningHost(Long vmId); + + List getVolumeTOList(Long vmId); + + VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot); +} diff --git a/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelperImpl.java b/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelperImpl.java new file mode 100644 index 00000000000..320a59ce207 --- /dev/null +++ b/engine/storage/snapshot/src/org/apache/cloudstack/storage/vmsnapshot/VMSnapshotHelperImpl.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.vmsnapshot; + +import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.VolumeTO; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.host.Host; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.StoragePool; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.fsm.NoTransitionException; +import com.cloud.utils.fsm.StateMachine2; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class VMSnapshotHelperImpl implements VMSnapshotHelper { + @Inject + VMSnapshotDao _vmSnapshotDao; + @Inject + UserVmDao userVmDao; + 
@Inject + HostDao hostDao; + @Inject + VolumeDao volumeDao; + @Inject + PrimaryDataStoreDao primaryDataStoreDao; + @Inject + VolumeDataFactory volumeDataFactory; + + StateMachine2 _vmSnapshottateMachine ; + public VMSnapshotHelperImpl() { + _vmSnapshottateMachine = VMSnapshot.State.getStateMachine(); + } + @Override + public boolean vmSnapshotStateTransitTo(VMSnapshot vsnp, VMSnapshot.Event event) throws NoTransitionException { + return _vmSnapshottateMachine.transitTo(vsnp, event, null, _vmSnapshotDao); + } + + @Override + public Long pickRunningHost(Long vmId) { + UserVmVO vm = userVmDao.findById(vmId); + // use VM's host if VM is running + if(vm.getState() == VirtualMachine.State.Running) + return vm.getHostId(); + + // check if lastHostId is available + if(vm.getLastHostId() != null){ + HostVO lastHost = hostDao.findById(vm.getLastHostId()); + if(lastHost.getStatus() == com.cloud.host.Status.Up && !lastHost.isInMaintenanceStates()) + return lastHost.getId(); + } + + List listVolumes = volumeDao.findByInstance(vmId); + if (listVolumes == null || listVolumes.size() == 0) { + throw new InvalidParameterValueException("vmInstance has no volumes"); + } + VolumeVO volume = listVolumes.get(0); + Long poolId = volume.getPoolId(); + if (poolId == null) { + throw new InvalidParameterValueException("pool id is not found"); + } + StoragePoolVO storagePool = primaryDataStoreDao.findById(poolId); + if (storagePool == null) { + throw new InvalidParameterValueException("storage pool is not found"); + } + List listHost = hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, storagePool.getClusterId(), storagePool.getPodId(), + storagePool.getDataCenterId(), null); + if (listHost == null || listHost.size() == 0) { + throw new InvalidParameterValueException("no host in up state is found"); + } + return listHost.get(0).getId(); + } + + @Override + public List getVolumeTOList(Long vmId) { + List volumeTOs = new ArrayList(); + List volumeVos = volumeDao.findByInstance(vmId); + 
VolumeInfo volumeInfo = null; + for (VolumeVO volume : volumeVos) { + volumeInfo = volumeDataFactory.getVolume(volume.getId()); + + volumeTOs.add((VolumeObjectTO)volumeInfo.getTO()); + } + return volumeTOs; + } + + + private VMSnapshotTO convert2VMSnapshotTO(VMSnapshotVO vo) { + return new VMSnapshotTO(vo.getId(), vo.getName(), vo.getType(), vo.getCreated().getTime(), vo.getDescription(), + vo.getCurrent(), null); + } + + @Override + public VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot) { + Map snapshotMap = new HashMap(); + List allSnapshots = _vmSnapshotDao.findByVm(snapshot.getVmId()); + for (VMSnapshotVO vmSnapshotVO : allSnapshots) { + snapshotMap.put(vmSnapshotVO.getId(), vmSnapshotVO); + } + + VMSnapshotTO currentTO = convert2VMSnapshotTO(snapshot); + VMSnapshotTO result = currentTO; + VMSnapshotVO current = snapshot; + while (current.getParent() != null) { + VMSnapshotVO parent = snapshotMap.get(current.getParent()); + currentTO.setParent(convert2VMSnapshotTO(parent)); + current = snapshotMap.get(current.getParent()); + currentTO = currentTO.getParent(); + } + return result; + } + +} diff --git a/engine/storage/snapshot/test/resources/db.properties b/engine/storage/snapshot/test/resources/db.properties new file mode 100644 index 00000000000..18bf54c2b61 --- /dev/null +++ b/engine/storage/snapshot/test/resources/db.properties @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +# management server clustering parameters, change cluster.node.IP to the machine IP address +# in which the management server(Tomcat) is running +cluster.node.IP=127.0.0.1 +cluster.servlet.port=9090 + +# CloudStack database settings +db.cloud.username=cloud +db.cloud.password=cloud +db.root.password= +db.cloud.host=localhost +db.cloud.port=3306 +db.cloud.name=cloud + +# CloudStack database tuning parameters +db.cloud.maxActive=250 +db.cloud.maxIdle=30 +db.cloud.maxWait=10000 +db.cloud.autoReconnect=true +db.cloud.validationQuery=SELECT 1 +db.cloud.testOnBorrow=true +db.cloud.testWhileIdle=true +db.cloud.timeBetweenEvictionRunsMillis=40000 +db.cloud.minEvictableIdleTimeMillis=240000 +db.cloud.poolPreparedStatements=false +db.cloud.url.params=prepStmtCacheSize=517&cachePrepStmts=true&prepStmtCacheSqlLimit=4096 + +# usage database settings +db.usage.username=cloud +db.usage.password=cloud +db.usage.host=localhost +db.usage.port=3306 +db.usage.name=cloud_usage + +# usage database tuning parameters +db.usage.maxActive=100 +db.usage.maxIdle=30 +db.usage.maxWait=10000 +db.usage.autoReconnect=true + +# awsapi database settings +db.awsapi.name=cloudbridge + +# Simulator database settings +db.simulator.username=cloud +db.simulator.password=cloud +db.simulator.host=localhost +db.simulator.port=3306 +db.simulator.name=simulator +db.simulator.maxActive=250 +db.simulator.maxIdle=30 +db.simulator.maxWait=10000 +db.simulator.autoReconnect=true diff --git a/engine/storage/snapshot/test/src/VMSnapshotStrategyTest.java 
b/engine/storage/snapshot/test/src/VMSnapshotStrategyTest.java new file mode 100644 index 00000000000..8e36fafb5d2 --- /dev/null +++ b/engine/storage/snapshot/test/src/VMSnapshotStrategyTest.java @@ -0,0 +1,256 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package src; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.cloudstack.storage.vmsnapshot.DefaultVMSnapshotStrategy; +import org.apache.cloudstack.storage.vmsnapshot.VMSnapshotHelper; +import org.apache.cloudstack.test.utils.SpringUtils; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mockito; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.ComponentScan; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.FilterType; +import org.springframework.core.type.classreading.MetadataReader; +import org.springframework.core.type.classreading.MetadataReaderFactory; +import org.springframework.core.type.filter.TypeFilter; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; +import org.springframework.test.context.support.AnnotationConfigContextLoader; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Command; +import com.cloud.agent.api.CreateVMSnapshotAnswer; +import com.cloud.agent.api.DeleteVMSnapshotAnswer; +import com.cloud.agent.api.RevertToVMSnapshotAnswer; +import com.cloud.agent.api.VMSnapshotTO; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.storage.GuestOSVO; +import com.cloud.storage.dao.DiskOfferingDao; +import com.cloud.storage.dao.GuestOSDao; +import com.cloud.storage.dao.VolumeDao; +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.exception.CloudRuntimeException; +import 
com.cloud.utils.net.NetUtils; +import com.cloud.vm.UserVmVO; +import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.snapshot.VMSnapshot; +import com.cloud.vm.snapshot.VMSnapshotVO; +import com.cloud.vm.snapshot.dao.VMSnapshotDao; + +import junit.framework.TestCase; + +@RunWith(SpringJUnit4ClassRunner.class) +@ContextConfiguration(loader = AnnotationConfigContextLoader.class) +public class VMSnapshotStrategyTest extends TestCase { + @Inject + VMSnapshotStrategy vmSnapshotStrategy; + @Inject + VMSnapshotHelper vmSnapshotHelper; + @Inject UserVmDao userVmDao; + @Inject + GuestOSDao guestOSDao; + @Inject + AgentManager agentMgr; + @Inject + VMSnapshotDao vmSnapshotDao; + @Override + @Before + public void setUp() { + ComponentContext.initComponentsLifeCycle(); + } + + + @Test + public void testCreateVMSnapshot() throws AgentUnavailableException, OperationTimedoutException { + Long hostId = 1L; + Long vmId = 1L; + Long guestOsId = 1L; + List volumeObjectTOs = new ArrayList(); + VMSnapshotVO vmSnapshot = Mockito.mock(VMSnapshotVO.class); + UserVmVO userVmVO = Mockito.mock(UserVmVO.class); + Mockito.when(userVmVO.getGuestOSId()).thenReturn(guestOsId); + Mockito.when(vmSnapshot.getVmId()).thenReturn(vmId); + Mockito.when(vmSnapshotHelper.pickRunningHost(Mockito.anyLong())).thenReturn(hostId); + Mockito.when(vmSnapshotHelper.getVolumeTOList(Mockito.anyLong())).thenReturn(volumeObjectTOs); + Mockito.when(userVmDao.findById(Mockito.anyLong())).thenReturn(userVmVO); + GuestOSVO guestOSVO = Mockito.mock(GuestOSVO.class); + Mockito.when(guestOSDao.findById(Mockito.anyLong())).thenReturn(guestOSVO); + Mockito.when(agentMgr.send(Mockito.anyLong(), Mockito.any(Command.class))).thenReturn(null); + Exception e = null; + try { + vmSnapshotStrategy.takeVMSnapshot(vmSnapshot); + } catch (CloudRuntimeException e1) { + e = e1; + } + + assertNotNull(e); + CreateVMSnapshotAnswer answer = Mockito.mock(CreateVMSnapshotAnswer.class); + Mockito.when(answer.getResult()).thenReturn(true); + 
Mockito.when(agentMgr.send(Mockito.anyLong(), Mockito.any(Command.class))).thenReturn(answer); + Mockito.when(vmSnapshotDao.findById(Mockito.anyLong())).thenReturn(vmSnapshot); + VMSnapshot snapshot = null; + snapshot = vmSnapshotStrategy.takeVMSnapshot(vmSnapshot); + assertNotNull(snapshot); + } + + @Test + public void testRevertSnapshot() throws AgentUnavailableException, OperationTimedoutException { + Long hostId = 1L; + Long vmId = 1L; + Long guestOsId = 1L; + List volumeObjectTOs = new ArrayList(); + VMSnapshotVO vmSnapshot = Mockito.mock(VMSnapshotVO.class); + UserVmVO userVmVO = Mockito.mock(UserVmVO.class); + Mockito.when(userVmVO.getGuestOSId()).thenReturn(guestOsId); + Mockito.when(vmSnapshot.getVmId()).thenReturn(vmId); + Mockito.when(vmSnapshotHelper.pickRunningHost(Mockito.anyLong())).thenReturn(hostId); + Mockito.when(vmSnapshotHelper.getVolumeTOList(Mockito.anyLong())).thenReturn(volumeObjectTOs); + Mockito.when(userVmDao.findById(Mockito.anyLong())).thenReturn(userVmVO); + GuestOSVO guestOSVO = Mockito.mock(GuestOSVO.class); + Mockito.when(guestOSDao.findById(Mockito.anyLong())).thenReturn(guestOSVO); + VMSnapshotTO vmSnapshotTO = Mockito.mock(VMSnapshotTO.class); + Mockito.when(vmSnapshotHelper.getSnapshotWithParents(Mockito.any(VMSnapshotVO.class))).thenReturn(vmSnapshotTO); + Mockito.when(vmSnapshotDao.findById(Mockito.anyLong())).thenReturn(vmSnapshot); + Mockito.when(vmSnapshot.getId()).thenReturn(1L); + Mockito.when(vmSnapshot.getCreated()).thenReturn(new Date()); + Mockito.when(agentMgr.send(Mockito.anyLong(), Mockito.any(Command.class))).thenReturn(null); + Exception e = null; + try { + vmSnapshotStrategy.revertVMSnapshot(vmSnapshot); + } catch (CloudRuntimeException e1) { + e = e1; + } + + assertNotNull(e); + + RevertToVMSnapshotAnswer answer = Mockito.mock(RevertToVMSnapshotAnswer.class); + Mockito.when(answer.getResult()).thenReturn(Boolean.TRUE); + Mockito.when(agentMgr.send(Mockito.anyLong(), 
Mockito.any(Command.class))).thenReturn(answer); + boolean result = vmSnapshotStrategy.revertVMSnapshot(vmSnapshot); + assertTrue(result); + } + + @Test + public void testDeleteVMSnapshot() throws AgentUnavailableException, OperationTimedoutException { + Long hostId = 1L; + Long vmId = 1L; + Long guestOsId = 1L; + List volumeObjectTOs = new ArrayList(); + VMSnapshotVO vmSnapshot = Mockito.mock(VMSnapshotVO.class); + UserVmVO userVmVO = Mockito.mock(UserVmVO.class); + Mockito.when(userVmVO.getGuestOSId()).thenReturn(guestOsId); + Mockito.when(vmSnapshot.getVmId()).thenReturn(vmId); + Mockito.when(vmSnapshotHelper.pickRunningHost(Mockito.anyLong())).thenReturn(hostId); + Mockito.when(vmSnapshotHelper.getVolumeTOList(Mockito.anyLong())).thenReturn(volumeObjectTOs); + Mockito.when(userVmDao.findById(Mockito.anyLong())).thenReturn(userVmVO); + GuestOSVO guestOSVO = Mockito.mock(GuestOSVO.class); + Mockito.when(guestOSDao.findById(Mockito.anyLong())).thenReturn(guestOSVO); + VMSnapshotTO vmSnapshotTO = Mockito.mock(VMSnapshotTO.class); + Mockito.when(vmSnapshotHelper.getSnapshotWithParents(Mockito.any(VMSnapshotVO.class))).thenReturn(vmSnapshotTO); + Mockito.when(vmSnapshotDao.findById(Mockito.anyLong())).thenReturn(vmSnapshot); + Mockito.when(vmSnapshot.getId()).thenReturn(1L); + Mockito.when(vmSnapshot.getCreated()).thenReturn(new Date()); + Mockito.when(agentMgr.send(Mockito.anyLong(), Mockito.any(Command.class))).thenReturn(null); + + Exception e = null; + try { + vmSnapshotStrategy.deleteVMSnapshot(vmSnapshot); + } catch (CloudRuntimeException e1) { + e = e1; + } + + assertNotNull(e); + + DeleteVMSnapshotAnswer answer = Mockito.mock(DeleteVMSnapshotAnswer.class); + Mockito.when(answer.getResult()).thenReturn(true); + Mockito.when(agentMgr.send(Mockito.anyLong(), Mockito.any(Command.class))).thenReturn(answer); + + boolean result = vmSnapshotStrategy.deleteVMSnapshot(vmSnapshot); + assertTrue(result); + } + + + @Configuration + @ComponentScan(basePackageClasses = 
{NetUtils.class, DefaultVMSnapshotStrategy.class}, includeFilters = {@ComponentScan.Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)}, useDefaultFilters = false) + public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { + + public static class Library implements TypeFilter { + @Override + public boolean match(MetadataReader mdr, MetadataReaderFactory arg1) throws IOException { + mdr.getClassMetadata().getClassName(); + ComponentScan cs = TestConfiguration.class.getAnnotation(ComponentScan.class); + return SpringUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs); + } + } + + @Bean + public VMSnapshotHelper vmSnapshotHelper() { + return Mockito.mock(VMSnapshotHelper.class); + } + + @Bean + public GuestOSDao guestOSDao() { + return Mockito.mock(GuestOSDao.class); + } + + @Bean + public UserVmDao userVmDao() { + return Mockito.mock(UserVmDao.class); + } + + @Bean + public VMSnapshotDao vmSnapshotDao() { + return Mockito.mock(VMSnapshotDao.class); + } + + @Bean + public ConfigurationDao configurationDao() { + return Mockito.mock(ConfigurationDao.class); + } + + @Bean + public AgentManager agentManager() { + return Mockito.mock(AgentManager.class); + } + + @Bean + public VolumeDao volumeDao() { + return Mockito.mock(VolumeDao.class); + } + + @Bean + public DiskOfferingDao diskOfferingDao() { + return Mockito.mock(DiskOfferingDao.class); + } + } +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/LocalHostEndpoint.java b/engine/storage/src/org/apache/cloudstack/storage/LocalHostEndpoint.java index 83d34a07f4a..53803bf594c 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/LocalHostEndpoint.java +++ b/engine/storage/src/org/apache/cloudstack/storage/LocalHostEndpoint.java @@ -21,24 +21,23 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import com.cloud.configuration.Config; -import 
com.cloud.utils.component.ComponentContext; +import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.config.dao.ConfigurationDaoImpl; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.resource.LocalNfsSecondaryStorageResource; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; +import com.cloud.configuration.Config; import com.cloud.resource.ServerResource; +import com.cloud.utils.component.ComponentContext; import com.cloud.utils.net.NetUtils; -import javax.inject.Inject; - public class LocalHostEndpoint implements EndPoint { private ScheduledExecutorService executor; protected ServerResource resource; @@ -97,7 +96,7 @@ public class LocalHostEndpoint implements EndPoint { return new Answer(cmd, false, "unsupported command:" + cmd.toString()); } - private class CmdRunner implements Runnable { + private class CmdRunner extends ManagedContextRunnable { final Command cmd; final AsyncCompletionCallback callback; @@ -107,7 +106,7 @@ public class LocalHostEndpoint implements EndPoint { } @Override - public void run() { + protected void runInContext() { Answer answer = sendMessage(cmd); callback.complete(answer); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/RemoteHostEndPoint.java b/engine/storage/src/org/apache/cloudstack/storage/RemoteHostEndPoint.java index 369381358a8..3cae2b95f1d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/RemoteHostEndPoint.java +++ b/engine/storage/src/org/apache/cloudstack/storage/RemoteHostEndPoint.java @@ -26,6 +26,7 @@ import javax.inject.Inject; import 
org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -104,7 +105,7 @@ public class RemoteHostEndPoint implements EndPoint { throw new CloudRuntimeException("Failed to send command, due to Agent:" + getId() + ", " + errMsg); } - private class CmdRunner implements Listener, Runnable { + private class CmdRunner extends ManagedContextRunnable implements Listener { final AsyncCompletionCallback callback; Answer answer; @@ -162,7 +163,7 @@ public class RemoteHostEndPoint implements EndPoint { } @Override - public void run() { + protected void runInContext() { callback.complete(answer); } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java index 6b076d39f1a..d48edd6eb80 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ClusterScopeStoragePoolAllocator.java @@ -51,8 +51,13 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - s_logger.debug("ClusterScopeStoragePoolAllocator looking for storage pool"); + + if (dskCh.useLocalStorage()) { + // cluster wide allocator should bail out in case of local disk + return null; + } + List suitablePools = new ArrayList(); long dcId = plan.getDataCenterId(); @@ -84,9 +89,7 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat if (pools.size() == 0) { if (s_logger.isDebugEnabled()) { - String storageType = dskCh.useLocalStorage() ? 
ServiceOffering.StorageType.local.toString() - : ServiceOffering.StorageType.shared.toString(); - s_logger.debug("No storage pools available for " + storageType + " volume allocation, returning"); + s_logger.debug("No storage pools available for " + ServiceOffering.StorageType.shared.toString() + " volume allocation, returning"); } return suitablePools; } @@ -95,16 +98,16 @@ public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocat if (suitablePools.size() == returnUpTo) { break; } - StoragePool pol = (StoragePool) dataStoreMgr.getPrimaryDataStore(pool.getId()); - if (filter(avoid, pol, dskCh, plan)) { - suitablePools.add(pol); + StoragePool storagePool = (StoragePool) dataStoreMgr.getPrimaryDataStore(pool.getId()); + if (filter(avoid, storagePool, dskCh, plan)) { + suitablePools.add(storagePool); } else { avoid.addPool(pool.getId()); } } if (s_logger.isDebugEnabled()) { - s_logger.debug("FirstFitStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools"); + s_logger.debug("ClusterScopeStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools"); } return suitablePools; diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java index 3ea2c462087..1f61e8b948d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/LocalStoragePoolAllocator.java @@ -68,25 +68,24 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { @Override protected List select(DiskProfile dskCh, VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - - List suitablePools = new ArrayList(); - s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm"); if (!dskCh.useLocalStorage()) { - return 
suitablePools; + return null; } + List suitablePools = new ArrayList(); + // data disk and host identified from deploying vm (attach volume case) if (dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null) { List hostPools = _poolHostDao.listByHostId(plan.getHostId()); for (StoragePoolHostVO hostPool : hostPools) { StoragePoolVO pool = _storagePoolDao.findById(hostPool.getPoolId()); if (pool != null && pool.isLocal()) { - StoragePool pol = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId()); - if (filter(avoid, pol, dskCh, plan)) { + StoragePool storagePool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + if (filter(avoid, storagePool, dskCh, plan)) { s_logger.debug("Found suitable local storage pool " + pool.getId() + ", adding to list"); - suitablePools.add(pol); + suitablePools.add(storagePool); } else { avoid.addPool(pool.getId()); } @@ -107,9 +106,9 @@ public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator { if (suitablePools.size() == returnUpTo) { break; } - StoragePool pol = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId()); - if (filter(avoid, pol, dskCh, plan)) { - suitablePools.add(pol); + StoragePool storagePool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId()); + if (filter(avoid, storagePool, dskCh, plan)) { + suitablePools.add(storagePool); } else { avoid.addPool(pool.getId()); } diff --git a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java index 38724fa8214..b58bcb53cb6 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java +++ b/engine/storage/src/org/apache/cloudstack/storage/allocator/ZoneWideStoragePoolAllocator.java @@ -57,21 +57,25 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { 
storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool); } - @Override - protected List select(DiskProfile dskCh, - VirtualMachineProfile vmProfile, - DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { - s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool"); - List suitablePools = new ArrayList(); + + @Override + protected List select(DiskProfile dskCh, + VirtualMachineProfile vmProfile, + DeploymentPlan plan, ExcludeList avoid, int returnUpTo) { + s_logger.debug("ZoneWideStoragePoolAllocator to find storage pool"); + + if (dskCh.useLocalStorage()) { + return null; + } + + List suitablePools = new ArrayList(); List storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags()); - if (storagePools == null) { storagePools = new ArrayList(); } List anyHypervisorStoragePools = new ArrayList(); - for (StoragePoolVO storagePool : storagePools) { if (HypervisorType.Any.equals(storagePool.getHypervisor())) { anyHypervisorStoragePools.add(storagePool); @@ -79,9 +83,7 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { } List storagePoolsByHypervisor = _storagePoolDao.findZoneWideStoragePoolsByHypervisor(plan.getDataCenterId(), dskCh.getHypervisorType()); - storagePools.retainAll(storagePoolsByHypervisor); - storagePools.addAll(anyHypervisorStoragePools); // add remaining pools in zone, that did not match tags, to avoid set @@ -95,15 +97,16 @@ public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator { if (suitablePools.size() == returnUpTo) { break; } - StoragePool pol = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(storage.getId()); - if (filter(avoid, pol, dskCh, plan)) { - suitablePools.add(pol); + StoragePool storagePool = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(storage.getId()); + if (filter(avoid, storagePool, dskCh, plan)) { + suitablePools.add(storagePool); } else { - avoid.addPool(pol.getId()); + avoid.addPool(storagePool.getId()); 
} } return suitablePools; } + @Override protected List reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List pools, Account account) { diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java index a9263a98879..88061aa5e12 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java @@ -107,6 +107,11 @@ public class DataStoreManagerImpl implements DataStoreManager { return imageDataStoreMgr.listImageStores(); } + @Override + public List listImageCacheStores() { + return imageDataStoreMgr.listImageCacheStores(); + } + public void setPrimaryStoreMgr(PrimaryDataStoreProviderManager primaryStoreMgr) { this.primaryStoreMgr = primaryStoreMgr; } diff --git a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java index 92b4e7a5479..e186bd82017 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/datastore/provider/DataStoreProviderManagerImpl.java @@ -20,6 +20,8 @@ package org.apache.cloudstack.storage.datastore.provider; import com.cloud.exception.InvalidParameterValueException; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.component.Registry; + import org.apache.cloudstack.api.response.StorageProviderResponse; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider.DataStoreProviderType; @@ -33,18 +35,22 @@ import org.springframework.stereotype.Component; import javax.inject.Inject; import javax.naming.ConfigurationException; + import 
java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; @Component -public class DataStoreProviderManagerImpl extends ManagerBase implements DataStoreProviderManager { +public class DataStoreProviderManagerImpl extends ManagerBase implements DataStoreProviderManager, Registry { private static final Logger s_logger = Logger.getLogger(DataStoreProviderManagerImpl.class); - @Inject + List providers; - protected Map providerMap = new HashMap(); + protected Map providerMap = new ConcurrentHashMap(); @Inject PrimaryDataStoreProviderManager primaryDataStoreProviderMgr; @Inject @@ -52,6 +58,9 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto @Override public DataStoreProvider getDataStoreProvider(String name) { + if (name == null) + return null; + return providerMap.get(name); } @@ -96,43 +105,54 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto @Override public boolean configure(String name, Map params) throws ConfigurationException { - Map copyParams = new HashMap(params); - for (DataStoreProvider provider : providers) { - String providerName = provider.getName(); - if (providerMap.get(providerName) != null) { - s_logger.debug("Failed to register data store provider, provider name: " + providerName - + " is not unique"); - return false; - } - - s_logger.debug("registering data store provider:" + provider.getName()); - - providerMap.put(providerName, provider); - try { - boolean registrationResult = provider.configure(copyParams); - if (!registrationResult) { - providerMap.remove(providerName); - s_logger.debug("Failed to register data store provider: " + providerName); - return false; - } - - Set types = provider.getTypes(); - if (types.contains(DataStoreProviderType.PRIMARY)) { - 
primaryDataStoreProviderMgr.registerDriver(provider.getName(), - (PrimaryDataStoreDriver) provider.getDataStoreDriver()); - primaryDataStoreProviderMgr.registerHostListener(provider.getName(), provider.getHostListener()); - } else if (types.contains(DataStoreProviderType.IMAGE)) { - imageStoreProviderMgr.registerDriver(provider.getName(), - (ImageStoreDriver) provider.getDataStoreDriver()); - } - } catch (Exception e) { - s_logger.debug("configure provider failed", e); - providerMap.remove(providerName); - return false; + if ( providers != null ) { + for (DataStoreProvider provider : providers) { + registerProvider(provider); } } + providers = new CopyOnWriteArrayList(providers); + + return true; + } + + protected boolean registerProvider(DataStoreProvider provider) { + Map copyParams = new HashMap(); + + String providerName = provider.getName(); + if (providerMap.get(providerName) != null) { + s_logger.debug("Did not register data store provider, provider name: " + providerName + + " is not unique"); + return false; + } + + s_logger.debug("registering data store provider:" + provider.getName()); + + providerMap.put(providerName, provider); + try { + boolean registrationResult = provider.configure(copyParams); + if (!registrationResult) { + providerMap.remove(providerName); + s_logger.debug("Failed to register data store provider: " + providerName); + return false; + } + + Set types = provider.getTypes(); + if (types.contains(DataStoreProviderType.PRIMARY)) { + primaryDataStoreProviderMgr.registerDriver(provider.getName(), + (PrimaryDataStoreDriver) provider.getDataStoreDriver()); + primaryDataStoreProviderMgr.registerHostListener(provider.getName(), provider.getHostListener()); + } else if (types.contains(DataStoreProviderType.IMAGE)) { + imageStoreProviderMgr.registerDriver(provider.getName(), + (ImageStoreDriver) provider.getDataStoreDriver()); + } + } catch (Exception e) { + s_logger.debug("configure provider failed", e); + providerMap.remove(providerName); + 
return false; + } + return true; } @@ -167,6 +187,27 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto } } + @Override + public boolean register(DataStoreProvider type) { + if ( registerProvider(type) ) { + providers.add(type); + return true; + } + + return false; + } + + @Override + public void unregister(DataStoreProvider type) { + /* Sorry, no unregister supported... */ + } + + @Override + public List getRegistered() { + return Collections.unmodifiableList(providers); + } + + @Inject public void setProviders(List providers) { this.providers = providers; } @@ -178,4 +219,9 @@ public class DataStoreProviderManagerImpl extends ManagerBase implements DataSto public void setImageStoreProviderMgr(ImageStoreProviderManager imageDataStoreProviderMgr) { this.imageStoreProviderMgr = imageDataStoreProviderMgr; } + + public List getProviders() { + return providers; + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index 98c6a3fc001..196b08b2f42 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ b/engine/storage/src/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -32,8 +32,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; -import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.cloudstack.storage.LocalHostEndpoint; +import org.apache.cloudstack.storage.RemoteHostEndPoint; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -44,10 +44,10 @@ import com.cloud.host.dao.HostDao; import com.cloud.storage.DataStoreRole; import 
com.cloud.storage.ScopeType; import com.cloud.utils.db.DB; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -55,7 +55,7 @@ public class DefaultEndPointSelector implements EndPointSelector { private static final Logger s_logger = Logger.getLogger(DefaultEndPointSelector.class); @Inject HostDao hostDao; - private String findOneHostOnPrimaryStorage = "select h.id from host h, storage_pool_host_ref s where h.status = 'Up' and h.type = 'Routing' and h.resource_state = 'Enabled' and" + + private final String findOneHostOnPrimaryStorage = "select h.id from host h, storage_pool_host_ref s where h.status = 'Up' and h.type = 'Routing' and h.resource_state = 'Enabled' and" + " h.id = s.host_id and s.pool_id = ? 
"; protected boolean moveBetweenPrimaryImage(DataStore srcStore, DataStore destStore) { @@ -111,7 +111,7 @@ public class DefaultEndPointSelector implements EndPointSelector { PreparedStatement pstmt = null; ResultSet rs = null; HostVO host = null; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { pstmt = txn.prepareStatement(sql); @@ -220,12 +220,12 @@ public class DefaultEndPointSelector implements EndPointSelector { } private List listUpAndConnectingSecondaryStorageVmHost(Long dcId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); + QueryBuilder sc = QueryBuilder.create(HostVO.class); if (dcId != null) { - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); } - sc.addAnd(sc.getEntity().getStatus(), Op.IN, com.cloud.host.Status.Up, com.cloud.host.Status.Connecting); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.SecondaryStorageVM); + sc.and(sc.entity().getStatus(), Op.IN, Status.Up, Status.Connecting); + sc.and(sc.entity().getType(), Op.EQ, Host.Type.SecondaryStorageVM); return sc.list(); } @@ -250,6 +250,11 @@ public class DefaultEndPointSelector implements EndPointSelector { } } + @Override + public EndPoint select(Scope scope, Long storeId) { + return findEndPointInScope(scope, findOneHostOnPrimaryStorage, storeId); + } + @Override public List selectAll(DataStore store) { List endPoints = new ArrayList(); @@ -258,10 +263,10 @@ public class DefaultEndPointSelector implements EndPointSelector { endPoints.add(RemoteHostEndPoint.getHypervisorHostEndPoint(host.getId(), host.getPrivateIpAddress(), host.getPublicIpAddress())); } else if (store.getScope().getScopeType() == ScopeType.CLUSTER) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getClusterId(), Op.EQ, store.getScope().getScopeId()); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); - List hosts = sc.find(); 
+ QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getClusterId(), Op.EQ, store.getScope().getScopeId()); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); + List hosts = sc.list(); for (HostVO host : hosts) { endPoints.add(RemoteHostEndPoint.getHypervisorHostEndPoint(host.getId(), host.getPrivateIpAddress(), host.getPublicIpAddress())); diff --git a/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelper.java b/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelper.java new file mode 100644 index 00000000000..40ced1d832c --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelper.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.helper; + +import com.cloud.agent.api.to.DataTO; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; + +public interface HypervisorHelper { + DataTO introduceObject(DataTO object, Scope scope, Long storeId); + boolean forgetObject(DataTO object, Scope scope, Long storeId); + SnapshotObjectTO takeSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope); + boolean revertSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope); +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java b/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java new file mode 100644 index 00000000000..81e6f7c69c5 --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/helper/HypervisorHelperImpl.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.helper; + +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataTO; +import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectAnswer; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.log4j.Logger; + +import javax.inject.Inject; + +public class HypervisorHelperImpl implements HypervisorHelper { + private static final Logger s_logger = Logger.getLogger(HypervisorHelperImpl.class); + @Inject + EndPointSelector selector; + + @Override + public DataTO introduceObject(DataTO object, Scope scope, Long storeId) { + EndPoint ep = selector.select(scope, storeId); + IntroduceObjectCmd cmd = new IntroduceObjectCmd(object); + Answer answer = ep.sendMessage(cmd); + if (answer == null || !answer.getResult()) { + String errMsg = answer == null ? null : answer.getDetails(); + throw new CloudRuntimeException("Failed to introduce object, due to " + errMsg); + } + IntroduceObjectAnswer introduceObjectAnswer = (IntroduceObjectAnswer)answer; + return introduceObjectAnswer.getDataTO(); + } + + @Override + public boolean forgetObject(DataTO object, Scope scope, Long storeId) { + EndPoint ep = selector.select(scope, storeId); + ForgetObjectCmd cmd = new ForgetObjectCmd(object); + Answer answer = ep.sendMessage(cmd); + if (answer == null || !answer.getResult()) { + String errMsg = answer == null ? 
null : answer.getDetails(); + if (errMsg != null) { + s_logger.debug("Failed to forget object: " + errMsg); + } + return false; + } + return true; + } + + @Override + public SnapshotObjectTO takeSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope) { + return null; //To change body of implemented methods use File | Settings | File Templates. + } + + @Override + public boolean revertSnapshot(SnapshotObjectTO snapshotObjectTO, Scope scope) { + return false; //To change body of implemented methods use File | Settings | File Templates. + } +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/helper/StorageStrategyFactoryImpl.java b/engine/storage/src/org/apache/cloudstack/storage/helper/StorageStrategyFactoryImpl.java new file mode 100644 index 00000000000..a1d128bec0a --- /dev/null +++ b/engine/storage/src/org/apache/cloudstack/storage/helper/StorageStrategyFactoryImpl.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.storage.helper; + +import java.util.Collection; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; + +import com.cloud.host.Host; +import com.cloud.storage.Snapshot; +import com.cloud.vm.snapshot.VMSnapshot; + +public class StorageStrategyFactoryImpl implements StorageStrategyFactory { + + List snapshotStrategies; + List dataMotionStrategies; + List vmSnapshotStrategies; + + @Override + public DataMotionStrategy getDataMotionStrategy(final DataObject srcData, final DataObject destData) { + return bestMatch(dataMotionStrategies, new CanHandle() { + @Override + public StrategyPriority canHandle(DataMotionStrategy strategy) { + return strategy.canHandle(srcData, destData); + } + }); + } + + @Override + public DataMotionStrategy getDataMotionStrategy(final Map volumeMap, final Host srcHost, final Host destHost) { + return bestMatch(dataMotionStrategies, new CanHandle() { + @Override + public StrategyPriority canHandle(DataMotionStrategy strategy) { + return strategy.canHandle(volumeMap, srcHost, destHost); + } + }); + } + + @Override + public SnapshotStrategy getSnapshotStrategy(final Snapshot snapshot, final SnapshotOperation op) { + return bestMatch(snapshotStrategies, new CanHandle() { + @Override + public 
StrategyPriority canHandle(SnapshotStrategy strategy) { + return strategy.canHandle(snapshot, op); + } + }); + } + + @Override + public VMSnapshotStrategy getVmSnapshotStrategy(final VMSnapshot vmSnapshot) { + return bestMatch(vmSnapshotStrategies, new CanHandle() { + @Override + public StrategyPriority canHandle(VMSnapshotStrategy strategy) { + return strategy.canHandle(vmSnapshot); + } + }); + } + + private static T bestMatch(Collection collection, final CanHandle canHandle) { + if (collection.size() == 0) + return null; + + StrategyPriority highestPriority = StrategyPriority.CANT_HANDLE; + + T strategyToUse = null; + for (T strategy : collection) { + StrategyPriority priority = canHandle.canHandle(strategy); + if (priority.ordinal() > highestPriority.ordinal()) { + highestPriority = priority; + strategyToUse = strategy; + } + } + + return strategyToUse; + } + + private static interface CanHandle { + StrategyPriority canHandle(T strategy); + } + + public List getSnapshotStrategies() { + return snapshotStrategies; + } + + @Inject + public void setSnapshotStrategies(List snapshotStrategies) { + this.snapshotStrategies = snapshotStrategies; + } + + public List getDataMotionStrategies() { + return dataMotionStrategies; + } + + @Inject + public void setDataMotionStrategies(List dataMotionStrategies) { + this.dataMotionStrategies = dataMotionStrategies; + } + + @Inject + public void setVmSnapshotStrategies(List vmSnapshotStrategies) { + this.vmSnapshotStrategies = vmSnapshotStrategies; + } + + public List getVmSnapshotStrategies() { + return vmSnapshotStrategies; + } + +} diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreHelper.java index a64114691c8..e2c48ea51e8 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreHelper.java +++ 
b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreHelper.java @@ -24,11 +24,14 @@ import java.util.UUID; import javax.inject.Inject; +import org.springframework.stereotype.Component; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailVO; import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; -import org.springframework.stereotype.Component; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; @@ -40,6 +43,8 @@ public class ImageStoreHelper { ImageStoreDao imageStoreDao; @Inject ImageStoreDetailsDao imageStoreDetailsDao; + @Inject + SnapshotDataStoreDao snapshotStoreDao; public ImageStoreVO createImageStore(Map params) { ImageStoreVO store = imageStoreDao.findByName((String) params.get("name")); @@ -115,4 +120,18 @@ public class ImageStoreHelper { imageStoreDao.remove(id); return true; } + + /** + * Convert current NFS secondary storage to Staging store to be ready to migrate to S3 object store. + * @param store NFS image store. + * @return true if successful. 
+ */ + public boolean convertToStagingStore(DataStore store) { + ImageStoreVO nfsStore = imageStoreDao.findById(store.getId()); + nfsStore.setRole(DataStoreRole.ImageCache); + imageStoreDao.update(store.getId(), nfsStore); + // clear snapshot entry on primary store to make next snapshot become full snapshot + snapshotStoreDao.deleteSnapshotRecordsOnPrimary(); + return true; + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java index be66cc51401..8afb3d9f31a 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java @@ -32,6 +32,8 @@ public interface ImageStoreProviderManager { List listImageStores(); + List listImageCacheStores(); + List listImageStoresByScope(ZoneScope scope); List listImageStoreByProvider(String provider); diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageStoreDaoImpl.java index b9ef9c307af..13a7f470b0f 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageStoreDaoImpl.java @@ -23,10 +23,11 @@ import java.util.Map; import javax.naming.ConfigurationException; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; -import org.springframework.stereotype.Component; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; @@ -118,4 +119,11 @@ public class ImageStoreDaoImpl extends GenericDaoBase implem return listBy(sc); } 
+ @Override + public List listImageCacheStores() { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("role", SearchCriteria.Op.EQ, DataStoreRole.ImageCache); + return listBy(sc); + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageStoreDetailsDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageStoreDetailsDaoImpl.java index ad52042bc7c..3c766cf80c7 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageStoreDetailsDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/ImageStoreDetailsDaoImpl.java @@ -29,7 +29,7 @@ import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = ImageStoreDetailsDao.class) @@ -46,7 +46,7 @@ public class ImageStoreDetailsDaoImpl extends GenericDaoBase details) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); SearchCriteria sc = storeSearch.create(); sc.setParameters("store", storeId); diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java index d8e6abcf110..ee00dd5d0e9 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java @@ -25,21 +25,23 @@ import java.util.Map; import javax.naming.ConfigurationException; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.storage.DataStoreRole; +import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; @Component @@ -51,7 +53,8 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase cacheSearch; private SearchBuilder snapshotSearch; private SearchBuilder storeSnapshotSearch; - private String parentSearch = "select store_id, store_role, snapshot_id from cloud.snapshot_store_ref where store_id = ? " + + private SearchBuilder snapshotIdSearch; + private final String parentSearch = "select store_id, store_role, snapshot_id from cloud.snapshot_store_ref where store_id = ? " + " and store_role = ? and volume_id = ? 
and state = 'Ready'" + " order by created DESC " + " limit 1"; @@ -101,6 +104,10 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase sc = storeSearch.create(); sc.setParameters("store_id", id); sc.setParameters("store_role", role); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + txn.start(); + remove(sc); + txn.commit(); + } + + @Override + public void deleteSnapshotRecordsOnPrimary() { + SearchCriteria sc = storeSearch.create(); + sc.setParameters("store_role", DataStoreRole.Primary); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); remove(sc); txn.commit(); @@ -173,8 +190,9 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase findBySnapshotId(long snapshotId) { + SearchCriteria sc = snapshotIdSearch.create(); + sc.setParameters("snapshot_id", snapshotId); + return listBy(sc); + } + @Override public List listDestroyed(long id) { SearchCriteria sc = destroyedSearch.create(); @@ -223,4 +246,64 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase sc = storeSnapshotSearch.create(); + sc.setParameters("store_role", DataStoreRole.ImageCache); + sc.setParameters("destroyed", false); + List snapshots = listBy(sc); + // create an entry for each record, but with empty install path since the content is not yet on region-wide store yet + if (snapshots != null) { + s_logger.info("Duplicate " + snapshots.size() + " snapshot cache store records to region store"); + for (SnapshotDataStoreVO snap : snapshots) { + SnapshotDataStoreVO snapStore = findByStoreSnapshot(DataStoreRole.Image, storeId, snap.getSnapshotId()); + if (snapStore != null) { + s_logger.info("There is already entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId); + continue; + } + s_logger.info("Persisting an entry for snapshot " + snap.getSnapshotId() + " on region store " + storeId); + SnapshotDataStoreVO ss = new SnapshotDataStoreVO(); + ss.setSnapshotId(snap.getSnapshotId()); 
+ ss.setDataStoreId(storeId); + ss.setRole(DataStoreRole.Image); + ss.setVolumeId(snap.getVolumeId()); + ss.setParentSnapshotId(snap.getParentSnapshotId()); + ss.setState(snap.getState()); + ss.setSize(snap.getSize()); + ss.setPhysicalSize(snap.getPhysicalSize()); + ss.setRefCnt(snap.getRefCnt()); + persist(ss); + // increase ref_cnt so that this will not be recycled before the content is pushed to region-wide store + snap.incrRefCnt(); + update(snap.getId(), snap); + } + } + + } + + @Override + public List listOnCache(long snapshotId) { + SearchCriteria sc = storeSnapshotSearch.create(); + sc.setParameters("snapshot_id", snapshotId); + sc.setParameters("store_role", DataStoreRole.ImageCache); + return search(sc, null); + } + + @Override + public void updateStoreRoleToCache(long storeId) { + SearchCriteria sc = storeSearch.create(); + sc.setParameters("store_id", storeId); + sc.setParameters("destroyed", false); + List snaps = listBy(sc); + if (snaps != null) { + s_logger.info("Update to cache store role for " + snaps.size() + " entries in snapshot_store_ref"); + for (SnapshotDataStoreVO snap : snaps) { + snap.setRole(DataStoreRole.ImageCache); + update(snap.getId(), snap); + } + } + } + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java index 5f47de86b95..ee7c4fcfd56 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java @@ -31,20 +31,26 @@ import org.springframework.stereotype.Component; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import com.cloud.storage.DataStoreRole; +import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.VMTemplateDao; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; +import com.cloud.utils.exception.CloudRuntimeException; @Component public class TemplateDataStoreDaoImpl extends GenericDaoBase implements TemplateDataStoreDao { @@ -61,6 +67,11 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase params) throws ConfigurationException { super.configure(name, params); @@ -85,6 +96,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase sc = storeSearch.create(); sc.setParameters("store_id", id); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); remove(sc); txn.commit(); @@ -212,7 +224,7 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase sc = templateSearch.create(); sc.setParameters("template_id", templateId); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); expunge(sc); txn.commit(); @@ 
-313,6 +325,24 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase sc = templateRoleSearch.create(); + sc.setParameters("template_id", templateId); + sc.setParameters("store_role", DataStoreRole.ImageCache); + sc.setParameters("destroyed", false); + sc.setParameters("state", ObjectInDataStoreStateMachine.State.Ready); + return findOneIncludingRemovedBy(sc); + } + + @Override + public List listOnCache(long templateId) { + SearchCriteria sc = templateRoleSearch.create(); + sc.setParameters("template_id", templateId); + sc.setParameters("store_role", DataStoreRole.ImageCache); + return search(sc, null); + } + @Override public List listByTemplate(long templateId) { SearchCriteria sc = templateSearch.create(); @@ -341,4 +371,78 @@ public class TemplateDataStoreDaoImpl extends GenericDaoBase sc = templateRoleSearch.create(); + sc.setParameters("store_role", DataStoreRole.ImageCache); + sc.setParameters("destroyed", false); + List tmpls = listBy(sc); + // create an entry for each template record, but with empty install path since the content is not yet on region-wide store yet + if (tmpls != null) { + s_logger.info("Duplicate " + tmpls.size() + " template cache store records to region store"); + for (TemplateDataStoreVO tmpl : tmpls) { + long templateId = tmpl.getTemplateId(); + VMTemplateVO template = _tmpltDao.findById(templateId); + if (template == null) { + throw new CloudRuntimeException("No template is found for template id: " + templateId); + } + if (template.getTemplateType() == TemplateType.SYSTEM) { + s_logger.info("No need to duplicate system template since it will be automatically downloaded while adding region store"); + continue; + } + TemplateDataStoreVO tmpStore = findByStoreTemplate(storeId, tmpl.getTemplateId()); + if (tmpStore != null) { + s_logger.info("There is already entry for template " + tmpl.getTemplateId() + " on region store " + storeId); + continue; + } + s_logger.info("Persisting an entry for template " + tmpl.getTemplateId() + " 
on region store " + storeId); + TemplateDataStoreVO ts = new TemplateDataStoreVO(); + ts.setTemplateId(tmpl.getTemplateId()); + ts.setDataStoreId(storeId); + ts.setDataStoreRole(DataStoreRole.Image); + ts.setState(tmpl.getState()); + ts.setDownloadPercent(tmpl.getDownloadPercent()); + ts.setDownloadState(tmpl.getDownloadState()); + ts.setSize(tmpl.getSize()); + ts.setPhysicalSize(tmpl.getPhysicalSize()); + ts.setErrorString(tmpl.getErrorString()); + ts.setDownloadUrl(tmpl.getDownloadUrl()); + ts.setRefCnt(tmpl.getRefCnt()); + persist(ts); + // increase ref_cnt of cache store entry so that this will not be recycled before the content is pushed to region-wide store + tmpl.incrRefCnt(); + this.update(tmpl.getId(), tmpl); + + // mark the template as cross-zones + template.setCrossZones(true); + _tmpltDao.update(templateId, template); + // add template_zone_ref association for these cross-zone templates + _tmplSrv.associateTemplateToZone(templateId, null); + } + + } + } + + + @Override + public void updateStoreRoleToCachce(long storeId) { + SearchCriteria sc = storeSearch.create(); + sc.setParameters("store_id", storeId); + sc.setParameters("destroyed", false); + List tmpls = listBy(sc); + if (tmpls != null) { + s_logger.info("Update to cache store role for " + tmpls.size() + " entries in template_store_ref"); + for (TemplateDataStoreVO tmpl : tmpls) { + tmpl.setDataStoreRole(DataStoreRole.ImageCache); + update(tmpl.getId(), tmpl); + } + } + + } + + } diff --git a/engine/storage/src/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java index 04f8b70e44b..a3ff5666a7d 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/image/db/VolumeDataStoreDaoImpl.java @@ -16,25 +16,30 @@ // under the License. 
package org.apache.cloudstack.storage.image.db; +import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; +import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; @Component @@ -45,6 +50,9 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase storeSearch; private SearchBuilder cacheSearch; private SearchBuilder storeVolumeSearch; + + @Inject + DataStoreManager storeMgr; @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -143,7 +151,7 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase sc = storeSearch.create(); sc.setParameters("store_id", id); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); remove(sc); txn.commit(); @@ -186,4 +194,45 @@ public class VolumeDataStoreDaoImpl extends GenericDaoBase cacheStores = storeMgr.listImageCacheStores(); + if (cacheStores == null || 
cacheStores.size() == 0) { + return; + } + List vols = new ArrayList(); + for (DataStore store : cacheStores) { + // check if the volume is stored there + vols.addAll(listByStoreId(store.getId())); + } + // create an entry for each record, but with empty install path since the content is not yet on region-wide store yet + if (vols != null) { + s_logger.info("Duplicate " + vols.size() + " volume cache store records to region store"); + for (VolumeDataStoreVO vol : vols) { + VolumeDataStoreVO volStore = findByStoreVolume(storeId, vol.getVolumeId()); + if (volStore != null) { + s_logger.info("There is already entry for volume " + vol.getVolumeId() + " on region store " + storeId); + continue; + } + s_logger.info("Persisting an entry for volume " + vol.getVolumeId() + " on region store " + storeId); + VolumeDataStoreVO vs = new VolumeDataStoreVO(); + vs.setVolumeId(vol.getVolumeId()); + vs.setDataStoreId(storeId); + vs.setState(vol.getState()); + vs.setDownloadPercent(vol.getDownloadPercent()); + vs.setDownloadState(vol.getDownloadState()); + vs.setSize(vol.getSize()); + vs.setPhysicalSize(vol.getPhysicalSize()); + vs.setErrorString(vol.getErrorString()); + vs.setRefCnt(vol.getRefCnt()); + persist(vs); + // increase ref_cnt so that this will not be recycled before the content is pushed to region-wide store + vol.incrRefCnt(); + this.update(vol.getId(), vol); + } + } + + } } diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java index 4ea4ceec555..90ad17aa6d8 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/datastore/PrimaryDataStoreHelper.java @@ -43,7 +43,7 @@ import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import 
com.cloud.storage.dao.StoragePoolHostDao; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -181,7 +181,7 @@ public class PrimaryDataStoreHelper { public boolean deletePrimaryDataStore(DataStore store) { List hostPoolRecords = this.storagePoolHostDao.listByPoolId(store.getId()); StoragePoolVO poolVO = this.dataStoreDao.findById(store.getId()); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (StoragePoolHostVO host : hostPoolRecords) { storagePoolHostDao.deleteStoragePoolHostDetails(host.getHostId(), host.getPoolId()); diff --git a/engine/storage/src/org/apache/cloudstack/storage/volume/db/PrimaryDataStoreDetailsDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/PrimaryDataStoreDetailsDaoImpl.java index 9d174348c73..ee45053d0a4 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/PrimaryDataStoreDetailsDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/PrimaryDataStoreDetailsDaoImpl.java @@ -16,59 +16,19 @@ // under the License. 
package org.apache.cloudstack.storage.volume.db; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailVO; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDetailsDao; import org.springframework.stereotype.Component; -import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; @Component -public class PrimaryDataStoreDetailsDaoImpl extends GenericDaoBase implements +public class PrimaryDataStoreDetailsDaoImpl extends ResourceDetailsDaoBase implements PrimaryDataStoreDetailsDao { - protected final SearchBuilder PoolSearch = null; - - protected PrimaryDataStoreDetailsDaoImpl() { - /* - * super(); PoolSearch = createSearchBuilder(); PoolSearch.and("pool", - * PoolSearch.entity().getPoolId(), SearchCriteria.Op.EQ); - * PoolSearch.done(); - */ - } - @Override - public void update(long poolId, Map details) { - Transaction txn = Transaction.currentTxn(); - SearchCriteria sc = PoolSearch.create(); - sc.setParameters("pool", poolId); - - txn.start(); - expunge(sc); - for (Map.Entry entry : details.entrySet()) { - PrimaryDataStoreDetailVO detail = new PrimaryDataStoreDetailVO(poolId, entry.getKey(), entry.getValue()); - persist(detail); - } - txn.commit(); - } - - @Override - public Map getDetails(long poolId) { - SearchCriteria sc = PoolSearch.create(); - sc.setParameters("pool", poolId); - - List details = listBy(sc); - Map detailsMap = new HashMap(); - for (PrimaryDataStoreDetailVO detail : details) { - detailsMap.put(detail.getName(), detail.getValue()); - } - - return detailsMap; + public void addDetail(long resourceId, String key, String value) { + super.addDetail(new PrimaryDataStoreDetailVO(resourceId, key, value)); } + } diff --git 
a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java index 7c986403636..50ab9e6c42b 100644 --- a/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java +++ b/engine/storage/src/org/apache/cloudstack/storage/volume/db/TemplatePrimaryDataStoreDaoImpl.java @@ -20,18 +20,18 @@ package org.apache.cloudstack.storage.volume.db; import java.util.Date; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.State; + import com.cloud.utils.db.GenericDaoBase; -import com.cloud.utils.db.SearchCriteria.Op; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; +import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.UpdateBuilder; @Component @@ -50,20 +50,18 @@ public class TemplatePrimaryDataStoreDaoImpl extends GenericDaoBase sc = SearchCriteria2 - .create(TemplatePrimaryDataStoreVO.class); - sc.addAnd(sc.getEntity().getTemplateId(), Op.EQ, templateId); - sc.addAnd(sc.getEntity().getPoolId(), Op.EQ, poolId); + QueryBuilder sc = QueryBuilder.create(TemplatePrimaryDataStoreVO.class); + sc.and(sc.entity().getTemplateId(), Op.EQ, templateId); + 
sc.and(sc.entity().getPoolId(), Op.EQ, poolId); return sc.find(); } @Override public TemplatePrimaryDataStoreVO findByTemplateIdAndPoolIdAndReady(long templateId, long poolId) { - SearchCriteriaService sc = SearchCriteria2 - .create(TemplatePrimaryDataStoreVO.class); - sc.addAnd(sc.getEntity().getTemplateId(), Op.EQ, templateId); - sc.addAnd(sc.getEntity().getPoolId(), Op.EQ, poolId); - sc.addAnd(sc.getEntity().getState(), Op.EQ, ObjectInDataStoreStateMachine.State.Ready); + QueryBuilder sc = QueryBuilder.create(TemplatePrimaryDataStoreVO.class); + sc.and(sc.entity().getTemplateId(), Op.EQ, templateId); + sc.and(sc.entity().getPoolId(), Op.EQ, poolId); + sc.and(sc.entity().getState(), Op.EQ, ObjectInDataStoreStateMachine.State.Ready); return sc.find(); } diff --git a/engine/storage/test/org/apache/cloudstack/engine/subsystem/api/storage/StrategyPriorityTest.java b/engine/storage/test/org/apache/cloudstack/engine/subsystem/api/storage/StrategyPriorityTest.java new file mode 100644 index 00000000000..1c3ceb6653b --- /dev/null +++ b/engine/storage/test/org/apache/cloudstack/engine/subsystem/api/storage/StrategyPriorityTest.java @@ -0,0 +1,161 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.engine.subsystem.api.storage; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.storage.helper.StorageStrategyFactoryImpl; +import org.junit.Test; + +import com.cloud.host.Host; +import com.cloud.storage.Snapshot; + +import static org.junit.Assert.assertEquals; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.mock; + +public class StrategyPriorityTest { + + @Test + public void testSortSnapshotStrategies() { + SnapshotStrategy cantHandleStrategy = mock(SnapshotStrategy.class); + SnapshotStrategy defaultStrategy = mock(SnapshotStrategy.class); + SnapshotStrategy hyperStrategy = mock(SnapshotStrategy.class); + SnapshotStrategy pluginStrategy = mock(SnapshotStrategy.class); + SnapshotStrategy highestStrategy = mock(SnapshotStrategy.class); + + doReturn(StrategyPriority.CANT_HANDLE).when(cantHandleStrategy).canHandle(any(Snapshot.class), any(SnapshotOperation.class)); + doReturn(StrategyPriority.DEFAULT).when(defaultStrategy).canHandle(any(Snapshot.class), any(SnapshotOperation.class)); + doReturn(StrategyPriority.HYPERVISOR).when(hyperStrategy).canHandle(any(Snapshot.class), any(SnapshotOperation.class)); + doReturn(StrategyPriority.PLUGIN).when(pluginStrategy).canHandle(any(Snapshot.class), any(SnapshotOperation.class)); + doReturn(StrategyPriority.HIGHEST).when(highestStrategy).canHandle(any(Snapshot.class), any(SnapshotOperation.class)); + + List strategies = new ArrayList(5); + SnapshotStrategy strategy = null; + + StorageStrategyFactoryImpl factory = new StorageStrategyFactoryImpl(); + factory.setSnapshotStrategies(strategies); + + strategies.add(cantHandleStrategy); + strategy = factory.getSnapshotStrategy(mock(Snapshot.class), 
SnapshotOperation.TAKE); + assertEquals("A strategy was found when it shouldn't have been.", null, strategy); + + strategies.add(defaultStrategy); + strategy = factory.getSnapshotStrategy(mock(Snapshot.class), SnapshotOperation.TAKE); + assertEquals("Default strategy was not picked.", defaultStrategy, strategy); + + strategies.add(hyperStrategy); + strategy = factory.getSnapshotStrategy(mock(Snapshot.class), SnapshotOperation.TAKE); + assertEquals("Hypervisor strategy was not picked.", hyperStrategy, strategy); + + strategies.add(pluginStrategy); + strategy = factory.getSnapshotStrategy(mock(Snapshot.class), SnapshotOperation.TAKE); + assertEquals("Plugin strategy was not picked.", pluginStrategy, strategy); + + strategies.add(highestStrategy); + strategy = factory.getSnapshotStrategy(mock(Snapshot.class), SnapshotOperation.TAKE); + assertEquals("Highest strategy was not picked.", highestStrategy, strategy); + } + + @Test + public void testSortDataMotionStrategies() { + DataMotionStrategy cantHandleStrategy = mock(DataMotionStrategy.class); + DataMotionStrategy defaultStrategy = mock(DataMotionStrategy.class); + DataMotionStrategy hyperStrategy = mock(DataMotionStrategy.class); + DataMotionStrategy pluginStrategy = mock(DataMotionStrategy.class); + DataMotionStrategy highestStrategy = mock(DataMotionStrategy.class); + + doReturn(StrategyPriority.CANT_HANDLE).when(cantHandleStrategy).canHandle(any(DataObject.class), any(DataObject.class)); + doReturn(StrategyPriority.DEFAULT).when(defaultStrategy).canHandle(any(DataObject.class), any(DataObject.class)); + doReturn(StrategyPriority.HYPERVISOR).when(hyperStrategy).canHandle(any(DataObject.class), any(DataObject.class)); + doReturn(StrategyPriority.PLUGIN).when(pluginStrategy).canHandle(any(DataObject.class), any(DataObject.class)); + doReturn(StrategyPriority.HIGHEST).when(highestStrategy).canHandle(any(DataObject.class), any(DataObject.class)); + + List strategies = new ArrayList(5); + DataMotionStrategy strategy = 
null; + + StorageStrategyFactoryImpl factory = new StorageStrategyFactoryImpl(); + factory.setDataMotionStrategies(strategies); + + strategies.add(cantHandleStrategy); + strategy = factory.getDataMotionStrategy(mock(DataObject.class), mock(DataObject.class)); + assertEquals("A strategy was found when it shouldn't have been.", null, strategy); + + strategies.add(defaultStrategy); + strategy = factory.getDataMotionStrategy(mock(DataObject.class), mock(DataObject.class)); + assertEquals("Default strategy was not picked.", defaultStrategy, strategy); + + strategies.add(hyperStrategy); + strategy = factory.getDataMotionStrategy(mock(DataObject.class), mock(DataObject.class)); + assertEquals("Hypervisor strategy was not picked.", hyperStrategy, strategy); + + strategies.add(pluginStrategy); + strategy = factory.getDataMotionStrategy(mock(DataObject.class), mock(DataObject.class)); + assertEquals("Plugin strategy was not picked.", pluginStrategy, strategy); + + strategies.add(highestStrategy); + strategy = factory.getDataMotionStrategy(mock(DataObject.class), mock(DataObject.class)); + assertEquals("Highest strategy was not picked.", highestStrategy, strategy); + } + + @Test + @SuppressWarnings("unchecked") + public void testSortDataMotionStrategies2() { + DataMotionStrategy cantHandleStrategy = mock(DataMotionStrategy.class); + DataMotionStrategy defaultStrategy = mock(DataMotionStrategy.class); + DataMotionStrategy hyperStrategy = mock(DataMotionStrategy.class); + DataMotionStrategy pluginStrategy = mock(DataMotionStrategy.class); + DataMotionStrategy highestStrategy = mock(DataMotionStrategy.class); + + doReturn(StrategyPriority.CANT_HANDLE).when(cantHandleStrategy).canHandle(any(Map.class), any(Host.class), any(Host.class)); + doReturn(StrategyPriority.DEFAULT).when(defaultStrategy).canHandle(any(Map.class), any(Host.class), any(Host.class)); + doReturn(StrategyPriority.HYPERVISOR).when(hyperStrategy).canHandle(any(Map.class), any(Host.class), any(Host.class)); + 
doReturn(StrategyPriority.PLUGIN).when(pluginStrategy).canHandle(any(Map.class), any(Host.class), any(Host.class)); + doReturn(StrategyPriority.HIGHEST).when(highestStrategy).canHandle(any(Map.class), any(Host.class), any(Host.class)); + + List strategies = new ArrayList(5); + DataMotionStrategy strategy = null; + + StorageStrategyFactoryImpl factory = new StorageStrategyFactoryImpl(); + factory.setDataMotionStrategies(strategies); + + strategies.add(cantHandleStrategy); + strategy = factory.getDataMotionStrategy(mock(Map.class), mock(Host.class), mock(Host.class)); + assertEquals("A strategy was found when it shouldn't have been.", null, strategy); + + strategies.add(defaultStrategy); + strategy = factory.getDataMotionStrategy(mock(Map.class), mock(Host.class), mock(Host.class)); + assertEquals("Default strategy was not picked.", defaultStrategy, strategy); + + strategies.add(hyperStrategy); + strategy = factory.getDataMotionStrategy(mock(Map.class), mock(Host.class), mock(Host.class)); + assertEquals("Hypervisor strategy was not picked.", hyperStrategy, strategy); + + strategies.add(pluginStrategy); + strategy = factory.getDataMotionStrategy(mock(Map.class), mock(Host.class), mock(Host.class)); + assertEquals("Plugin strategy was not picked.", pluginStrategy, strategy); + + strategies.add(highestStrategy); + strategy = factory.getDataMotionStrategy(mock(Map.class), mock(Host.class), mock(Host.class)); + assertEquals("Highest strategy was not picked.", highestStrategy, strategy); + } +} diff --git a/engine/storage/volume/resources/META-INF/cloudstack/core/spring-engine-storage-volume-core-context.xml b/engine/storage/volume/resources/META-INF/cloudstack/core/spring-engine-storage-volume-core-context.xml new file mode 100644 index 00000000000..ba9afb538ec --- /dev/null +++ b/engine/storage/volume/resources/META-INF/cloudstack/core/spring-engine-storage-volume-core-context.xml @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java index 7c8f49a9c9b..2d99c9b6b48 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/datastore/PrimaryDataStoreImpl.java @@ -23,19 +23,20 @@ import java.util.List; import javax.inject.Inject; -import com.cloud.utils.db.GlobalLock; +import org.apache.log4j.Logger; + import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; @@ -44,14 +45,13 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import 
org.apache.cloudstack.storage.volume.VolumeObject; -import org.apache.log4j.Logger; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.VMTemplateStoragePoolVO; @@ -60,6 +60,7 @@ import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.storage.dao.VMTemplatePoolDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.db.GlobalLock; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.storage.encoding.EncodingType; @@ -111,7 +112,7 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public List getVolumes() { - List volumes = volumeDao.findByPoolId(this.getId()); + List volumes = volumeDao.findByPoolId(getId()); List volumeInfos = new ArrayList(); for (VolumeVO volume : volumes) { volumeInfos.add(VolumeObject.getVolumeObject(this, volume)); @@ -121,7 +122,7 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public DataStoreDriver getDriver() { - return this.driver; + return driver; } @Override @@ -131,28 +132,28 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public long getId() { - return this.pdsv.getId(); + return pdsv.getId(); } @Override public String getUri() { - String path = this.pdsv.getPath(); + String path = pdsv.getPath(); path.replaceFirst("/*", ""); StringBuilder builder = new StringBuilder(); - builder.append(this.pdsv.getPoolType()); + builder.append(pdsv.getPoolType()); builder.append("://"); - builder.append(this.pdsv.getHostAddress()); + builder.append(pdsv.getHostAddress()); 
builder.append(File.separator); - builder.append(this.pdsv.getPath()); + builder.append(pdsv.getPath()); builder.append(File.separator); - builder.append("?" + EncodingType.ROLE + "=" + this.getRole()); - builder.append("&" + EncodingType.STOREUUID + "=" + this.pdsv.getUuid()); + builder.append("?" + EncodingType.ROLE + "=" + getRole()); + builder.append("&" + EncodingType.STOREUUID + "=" + pdsv.getUuid()); return builder.toString(); } @Override public Scope getScope() { - StoragePoolVO vo = dataStoreDao.findById(this.pdsv.getId()); + StoragePoolVO vo = dataStoreDao.findById(pdsv.getId()); if (vo.getScope() == ScopeType.CLUSTER) { return new ClusterScope(vo.getClusterId(), vo.getPodId(), vo.getDataCenterId()); } else if (vo.getScope() == ScopeType.ZONE) { @@ -184,7 +185,7 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public String getUuid() { - return this.pdsv.getUuid(); + return pdsv.getUuid(); } @Override @@ -195,7 +196,7 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public PrimaryDataStoreLifeCycle getLifeCycle() { - return this.lifeCycle; + return lifeCycle; } @Override @@ -205,7 +206,7 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public TemplateInfo getTemplate(long templateId) { - VMTemplateStoragePoolVO template = templatePoolDao.findByPoolTemplate(this.getId(), templateId); + VMTemplateStoragePoolVO template = templatePoolDao.findByPoolTemplate(getId(), templateId); if (template == null || template.getState() != ObjectInDataStoreStateMachine.State.Ready) { return null; } @@ -227,7 +228,7 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { // create template on primary storage if (obj.getType() == DataObjectType.TEMPLATE) { try{ - String templateIdPoolIdString = "templateId:" + obj.getId() + "poolId:" + this.getId(); + String templateIdPoolIdString = "templateId:" + obj.getId() + "poolId:" + getId(); VMTemplateStoragePoolVO templateStoragePoolRef; 
GlobalLock lock = GlobalLock.getInternLock(templateIdPoolIdString); if (!lock.lock(5)) { @@ -235,21 +236,21 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { return null; } try { - templateStoragePoolRef = templatePoolDao.findByPoolTemplate(this.getId(), + templateStoragePoolRef = templatePoolDao.findByPoolTemplate(getId(), obj.getId()); if (templateStoragePoolRef == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Not found (" + templateIdPoolIdString + ") in template_spool_ref, persisting it"); } - templateStoragePoolRef = new VMTemplateStoragePoolVO(this.getId(), obj.getId()); + templateStoragePoolRef = new VMTemplateStoragePoolVO(getId(), obj.getId()); templateStoragePoolRef = templatePoolDao.persist(templateStoragePoolRef); } } catch (Throwable t) { if (s_logger.isDebugEnabled()) { s_logger.debug("Failed to insert (" + templateIdPoolIdString + ") to template_spool_ref", t); } - templateStoragePoolRef = templatePoolDao.findByPoolTemplate(this.getId(), obj.getId()); + templateStoragePoolRef = templatePoolDao.findByPoolTemplate(getId(), obj.getId()); if (templateStoragePoolRef == null) { throw new CloudRuntimeException("Failed to create template storage pool entry"); } else { @@ -266,6 +267,12 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { } } else if (obj.getType() == DataObjectType.SNAPSHOT) { return objectInStoreMgr.create(obj, this); + } else if (obj.getType() == DataObjectType.VOLUME) { + VolumeVO vol = volumeDao.findById(obj.getId()); + if (vol != null) { + vol.setPoolId(getId()); + volumeDao.update(vol.getId(), vol); + } } return objectInStoreMgr.get(obj, this); @@ -280,96 +287,96 @@ public class PrimaryDataStoreImpl implements PrimaryDataStore { @Override public long getDataCenterId() { - return this.pdsv.getDataCenterId(); + return pdsv.getDataCenterId(); } @Override public String getPath() { - return this.pdsv.getPath(); + return pdsv.getPath(); } @Override public StoragePoolType getPoolType() { - return 
this.pdsv.getPoolType(); + return pdsv.getPoolType(); } @Override public Date getCreated() { - return this.pdsv.getCreated(); + return pdsv.getCreated(); } @Override public Date getUpdateTime() { - return this.pdsv.getUpdateTime(); + return pdsv.getUpdateTime(); } @Override public long getCapacityBytes() { - return this.pdsv.getCapacityBytes(); + return pdsv.getCapacityBytes(); } @Override public long getUsedBytes() { - return this.pdsv.getUsedBytes(); + return pdsv.getUsedBytes(); } @Override public Long getCapacityIops() { - return this.pdsv.getCapacityIops(); + return pdsv.getCapacityIops(); } @Override public Long getClusterId() { - return this.pdsv.getClusterId(); + return pdsv.getClusterId(); } @Override public String getHostAddress() { - return this.pdsv.getHostAddress(); + return pdsv.getHostAddress(); } @Override public String getUserInfo() { - return this.pdsv.getUserInfo(); + return pdsv.getUserInfo(); } @Override public boolean isShared() { - return this.pdsv.getScope() == ScopeType.HOST ? false : true; + return pdsv.getScope() == ScopeType.HOST ? 
false : true; } @Override public boolean isLocal() { - return !this.isShared(); + return !isShared(); } @Override public StoragePoolStatus getStatus() { - return this.pdsv.getStatus(); + return pdsv.getStatus(); } @Override public int getPort() { - return this.pdsv.getPort(); + return pdsv.getPort(); } @Override public Long getPodId() { - return this.pdsv.getPodId(); + return pdsv.getPodId(); } public Date getRemoved() { - return this.pdsv.getRemoved(); + return pdsv.getRemoved(); } @Override public boolean isInMaintenance() { - return this.getStatus() == StoragePoolStatus.PrepareForMaintenance || this.getStatus() == StoragePoolStatus.Maintenance || this.getStatus() == StoragePoolStatus.ErrorInMaintenance || this.getRemoved() != null; + return getStatus() == StoragePoolStatus.PrepareForMaintenance || getStatus() == StoragePoolStatus.Maintenance || getStatus() == StoragePoolStatus.ErrorInMaintenance || getRemoved() != null; } @Override public String getStorageProviderName() { - return this.pdsv.getStorageProviderName(); + return pdsv.getStorageProviderName(); } @Override diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java index 1d75ba1529b..8065f9c9378 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeDataFactoryImpl.java @@ -18,8 +18,13 @@ */ package org.apache.cloudstack.storage.volume; +import java.util.ArrayList; +import java.util.List; + import javax.inject.Inject; +import org.springframework.stereotype.Component; + import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -27,7 +32,6 @@ import 
org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; -import org.springframework.stereotype.Component; import com.cloud.storage.DataStoreRole; import com.cloud.storage.VolumeVO; @@ -58,13 +62,13 @@ public class VolumeDataFactoryImpl implements VolumeDataFactory { if (storeRole == DataStoreRole.Image) { VolumeDataStoreVO volumeStore = volumeStoreDao.findByVolume(volumeId); if (volumeStore != null) { - DataStore store = this.storeMgr.getDataStore(volumeStore.getDataStoreId(), DataStoreRole.Image); + DataStore store = storeMgr.getDataStore(volumeStore.getDataStoreId(), DataStoreRole.Image); vol = VolumeObject.getVolumeObject(store, volumeVO); } } else { // Primary data store if (volumeVO.getPoolId() != null) { - DataStore store = this.storeMgr.getDataStore(volumeVO.getPoolId(), DataStoreRole.Primary); + DataStore store = storeMgr.getDataStore(volumeVO.getPoolId(), DataStoreRole.Primary); vol = VolumeObject.getVolumeObject(store, volumeVO); } } @@ -82,11 +86,11 @@ public class VolumeDataFactoryImpl implements VolumeDataFactory { DataStore store = null; VolumeDataStoreVO volumeStore = volumeStoreDao.findByVolume(volumeId); if (volumeStore != null) { - store = this.storeMgr.getDataStore(volumeStore.getDataStoreId(), DataStoreRole.Image); + store = storeMgr.getDataStore(volumeStore.getDataStoreId(), DataStoreRole.Image); } vol = VolumeObject.getVolumeObject(store, volumeVO); } else { - DataStore store = this.storeMgr.getDataStore(volumeVO.getPoolId(), DataStoreRole.Primary); + DataStore store = storeMgr.getDataStore(volumeVO.getPoolId(), DataStoreRole.Primary); vol = VolumeObject.getVolumeObject(store, volumeVO); } return vol; @@ -99,4 +103,23 @@ public class VolumeDataFactoryImpl implements VolumeDataFactory { return vol; } + @Override + public List 
listVolumeOnCache(long volumeId) { + List cacheVols = new ArrayList(); + // find all image cache stores for this zone scope + List cacheStores = storeMgr.listImageCacheStores(); + if (cacheStores == null || cacheStores.size() == 0) { + return cacheVols; + } + for (DataStore store : cacheStores) { + // check if the volume is stored there + VolumeDataStoreVO volStore = volumeStoreDao.findByStoreVolume(store.getId(), volumeId); + if (volStore != null) { + VolumeInfo vol = getVolume(volumeId, store); + cacheVols.add(vol); + } + } + return cacheVols; + } + } diff --git a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index e7ff021b4f0..870363afb24 100644 --- a/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -18,51 +18,6 @@ */ package org.apache.cloudstack.storage.volume; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Map; - -import javax.inject.Inject; - -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; -import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; -import 
org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; -import org.apache.cloudstack.engine.subsystem.api.storage.Scope; -import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; -import org.apache.cloudstack.framework.async.AsyncCallFuture; -import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.framework.async.AsyncRpcContext; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.command.CommandResult; -import org.apache.cloudstack.storage.command.CopyCmdAnswer; -import org.apache.cloudstack.storage.command.DeleteCommand; -import org.apache.cloudstack.storage.datastore.DataObjectManager; -import org.apache.cloudstack.storage.datastore.ObjectInDataStoreManager; -import org.apache.cloudstack.storage.datastore.PrimaryDataStore; -import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; -import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; -import org.apache.cloudstack.storage.to.VolumeObjectTO; - import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.ListVolumeAnswer; import com.cloud.agent.api.storage.ListVolumeCommand; @@ -94,6 +49,46 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; import 
com.cloud.utils.exception.CloudRuntimeException; +import org.apache.cloudstack.engine.cloud.entity.api.VolumeEntity; +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.Scope; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcContext; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import 
org.apache.cloudstack.storage.command.DeleteCommand; +import org.apache.cloudstack.storage.datastore.PrimaryDataStore; +import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import javax.inject.Inject; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Map; @Component public class VolumeServiceImpl implements VolumeService { @@ -103,10 +98,6 @@ public class VolumeServiceImpl implements VolumeService { @Inject PrimaryDataStoreProviderManager dataStoreMgr; @Inject - ObjectInDataStoreManager objectInDataStoreMgr; - @Inject - DataObjectManager dataObjectMgr; - @Inject DataMotionService motionSrv; @Inject VolumeDataFactory volFactory; @@ -400,8 +391,12 @@ public class VolumeServiceImpl implements VolumeService { VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId()); if (templatePoolRef == null) { - throw new CloudRuntimeException("Failed to find template " + template.getUniqueName() - + " in VMTemplateStoragePool"); + throw new CloudRuntimeException("Failed to find template " + template.getUniqueName() + " in storage pool " + dataStore.getId()); + } else { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Found template " + template.getUniqueName() + " in storage pool " + dataStore.getId() + " with VMTemplateStoragePool id: " + + templatePoolRef.getId()); + } } long templatePoolRefId = templatePoolRef.getId(); CreateBaseImageContext context = new CreateBaseImageContext(null, volume, @@ -411,9 +406,15 @@ public class VolumeServiceImpl implements VolumeService { int storagePoolMaxWaitSeconds = NumbersUtil.parseInt( 
configDao.getValue(Config.StoragePoolMaxWaitSeconds.key()), 3600); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Acquire lock on VMTemplateStoragePool " + templatePoolRefId + " with timeout " + storagePoolMaxWaitSeconds + " seconds"); + } templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolRefId, storagePoolMaxWaitSeconds); if (templatePoolRef == null) { + if (s_logger.isDebugEnabled()) { + s_logger.info("Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId); + } templatePoolRef = _tmpltPoolDao.findByPoolTemplate(dataStore.getId(), template.getId()); if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready ) { s_logger.info("Unable to acquire lock on VMTemplateStoragePool " + templatePoolRefId + ", But Template " + template.getUniqueName() + " is already copied to primary storage, skip copying"); @@ -423,17 +424,15 @@ public class VolumeServiceImpl implements VolumeService { throw new CloudRuntimeException("Unable to acquire lock on VMTemplateStoragePool: " + templatePoolRefId); } + if (s_logger.isDebugEnabled()) { + s_logger.info("lock is acquired for VMTemplateStoragePool " + templatePoolRefId); + } try { - // lock acquired if (templatePoolRef.getState() == ObjectInDataStoreStateMachine.State.Ready ) { s_logger.info("Template " + template.getUniqueName() + " is already copied to primary storage, skip copying"); createVolumeFromBaseImageAsync(volume, templateOnPrimaryStoreObj, dataStore, future); return; } - // remove the leftover hanging entry - dataStore.delete(templateOnPrimaryStoreObj); - // create a new entry to restart copying process - templateOnPrimaryStoreObj = dataStore.create(template); templateOnPrimaryStoreObj.processEvent(Event.CreateOnlyRequested); motionSrv.copyAsync(template, templateOnPrimaryStoreObj, caller); } catch (Throwable e) { @@ -442,6 +441,10 @@ public class VolumeServiceImpl implements VolumeService { VolumeApiResult result = new VolumeApiResult(volume); 
result.setResult(e.toString()); future.complete(result); + } finally { + if (s_logger.isDebugEnabled()) { + s_logger.info("releasing lock for VMTemplateStoragePool " + templatePoolRefId); + } _tmpltPoolDao.releaseFromLockTable(templatePoolRefId); } return; @@ -518,6 +521,7 @@ public class VolumeServiceImpl implements VolumeService { if (result.isSuccess()) { vo.processEvent(Event.OperationSuccessed, result.getAnswer()); } else { + vo.processEvent(Event.OperationFailed); volResult.setResult(result.getResult()); // hack for Vmware: host is down, previously download template to the host needs to be re-downloaded, so we need to reset @@ -712,7 +716,7 @@ public class VolumeServiceImpl implements VolumeService { srcVolume.processEvent(Event.OperationSuccessed); destVolume.processEvent(Event.OperationSuccessed, result.getAnswer()); - // srcVolume.getDataStore().delete(srcVolume); + srcVolume.getDataStore().delete(srcVolume); future.complete(res); } catch (Exception e) { res.setResult(e.toString()); @@ -1291,20 +1295,11 @@ public class VolumeServiceImpl implements VolumeService { @Override public SnapshotInfo takeSnapshot(VolumeInfo volume) { - VolumeObject vol = (VolumeObject) volume; - vol.stateTransit(Volume.Event.SnapshotRequested); - SnapshotInfo snapshot = null; try { snapshot = snapshotMgr.takeSnapshot(volume); } catch (Exception e) { s_logger.debug("Take snapshot: " + volume.getId() + " failed", e); - } finally { - if (snapshot != null) { - vol.stateTransit(Volume.Event.OperationSucceeded); - } else { - vol.stateTransit(Volume.Event.OperationFailed); - } } return snapshot; diff --git a/framework/cluster/resources/META-INF/cloudstack/core/spring-framework-cluster-core-context.xml b/framework/cluster/resources/META-INF/cloudstack/core/spring-framework-cluster-core-context.xml new file mode 100644 index 00000000000..13353b0ead1 --- /dev/null +++ b/framework/cluster/resources/META-INF/cloudstack/core/spring-framework-cluster-core-context.xml @@ -0,0 +1,36 @@ + + + + + 
+ + + + + diff --git a/framework/cluster/src/com/cloud/cluster/ClusterManagerImpl.java b/framework/cluster/src/com/cloud/cluster/ClusterManagerImpl.java index 0f1cf9da487..71bea4f2e4f 100644 --- a/framework/cluster/src/com/cloud/cluster/ClusterManagerImpl.java +++ b/framework/cluster/src/com/cloud/cluster/ClusterManagerImpl.java @@ -17,7 +17,6 @@ package com.cloud.cluster; import java.io.File; -import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.net.ConnectException; @@ -44,10 +43,10 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.identity.ManagementServerNode; import com.cloud.cluster.dao.ManagementServerHostDao; @@ -61,6 +60,9 @@ import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.ConnectionConcierge; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.events.SubscriptionMgr; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExceptionUtil; @@ -217,18 +219,18 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } private Runnable getClusterPduSendingTask() { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { + protected void runInContext() { onSendingClusterPdu(); } }; } private Runnable getClusterPduNotificationTask() { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { + protected void runInContext() { 
onNotifyingClusterPdu(); } }; @@ -289,9 +291,9 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C if(pdu == null) continue; - _executor.execute(new Runnable() { + _executor.execute(new ManagedContextRunnable() { @Override - public void run() { + protected void runInContext() { if(pdu.getPduType() == ClusterServicePdu.PDU_TYPE_RESPONSE) { ClusterServiceRequestPdu requestPdu = popRequestPdu(pdu.getAckSequenceId()); if(requestPdu != null) { @@ -528,10 +530,10 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C } private Runnable getHeartbeatTask() { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { - Transaction txn = Transaction.open("ClusterHeartbeat"); + protected void runInContext() { + TransactionLegacy txn = TransactionLegacy.open("ClusterHeartbeat"); try { Profiler profiler = new Profiler(); Profiler profilerHeartbeatUpdate = new Profiler(); @@ -598,7 +600,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C invalidHeartbeatConnection(); } finally { - txn.transitToAutoManagedConnection(Transaction.CLOUD_DB); + txn.transitToAutoManagedConnection(TransactionLegacy.CLOUD_DB); txn.close("ClusterHeartbeat"); } } @@ -619,7 +621,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C private Connection getHeartbeatConnection() throws SQLException { if(_heartbeatConnection == null) { - Connection conn = Transaction.getStandaloneConnectionWithException(); + Connection conn = TransactionLegacy.getStandaloneConnectionWithException(); _heartbeatConnection = new ConnectionConcierge("ClusterManagerHeartbeat", conn, false); } @@ -628,17 +630,17 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C private void invalidHeartbeatConnection() { if(_heartbeatConnection != null) { - Connection conn = Transaction.getStandaloneConnection(); + Connection conn = 
TransactionLegacy.getStandaloneConnection(); if (conn != null) { - _heartbeatConnection.reset(Transaction.getStandaloneConnection()); + _heartbeatConnection.reset(TransactionLegacy.getStandaloneConnection()); } } } private Runnable getNotificationTask() { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { + protected void runInContext() { while(true) { synchronized(_notificationMsgs) { try { @@ -941,58 +943,54 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C s_logger.info("Starting cluster manager, msid : " + _msId); } - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); + ManagementServerHostVO mshost = Transaction.execute(new TransactionCallback() { + @Override + public ManagementServerHostVO doInTransaction(TransactionStatus status) { - final Class c = this.getClass(); - String version = c.getPackage().getImplementationVersion(); - - ManagementServerHostVO mshost = _mshostDao.findByMsid(_msId); - if (mshost == null) { - mshost = new ManagementServerHostVO(); - mshost.setMsid(_msId); - mshost.setRunid(getCurrentRunId()); - mshost.setName(NetUtils.getHostName()); - mshost.setVersion(version); - mshost.setServiceIP(_clusterNodeIP); - mshost.setServicePort(_currentServiceAdapter.getServicePort()); - mshost.setLastUpdateTime(DateUtil.currentGMTTime()); - mshost.setRemoved(null); - mshost.setAlertCount(0); - mshost.setState(ManagementServerHost.State.Up); - _mshostDao.persist(mshost); - - if (s_logger.isInfoEnabled()) { - s_logger.info("New instance of management server msid " + _msId + " is being started"); + final Class c = this.getClass(); + String version = c.getPackage().getImplementationVersion(); + + ManagementServerHostVO mshost = _mshostDao.findByMsid(_msId); + if (mshost == null) { + mshost = new ManagementServerHostVO(); + mshost.setMsid(_msId); + mshost.setRunid(getCurrentRunId()); + mshost.setName(NetUtils.getHostName()); + mshost.setVersion(version); + 
mshost.setServiceIP(_clusterNodeIP); + mshost.setServicePort(_currentServiceAdapter.getServicePort()); + mshost.setLastUpdateTime(DateUtil.currentGMTTime()); + mshost.setRemoved(null); + mshost.setAlertCount(0); + mshost.setState(ManagementServerHost.State.Up); + _mshostDao.persist(mshost); + + if (s_logger.isInfoEnabled()) { + s_logger.info("New instance of management server msid " + _msId + " is being started"); + } + } else { + if (s_logger.isInfoEnabled()) { + s_logger.info("Management server " + _msId + " is being started"); + } + + _mshostDao.update(mshost.getId(), getCurrentRunId(), NetUtils.getHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(), DateUtil.currentGMTTime()); } - } else { - if (s_logger.isInfoEnabled()) { - s_logger.info("Management server " + _msId + " is being started"); - } - - _mshostDao.update(mshost.getId(), getCurrentRunId(), NetUtils.getHostName(), version, _clusterNodeIP, _currentServiceAdapter.getServicePort(), DateUtil.currentGMTTime()); + + return mshost; } + }); - txn.commit(); - - _mshostId = mshost.getId(); - if (s_logger.isInfoEnabled()) { - s_logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort()); - } - - _mshostPeerDao.clearPeerInfo(_mshostId); - - // use seperate thread for heartbeat updates - _heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HeartbeatInterval.value(), HeartbeatInterval.value(), TimeUnit.MILLISECONDS); - _notificationExecutor.submit(getNotificationTask()); - - } catch (Throwable e) { - s_logger.error("Unexpected exception : ", e); - txn.rollback(); - - throw new CloudRuntimeException("Unable to initialize cluster info into database"); + _mshostId = mshost.getId(); + if (s_logger.isInfoEnabled()) { + s_logger.info("Management server (host id : " + _mshostId + ") is being started at " + _clusterNodeIP + ":" + _currentServiceAdapter.getServicePort()); } + + 
_mshostPeerDao.clearPeerInfo(_mshostId); + + // use seperate thread for heartbeat updates + _heartbeatScheduler.scheduleAtFixedRate(getHeartbeatTask(), HeartbeatInterval.value(), HeartbeatInterval.value(), TimeUnit.MILLISECONDS); + _notificationExecutor.submit(getNotificationTask()); + if (s_logger.isInfoEnabled()) { s_logger.info("Cluster manager was started successfully"); @@ -1034,7 +1032,7 @@ public class ClusterManagerImpl extends ManagerBase implements ClusterManager, C File dbPropsFile = PropertiesUtil.findConfigFile("db.properties"); Properties dbProps = new Properties(); try { - dbProps.load(new FileInputStream(dbPropsFile)); + PropertiesUtil.loadFromFile(dbProps, dbPropsFile); } catch (FileNotFoundException e) { throw new ConfigurationException("Unable to find db.properties"); } catch (IOException e) { diff --git a/framework/cluster/src/com/cloud/cluster/ClusterServiceServletAdapter.java b/framework/cluster/src/com/cloud/cluster/ClusterServiceServletAdapter.java index 67df9461a5f..f80e21f1d34 100644 --- a/framework/cluster/src/com/cloud/cluster/ClusterServiceServletAdapter.java +++ b/framework/cluster/src/com/cloud/cluster/ClusterServiceServletAdapter.java @@ -17,7 +17,6 @@ package com.cloud.cluster; import java.io.File; -import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.rmi.RemoteException; @@ -126,7 +125,7 @@ public class ClusterServiceServletAdapter extends AdapterBase implements Cluster File dbPropsFile = PropertiesUtil.findConfigFile("db.properties"); Properties dbProps = new Properties(); try { - dbProps.load(new FileInputStream(dbPropsFile)); + PropertiesUtil.loadFromFile(dbProps, dbPropsFile); } catch (FileNotFoundException e) { throw new ConfigurationException("Unable to find db.properties"); } catch (IOException e) { diff --git a/framework/cluster/src/com/cloud/cluster/ClusterServiceServletContainer.java b/framework/cluster/src/com/cloud/cluster/ClusterServiceServletContainer.java index 
def3e178116..019d83de8ac 100644 --- a/framework/cluster/src/com/cloud/cluster/ClusterServiceServletContainer.java +++ b/framework/cluster/src/com/cloud/cluster/ClusterServiceServletContainer.java @@ -22,6 +22,7 @@ import java.net.Socket; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.http.ConnectionClosedException; import org.apache.http.HttpException; import org.apache.http.impl.DefaultConnectionReuseStrategy; @@ -129,8 +130,9 @@ public class ClusterServiceServletContainer { final DefaultHttpServerConnection conn = new DefaultHttpServerConnection(); conn.bind(socket, _params); - _executor.execute(new Runnable() { - public void run() { + _executor.execute(new ManagedContextRunnable() { + @Override + protected void runInContext() { HttpContext context = new BasicHttpContext(null); try { while(!Thread.interrupted() && conn.isOpen()) { diff --git a/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java b/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java index 879c4ce3a27..246bfe6bcd4 100644 --- a/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java +++ b/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostDaoImpl.java @@ -38,7 +38,7 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Local(value={ManagementServerHostDao.class}) @@ -52,7 +52,7 @@ public class ManagementServerHostDaoImpl extends GenericDaoBase listOrphanMsids() { List orphanList = new ArrayList(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = 
txn.prepareAutoCloseStatement( diff --git a/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java b/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java index 8ef2e82a943..f51076ceb47 100644 --- a/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java +++ b/framework/cluster/src/com/cloud/cluster/dao/ManagementServerHostPeerDaoImpl.java @@ -28,7 +28,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Local(value={ManagementServerHostPeerDao.class}) public class ManagementServerHostPeerDaoImpl extends GenericDaoBase implements ManagementServerHostPeerDao { @@ -68,7 +68,7 @@ public class ManagementServerHostPeerDaoImpl extends GenericDaoBase + + + + + + + + + + + diff --git a/framework/config/resources/META-INF/cloudstack/system/spring-framework-config-system-context.xml b/framework/config/resources/META-INF/cloudstack/system/spring-framework-config-system-context.xml new file mode 100644 index 00000000000..8d75d27576e --- /dev/null +++ b/framework/config/resources/META-INF/cloudstack/system/spring-framework-config-system-context.xml @@ -0,0 +1,51 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/framework/config/src/org/apache/cloudstack/framework/config/ConfigDepotAdmin.java b/framework/config/src/org/apache/cloudstack/framework/config/ConfigDepotAdmin.java index b4d3773356d..b7fe1253854 100644 --- a/framework/config/src/org/apache/cloudstack/framework/config/ConfigDepotAdmin.java +++ b/framework/config/src/org/apache/cloudstack/framework/config/ConfigDepotAdmin.java @@ -31,6 +31,8 @@ public interface ConfigDepotAdmin { * @see Configuration */ void populateConfigurations(); + + void populateConfiguration(Configurable configurable); List getComponentsInDepot(); } diff --git 
a/framework/config/src/org/apache/cloudstack/framework/config/dao/ConfigurationDao.java b/framework/config/src/org/apache/cloudstack/framework/config/dao/ConfigurationDao.java index 2f4b6e225ff..58e8a69b371 100644 --- a/framework/config/src/org/apache/cloudstack/framework/config/dao/ConfigurationDao.java +++ b/framework/config/src/org/apache/cloudstack/framework/config/dao/ConfigurationDao.java @@ -55,6 +55,8 @@ public interface ConfigurationDao extends GenericDao { public String getValueAndInitIfNotExist(String name, String category, String initValue); + public String getValueAndInitIfNotExist(String name, String category, String initValue, String desc); + /** * returns whether or not this is a premium configuration diff --git a/framework/config/src/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java b/framework/config/src/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java index 26292b757e5..8804740a7af 100644 --- a/framework/config/src/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java +++ b/framework/config/src/org/apache/cloudstack/framework/config/dao/ConfigurationDaoImpl.java @@ -21,12 +21,13 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import javax.annotation.PostConstruct; import javax.ejb.Local; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.framework.config.impl.ConfigurationVO; +import org.springframework.stereotype.Component; import com.cloud.utils.component.ComponentLifecycle; import com.cloud.utils.crypt.DBEncryptionUtil; @@ -34,9 +35,10 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; +@Component @Local(value = {ConfigurationDao.class}) public class 
ConfigurationDaoImpl extends GenericDaoBase implements ConfigurationDao { private static final Logger s_logger = Logger.getLogger(ConfigurationDaoImpl.class); @@ -127,11 +129,19 @@ public class ConfigurationDaoImpl extends GenericDaoBase _configurables; - @Inject List _scopedStorages; + Set _configured = Collections.synchronizedSet(new HashSet()); HashMap>> _allKeys = new HashMap>>(1007); @@ -85,54 +86,60 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin, SystemInt return value != null ? value.second() : null; } + @PostConstruct @Override public void populateConfigurations() { Date date = new Date(); for (Configurable configurable : _configurables) { - for (ConfigKey key : configurable.getConfigKeys()) { - ConfigurationVO vo = _configDao.findById(key.key()); - if (vo == null) { - vo = new ConfigurationVO(configurable.getConfigComponentName(), key); - vo.setUpdated(date); - _configDao.persist(vo); - } else { - if (vo.isDynamic() != key.isDynamic() || - !vo.getDescription().equals(key.description()) || - ((vo.getDefaultValue() != null && key.defaultValue() == null) || - (vo.getDefaultValue() == null && key.defaultValue() != null) || - !vo.getDefaultValue().equals(key.defaultValue()))) { - vo.setDynamic(key.isDynamic()); - vo.setDescription(key.description()); - vo.setDefaultValue(key.defaultValue()); - vo.setUpdated(date); - _configDao.persist(vo); - } - } - } + populateConfiguration(date, configurable); } } + protected void populateConfiguration(Date date, Configurable configurable) { + if ( _configured.contains(configurable) ) + return; + + s_logger.debug("Retrieving keys from " + configurable.getClass().getSimpleName()); + + for (ConfigKey key : configurable.getConfigKeys()) { + Pair> previous = _allKeys.get(key.key()); + if (previous != null && !previous.first().equals(configurable.getConfigComponentName())) { + throw new CloudRuntimeException("Configurable " + configurable.getConfigComponentName() + " is adding a key that has been added 
before by " + previous.first() + + ": " + key.toString()); + } + _allKeys.put(key.key(), new Pair>(configurable.getConfigComponentName(), key)); + + ConfigurationVO vo = _configDao.findById(key.key()); + if (vo == null) { + vo = new ConfigurationVO(configurable.getConfigComponentName(), key); + vo.setUpdated(date); + _configDao.persist(vo); + } else { + if (vo.isDynamic() != key.isDynamic() || + !ObjectUtils.equals(vo.getDescription(), key.description()) || + !ObjectUtils.equals(vo.getDefaultValue(), key.defaultValue())) { + vo.setDynamic(key.isDynamic()); + vo.setDescription(key.description()); + vo.setDefaultValue(key.defaultValue()); + vo.setUpdated(date); + _configDao.persist(vo); + } + } + } + + _configured.add(configurable); + } + + @Override + public void populateConfiguration(Configurable configurable) { + populateConfiguration(new Date(), configurable); + } + @Override public List getComponentsInDepot() { return new ArrayList(); } - @Override - @PostConstruct - public void check() { - for (Configurable configurable : _configurables) { - s_logger.debug("Retrieving keys from " + configurable.getClass().getSimpleName()); - for (ConfigKey key : configurable.getConfigKeys()) { - Pair> previous = _allKeys.get(key.key()); - if (previous != null && !previous.first().equals(configurable.getConfigComponentName())) { - throw new CloudRuntimeException("Configurable " + configurable.getConfigComponentName() + " is adding a key that has been added before by " + previous.first() + - ": " + key.toString()); - } - _allKeys.put(key.key(), new Pair>(configurable.getConfigComponentName(), key)); - } - } - } - public ConfigurationDao global() { return _configDao; } @@ -146,4 +153,23 @@ public class ConfigDepotImpl implements ConfigDepot, ConfigDepotAdmin, SystemInt throw new CloudRuntimeException("Unable to find config storage for this scope: " + config.scope() + " for " + config.key()); } + + public List getScopedStorages() { + return _scopedStorages; + } + + @Inject + public 
void setScopedStorages(List scopedStorages) { + this._scopedStorages = scopedStorages; + } + + public List getConfigurables() { + return _configurables; + } + + @Inject + public void setConfigurables(List configurables) { + this._configurables = configurables; + } + } diff --git a/framework/config/test/org/apache/cloudstack/framework/config/impl/ConfigDepotAdminTest.java b/framework/config/test/org/apache/cloudstack/framework/config/impl/ConfigDepotAdminTest.java index 1c5fbe5c807..5a7f1768e14 100644 --- a/framework/config/test/org/apache/cloudstack/framework/config/impl/ConfigDepotAdminTest.java +++ b/framework/config/test/org/apache/cloudstack/framework/config/impl/ConfigDepotAdminTest.java @@ -95,6 +95,7 @@ public class ConfigDepotAdminTest extends TestCase { verify(_configDao, times(1)).persist(any(ConfigurationVO.class)); when(_configDao.findById(DynamicIntCK.key())).thenReturn(dynamicIntCV); + _depotAdmin._configured.clear(); _depotAdmin.populateConfigurations(); // This is two because DynamicIntCK also returns null. 
verify(_configDao, times(2)).persist(any(ConfigurationVO.class)); diff --git a/framework/db/resources/META-INF/cloudstack/system/spring-framework-db-system-context.xml b/framework/db/resources/META-INF/cloudstack/system/spring-framework-db-system-context.xml new file mode 100644 index 00000000000..651b8742a33 --- /dev/null +++ b/framework/db/resources/META-INF/cloudstack/system/spring-framework-db-system-context.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/framework/db/src/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java b/framework/db/src/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java index 9b13eb8b155..b7246a95805 100755 --- a/framework/db/src/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java +++ b/framework/db/src/com/cloud/utils/crypt/EncryptionSecretKeyChanger.java @@ -39,6 +39,7 @@ import org.jasypt.properties.EncryptableProperties; import com.cloud.utils.PropertiesUtil; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import org.apache.commons.configuration.ConfigurationException; @@ -227,7 +228,7 @@ public class EncryptionSecretKeyChanger { initEncryptor(newEncryptor, newDBKey); System.out.println("Initialised Encryptors"); - Transaction txn = Transaction.open("Migrate"); + TransactionLegacy txn = TransactionLegacy.open("Migrate"); txn.start(); try { Connection conn; diff --git a/framework/db/src/com/cloud/utils/db/ConnectionConcierge.java b/framework/db/src/com/cloud/utils/db/ConnectionConcierge.java index 029433453bf..acb9cc6851b 100644 --- a/framework/db/src/com/cloud/utils/db/ConnectionConcierge.java +++ b/framework/db/src/com/cloud/utils/db/ConnectionConcierge.java @@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicInteger; import javax.management.StandardMBean; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -177,7 +178,7 @@ 
public class ConnectionConcierge { return "Not Found"; } - Connection conn = Transaction.getStandaloneConnection(); + Connection conn = TransactionLegacy.getStandaloneConnection(); if (conn == null) { return "Unable to get anotehr db connection"; } @@ -198,10 +199,9 @@ public class ConnectionConcierge { _executor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("ConnectionConcierge")); - _executor.scheduleAtFixedRate(new Runnable() { - + _executor.scheduleAtFixedRate(new ManagedContextRunnable() { @Override - public void run() { + protected void runInContext() { s_logger.trace("connection concierge keep alive task"); for (Map.Entry entry : _conns.entrySet()) { ConnectionConcierge concierge = entry.getValue(); diff --git a/framework/db/src/com/cloud/utils/db/DB.java b/framework/db/src/com/cloud/utils/db/DB.java index f83a7ea7eb4..b67f93e814a 100644 --- a/framework/db/src/com/cloud/utils/db/DB.java +++ b/framework/db/src/com/cloud/utils/db/DB.java @@ -36,24 +36,8 @@ import java.lang.annotation.Target; * _dao.acquireInLockTable(id); * ... * _dao.releaseFromLockTable(id); - * - * 3. Annotate methods that are inside a DAO but doesn't use - * the Transaction class. Generally, these are methods - * that are utility methods for setting up searches. In - * this case use @DB(txn=false) to annotate the method. - * While this is not required, it helps when you're debugging - * the code and it saves on method calls during runtime. - * */ @Target({TYPE, METHOD}) @Retention(RUNTIME) public @interface DB { - /** - * (Optional) Specifies that the method - * does not use transaction. This is useful for - * utility methods within DAO classes which are - * automatically marked with @DB. By marking txn=false, - * the method is not surrounded with transaction code. 
- */ - boolean txn() default true; } diff --git a/framework/db/src/com/cloud/utils/db/DbUtil.java b/framework/db/src/com/cloud/utils/db/DbUtil.java index da0efbbe8cb..25700933080 100755 --- a/framework/db/src/com/cloud/utils/db/DbUtil.java +++ b/framework/db/src/com/cloud/utils/db/DbUtil.java @@ -56,7 +56,7 @@ public class DbUtil { assert(false); } - Connection connection = Transaction.getStandaloneConnection(); + Connection connection = TransactionLegacy.getStandaloneConnection(); if(connection != null) { try { connection.setAutoCommit(true); diff --git a/framework/db/src/com/cloud/utils/db/GenericDao.java b/framework/db/src/com/cloud/utils/db/GenericDao.java index f32880f6ad4..ef25d7f624f 100755 --- a/framework/db/src/com/cloud/utils/db/GenericDao.java +++ b/framework/db/src/com/cloud/utils/db/GenericDao.java @@ -258,10 +258,6 @@ public interface GenericDao { public K getRandomlyIncreasingNextInSequence(Class clazz, String name); - SearchCriteria2 createSearchCriteria2(Class resultType); - - SearchCriteria2 createSearchCriteria2(); - public T findOneBy(final SearchCriteria sc); /** @@ -269,8 +265,6 @@ public interface GenericDao { */ Class getEntityBeanType(); - public int getRegionId(); - /** * @param sc * @param filter diff --git a/framework/db/src/com/cloud/utils/db/GenericDaoBase.java b/framework/db/src/com/cloud/utils/db/GenericDaoBase.java index a566ec54ce6..ba5200ea65f 100755 --- a/framework/db/src/com/cloud/utils/db/GenericDaoBase.java +++ b/framework/db/src/com/cloud/utils/db/GenericDaoBase.java @@ -59,6 +59,7 @@ import net.sf.cglib.proxy.Callback; import net.sf.cglib.proxy.CallbackFilter; import net.sf.cglib.proxy.Enhancer; import net.sf.cglib.proxy.Factory; +import net.sf.cglib.proxy.MethodInterceptor; import net.sf.cglib.proxy.NoOp; import net.sf.ehcache.Cache; import net.sf.ehcache.CacheManager; @@ -167,13 +168,9 @@ public abstract class GenericDaoBase extends Compone } @Override - @SuppressWarnings("unchecked") @DB(txn=false) + 
@SuppressWarnings("unchecked") @DB() public GenericSearchBuilder createSearchBuilder(Class resultType) { - final T entity = (T)_searchEnhancer.create(); - final Factory factory = (Factory)entity; - GenericSearchBuilder builder = new GenericSearchBuilder(entity, resultType, _allAttributes); - factory.setCallback(0, builder); - return builder; + return new GenericSearchBuilder(_entityBeanType, resultType); } @Override @@ -181,6 +178,15 @@ public abstract class GenericDaoBase extends Compone return _allAttributes; } + + @SuppressWarnings("unchecked") + public T createSearchEntity(MethodInterceptor interceptor) { + T entity = (T)_searchEnhancer.create(); + final Factory factory = (Factory)entity; + factory.setCallback(0, interceptor); + return entity; + } + @SuppressWarnings("unchecked") protected GenericDaoBase() { super(); @@ -276,7 +282,7 @@ public abstract class GenericDaoBase extends Compone setRunLevel(ComponentLifecycle.RUN_LEVEL_SYSTEM); } - @Override @DB(txn=false) + @Override @DB() @SuppressWarnings("unchecked") public T createForUpdate(final ID id) { final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(this)}); @@ -290,12 +296,12 @@ public abstract class GenericDaoBase extends Compone return entity; } - @Override @DB(txn=false) + @Override @DB() public T createForUpdate() { return createForUpdate(null); } - @Override @DB(txn=false) + @Override @DB() public K getNextInSequence(final Class clazz, final String name) { final TableGenerator tg = _tgs.get(name); assert (tg != null) : "Couldn't find Table generator using " + name; @@ -303,7 +309,7 @@ public abstract class GenericDaoBase extends Compone return s_seqFetcher.getNextSequence(clazz, tg); } - @Override @DB(txn=false) + @Override @DB() public K getRandomlyIncreasingNextInSequence(final Class clazz, final String name) { final TableGenerator tg = _tgs.get(name); assert (tg != null) : "Couldn't find Table generator using " + name; @@ -311,19 +317,19 @@ public abstract 
class GenericDaoBase extends Compone return s_seqFetcher.getRandomNextSequence(clazz, tg); } - @Override @DB(txn=false) + @Override @DB() public List lockRows(final SearchCriteria sc, final Filter filter, final boolean exclusive) { return search(sc, filter, exclusive, false); } - @Override @DB(txn=false) + @Override @DB() public T lockOneRandomRow(final SearchCriteria sc, final boolean exclusive) { final Filter filter = new Filter(1); final List beans = search(sc, filter, exclusive, true); return beans.isEmpty() ? null : beans.get(0); } - @DB(txn=false) + @DB() protected List search(SearchCriteria sc, final Filter filter, final Boolean lock, final boolean cache) { if (_removed != null) { if (sc == null) { @@ -334,7 +340,7 @@ public abstract class GenericDaoBase extends Compone return searchIncludingRemoved(sc, filter, lock, cache); } - @DB(txn=false) + @DB() protected List search(SearchCriteria sc, final Filter filter, final Boolean lock, final boolean cache, final boolean enable_query_cache) { if (_removed != null) { if (sc == null) { @@ -374,7 +380,7 @@ public abstract class GenericDaoBase extends Compone List groupByValues = addGroupBy(str, sc); addFilter(str, filter); - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); if (lock != null) { assert (txn.dbTxnStarted() == true) : "As nice as I can here now....how do you lock when there's no DB transaction? Review your db 101 course from college."; str.append(lock ? 
FOR_UPDATE_CLAUSE : SHARE_MODE_CLAUSE); @@ -446,7 +452,7 @@ public abstract class GenericDaoBase extends Compone final String sql = str.toString(); - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); @@ -493,7 +499,7 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public List customSearch(SearchCriteria sc, final Filter filter) { if (_removed != null) { sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL); @@ -502,7 +508,7 @@ public abstract class GenericDaoBase extends Compone return customSearchIncludingRemoved(sc, filter); } - @DB(txn=false) + @DB() protected void setField(Object entity, Field field, ResultSet rs, int index) throws SQLException { try { final Class type = field.getType(); @@ -646,7 +652,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) @SuppressWarnings("unchecked") + @DB() @SuppressWarnings("unchecked") protected M getObject(Class type, ResultSet rs, int index) throws SQLException { if (type == String.class) { byte[] bytes = rs.getBytes(index); @@ -738,7 +744,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected int addJoinAttributes(int count, PreparedStatement pstmt, Collection>> joins) throws SQLException { for (JoinBuilder> join : joins) { for (final Pair value : join.getT().getValues()) { @@ -764,7 +770,7 @@ public abstract class GenericDaoBase extends Compone } SearchCriteria sc = createSearchCriteria(); sc.addAnd(_idAttributes.get(_table)[0], SearchCriteria.Op.EQ, id); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); try { @@ -785,7 +791,7 @@ public abstract class GenericDaoBase extends Compone public int update(UpdateBuilder ub, final SearchCriteria sc, Integer rows) { StringBuilder 
sql = null; PreparedStatement pstmt = null; - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); try { final String searchClause = sc.getWhereClause(); @@ -826,12 +832,12 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected Attribute findAttributeByFieldName(String name) { return _allAttributes.get(name); } - @DB(txn=false) + @DB() protected String buildSelectByIdSql(final StringBuilder sql) { if (_idField == null) { return null; @@ -851,13 +857,13 @@ public abstract class GenericDaoBase extends Compone return sql.toString(); } - @DB(txn=false) + @DB() @Override public Class getEntityBeanType() { return _entityBeanType; } - @DB(txn=false) + @DB() protected T findOneIncludingRemovedBy(final SearchCriteria sc) { Filter filter = new Filter(1); List results = searchIncludingRemoved(sc, filter, null, false); @@ -866,7 +872,7 @@ public abstract class GenericDaoBase extends Compone } @Override - @DB(txn=false) + @DB() public T findOneBy(final SearchCriteria sc) { if (_removed != null) { sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL); @@ -874,7 +880,7 @@ public abstract class GenericDaoBase extends Compone return findOneIncludingRemovedBy(sc); } - @DB(txn=false) + @DB() protected List listBy(final SearchCriteria sc, final Filter filter) { if (_removed != null) { sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL); @@ -882,7 +888,7 @@ public abstract class GenericDaoBase extends Compone return listIncludingRemovedBy(sc, filter); } - @DB(txn=false) + @DB() protected List listBy(final SearchCriteria sc, final Filter filter, final boolean enable_query_cache) { if (_removed != null) { sc.addAnd(_removed.second().field.getName(), SearchCriteria.Op.NULL); @@ -890,27 +896,27 @@ public abstract class GenericDaoBase extends Compone return listIncludingRemovedBy(sc, filter, enable_query_cache); } - @DB(txn=false) + @DB() protected List 
listBy(final SearchCriteria sc) { return listBy(sc, null); } - @DB(txn=false) + @DB() protected List listIncludingRemovedBy(final SearchCriteria sc, final Filter filter, final boolean enable_query_cache) { return searchIncludingRemoved(sc, filter, null, false, enable_query_cache); } - @DB(txn=false) + @DB() protected List listIncludingRemovedBy(final SearchCriteria sc, final Filter filter) { return searchIncludingRemoved(sc, filter, null, false); } - @DB(txn=false) + @DB() protected List listIncludingRemovedBy(final SearchCriteria sc) { return listIncludingRemovedBy(sc, null); } - @Override @DB(txn=false) + @Override @DB() @SuppressWarnings("unchecked") public T findById(final ID id) { if (_cache != null) { @@ -921,28 +927,26 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) - @SuppressWarnings("unchecked") + @Override @DB() public T findByUuid(final String uuid) { SearchCriteria sc = createSearchCriteria(); sc.addAnd("uuid", SearchCriteria.Op.EQ, uuid); return findOneBy(sc); } - @Override @DB(txn=false) - @SuppressWarnings("unchecked") + @Override @DB() public T findByUuidIncludingRemoved(final String uuid) { SearchCriteria sc = createSearchCriteria(); sc.addAnd("uuid", SearchCriteria.Op.EQ, uuid); return findOneIncludingRemovedBy(sc); } - @Override @DB(txn=false) + @Override @DB() public T findByIdIncludingRemoved(ID id) { return findById(id, true, null); } - @Override @DB(txn=false) + @Override @DB() public T findById(final ID id, boolean fresh) { if(!fresh) { return findById(id); @@ -954,7 +958,7 @@ public abstract class GenericDaoBase extends Compone return lockRow(id, null); } - @Override @DB(txn=false) + @Override @DB() public T lockRow(ID id, Boolean lock) { return findById(id, false, lock); } @@ -967,7 +971,7 @@ public abstract class GenericDaoBase extends Compone if (lock != null) { sql.append(lock ? 
FOR_UPDATE_CLAUSE : SHARE_MODE_CLAUSE); } - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql.toString()); @@ -983,14 +987,14 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public T acquireInLockTable(ID id) { return acquireInLockTable(id, _timeoutSeconds); } @Override public T acquireInLockTable(final ID id, int seconds) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); T t = null; boolean locked = false; try { @@ -1010,35 +1014,35 @@ public abstract class GenericDaoBase extends Compone @Override public boolean releaseFromLockTable(final ID id) { - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); return txn.release(_table + id); } - @Override @DB(txn=false) + @Override @DB() public boolean lockInLockTable(final String id) { return lockInLockTable(id, _timeoutSeconds); } @Override public boolean lockInLockTable(final String id, int seconds) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); return txn.lock(_table + id, seconds); } @Override public boolean unlockFromLockTable(final String id) { - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); return txn.release(_table + id); } - @Override @DB(txn=false) + @Override @DB() public List listAllIncludingRemoved() { return listAllIncludingRemoved(null); } - @DB(txn=false) + @DB() protected List addGroupBy(final StringBuilder sql, SearchCriteria sc) { - Pair, List> groupBys = sc.getGroupBy(); + Pair, List> groupBys = sc.getGroupBy(); if (groupBys != null) { groupBys.first().toSql(sql); return groupBys.second(); @@ -1047,7 +1051,7 @@ public abstract class GenericDaoBase extends Compone } } - 
@DB(txn=false) + @DB() protected void addFilter(final StringBuilder sql, final Filter filter) { if (filter != null) { if (filter.getOrderBy() != null) { @@ -1063,7 +1067,7 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public List listAllIncludingRemoved(final Filter filter) { final StringBuilder sql = createPartialSelectSql(null, false); addFilter(sql, filter); @@ -1072,7 +1076,7 @@ public abstract class GenericDaoBase extends Compone } protected List executeList(final String sql, final Object... params) { - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; final List result = new ArrayList(); try { @@ -1094,12 +1098,12 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public List listAll() { return listAll(null); } - @Override @DB(txn=false) + @Override @DB() public List listAll(final Filter filter) { if (_removed == null) { return listAllIncludingRemoved(filter); @@ -1114,7 +1118,7 @@ public abstract class GenericDaoBase extends Compone @Override public boolean expunge(final ID id) { - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = null; try { @@ -1154,7 +1158,7 @@ public abstract class GenericDaoBase extends Compone final String sql = str.toString(); - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); @@ -1170,7 +1174,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected StringBuilder createPartialSelectSql(SearchCriteria sc, final boolean whereClause, final boolean enable_query_cache) { StringBuilder sql = new StringBuilder(enable_query_cache ? 
_partialQueryCacheSelectSql.first() : _partialSelectSql.first()); if (sc != null && !sc.isSelectAll()) { @@ -1185,7 +1189,7 @@ public abstract class GenericDaoBase extends Compone return sql; } - @DB(txn=false) + @DB() protected StringBuilder createPartialSelectSql(SearchCriteria sc, final boolean whereClause) { StringBuilder sql = new StringBuilder(_partialSelectSql.first()); if (sc != null && !sc.isSelectAll()) { @@ -1201,7 +1205,7 @@ public abstract class GenericDaoBase extends Compone } - @DB(txn = false) + @DB() protected void addJoins(StringBuilder str, Collection>> joins) { int fromIndex = str.lastIndexOf("WHERE"); if (fromIndex == -1) { @@ -1234,24 +1238,24 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public List search(final SearchCriteria sc, final Filter filter) { return search(sc, filter, null, false); } - @Override @DB(txn=false) + @Override @DB() public Pair, Integer> searchAndCount(final SearchCriteria sc, final Filter filter) { List objects = search(sc, filter, null, false); Integer count = getCount(sc); return new Pair, Integer>(objects, count); } - @Override @DB(txn=false) + @Override @DB() public List search(final SearchCriteria sc, final Filter filter, final boolean enable_query_cache) { return search(sc, filter, null, false, enable_query_cache); } - @Override @DB(txn=false) + @Override @DB() public boolean update(ID id, T entity) { assert Enhancer.isEnhanced(entity.getClass()) : "Entity is not generated by this dao"; @@ -1260,14 +1264,14 @@ public abstract class GenericDaoBase extends Compone return result; } - @DB(txn=false) + @DB() public int update(final T entity, final SearchCriteria sc, Integer rows) { final UpdateBuilder ub = getUpdateBuilder(entity); return update(ub, sc, rows); } @Override - @DB(txn=false) + @DB() public int update(final T entity, final SearchCriteria sc) { final UpdateBuilder ub = getUpdateBuilder(entity); return update(ub, sc, null); @@ -1292,7 +1296,7 @@ 
public abstract class GenericDaoBase extends Compone } ID id = null; - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; String sql = null; try { @@ -1355,14 +1359,14 @@ public abstract class GenericDaoBase extends Compone } protected void insertElementCollection(T entity, Attribute idAttribute, ID id, Map ecAttributes) throws SQLException { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); for (Map.Entry entry : ecAttributes.entrySet()) { Attribute attr = entry.getKey(); Object obj = entry.getValue(); EcInfo ec = (EcInfo)attr.attache; - Enumeration en = null; + Enumeration en = null; if (ec.rawClass == null) { en = Collections.enumeration(Arrays.asList((Object[])obj)); } else { @@ -1386,7 +1390,7 @@ public abstract class GenericDaoBase extends Compone txn.commit(); } - @DB(txn=false) + @DB() protected Object generateValue(final Attribute attr) { if (attr.is(Attribute.Flag.Created) || attr.is(Attribute.Flag.Removed)) { return new Date(); @@ -1410,7 +1414,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected void prepareAttribute(final int j, final PreparedStatement pstmt, final Attribute attr, Object value) throws SQLException { if (attr.is(Attribute.Flag.DaoGenerated) && value == null) { value = generateValue(attr); @@ -1515,7 +1519,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected int prepareAttributes(final PreparedStatement pstmt, final Object entity, final Attribute[] attrs, final int index) throws SQLException { int j = 0; for (int i = 0; i < attrs.length; i++) { @@ -1532,7 +1536,7 @@ public abstract class GenericDaoBase extends Compone return j; } - @SuppressWarnings("unchecked") @DB(txn=false) + @SuppressWarnings("unchecked") @DB() protected T toEntityBean(final ResultSet result, final boolean cache) throws 
SQLException { final T entity = (T)_factory.newInstance(new Callback[] {NoOp.INSTANCE, new UpdateBuilder(this)}); @@ -1549,7 +1553,7 @@ public abstract class GenericDaoBase extends Compone return entity; } - @DB(txn=false) + @DB() protected T toVO(ResultSet result, boolean cache) throws SQLException { T entity; try { @@ -1571,7 +1575,7 @@ public abstract class GenericDaoBase extends Compone return entity; } - @DB(txn=false) + @DB() protected void toEntityBean(final ResultSet result, final T entity) throws SQLException { ResultSetMetaData meta = result.getMetaData(); for (int index = 1, max = meta.getColumnCount(); index <= max; index++) { @@ -1582,12 +1586,12 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn = true) + @DB() @SuppressWarnings("unchecked") protected void loadCollection(T entity, Attribute attr) { EcInfo ec = (EcInfo)attr.attache; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); ResultSet rs = null; PreparedStatement pstmt = null; try { @@ -1671,7 +1675,7 @@ public abstract class GenericDaoBase extends Compone } final StringBuilder sql = new StringBuilder("DELETE FROM "); sql.append(_table).append(" WHERE ").append(_removed.first()).append(" IS NOT NULL"); - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { txn.start(); @@ -1684,7 +1688,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected void setField(final Object entity, final ResultSet rs, ResultSetMetaData meta, final int index) throws SQLException { Attribute attr = _allColumns.get(new Pair(meta.getTableName(index), meta.getColumnName(index))); if ( attr == null ){ @@ -1704,7 +1708,7 @@ public abstract class GenericDaoBase extends Compone return expunge(id); } - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); 
PreparedStatement pstmt = null; try { @@ -1741,7 +1745,7 @@ public abstract class GenericDaoBase extends Compone } protected Cache _cache; - @DB(txn=false) + @DB() protected void createCache(final Map params) { final String value = (String)params.get("cache.size"); @@ -1758,7 +1762,7 @@ public abstract class GenericDaoBase extends Compone } } - @Override @DB(txn=false) + @Override @DB() public boolean configure(final String name, final Map params) throws ConfigurationException { _name = name; @@ -1774,52 +1778,24 @@ public abstract class GenericDaoBase extends Compone return true; } - @DB(txn=false) + @DB() public static UpdateBuilder getUpdateBuilder(final T entityObject) { final Factory factory = (Factory)entityObject; assert(factory != null); return (UpdateBuilder)factory.getCallback(1); } - @SuppressWarnings("unchecked") - @Override @DB(txn=false) + @Override @DB() public SearchBuilder createSearchBuilder() { - final T entity = (T)_searchEnhancer.create(); - final Factory factory = (Factory)entity; - SearchBuilder builder = new SearchBuilder(entity, _allAttributes); - factory.setCallback(0, builder); - return builder; + return new SearchBuilder(_entityBeanType); } - @Override @DB(txn=false) + @Override @DB() public SearchCriteria createSearchCriteria() { SearchBuilder builder = createSearchBuilder(); return builder.create(); } - @Override @DB(txn=false) - public SearchCriteria2 createSearchCriteria2(Class resultType) { - final T entity = (T)_searchEnhancer.create(); - final Factory factory = (Factory)entity; - SearchCriteria2 sc = new SearchCriteria2(entity, resultType, _allAttributes, this); - factory.setCallback(0, sc); - return sc; - } - - @Override @DB(txn=false) - public SearchCriteria2 createSearchCriteria2() { - final T entity = (T)_searchEnhancer.create(); - final Factory factory = (Factory)entity; - SearchCriteria2 sc = new SearchCriteria2(entity, entity.getClass(), _allAttributes, this); - factory.setCallback(0, sc); - return sc; - } - - @Override - 
public int getRegionId(){ - return Transaction.s_region_id; - } - public Integer getCount(SearchCriteria sc) { String clause = sc != null ? sc.getWhereClause() : null; if (clause != null && clause.length() == 0) { @@ -1841,7 +1817,7 @@ public abstract class GenericDaoBase extends Compone // we have to disable group by in getting count, since count for groupBy clause will be different. //List groupByValues = addGroupBy(str, sc); - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); final String sql = str.toString(); PreparedStatement pstmt = null; @@ -1878,7 +1854,7 @@ public abstract class GenericDaoBase extends Compone } } - @DB(txn=false) + @DB() protected StringBuilder createCountSelect(SearchCriteria sc, final boolean whereClause) { StringBuilder sql = new StringBuilder(_count); diff --git a/framework/db/src/com/cloud/utils/db/GenericQueryBuilder.java b/framework/db/src/com/cloud/utils/db/GenericQueryBuilder.java new file mode 100755 index 00000000000..28cfebc5f94 --- /dev/null +++ b/framework/db/src/com/cloud/utils/db/GenericQueryBuilder.java @@ -0,0 +1,176 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.utils.db; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; + +import com.cloud.utils.db.SearchCriteria.Op; + +/** + * GenericQueryBuilder builds a search query during runtime. It allows the + * search query to be built completely in Java rather than part SQL fragments + * and part entity field like HQL or JPQL. This class is different from + * GenericSearchBuilder in that it is used for building queries during runtime + * where GenericSearchBuilder expects the query to be built during load time + * and parameterized values to be set during runtime. + * + * GenericQueryBuilder allows results to be a native type, the entity bean, + * and a composite type. If you are just retrieving the entity bean, there + * is a simpler class called QueryBuilder that you can use. The usage + * is approximately the same. + * + * + * // Note that in the following search, it selects a func COUNT to be the + * // return result so for the second parameterized type is long. + * // Note the entity object itself must have came from search and + * // it uses the getters of the object to retrieve the field used in the search. + * + * GenericQueryBuilder sc = GenericQueryBuilder.create(HostVO.class, Long.class); + * HostVO entity = CountSearch.entity(); + * sc.select(null, FUNC.COUNT, null, null).where(entity.getType(), Op.EQ, Host.Type.Routing); + * sc.and(entity.getCreated(), Op.LT, new Date()); + * Long count = sc.find(); + * + * * + * + * @see GenericSearchBuilder + * @see QueryBuilder + * + * @param Entity object to perform the search on + * @param Result object + */ +public class GenericQueryBuilder extends SearchBase, T, K> { + final HashMap _params = new HashMap(); + + protected GenericQueryBuilder(Class entityType, Class resultType) { + super(entityType, resultType); + } + + /** + * Creator method for GenericQueryBuilder. 
+ * @param entityType Entity to search on + * @param resultType Result to return + * @return GenericQueryBuilder + */ + @SuppressWarnings("unchecked") + static public GenericQueryBuilder create(Class entityType, Class resultType) { + GenericDao dao = (GenericDao)GenericDaoBase.getDao(entityType); + assert dao != null : "Can not find DAO for " + entityType.getName(); + return new GenericQueryBuilder(entityType, resultType); + } + + /** + * Adds AND search condition + * + * @param field the field of the entity to perform the search on. + * @param op operator + * @param values parameterized values + * @return this + */ + public GenericQueryBuilder and(Object field, Op op, Object... values) { + String uuid = UUID.randomUUID().toString(); + constructCondition(uuid, " AND ", _specifiedAttrs.get(0), op); + _params.put(uuid, values); + return this; + } + + /** + * Adds OR search condition + * + * @param field the field of the entity to perform the search on. + * @param op operator + * @param values parameterized values + * @return this + */ + public GenericQueryBuilder or(Object field, Op op, Object... values) { + String uuid = UUID.randomUUID().toString(); + constructCondition(uuid, " OR ", _specifiedAttrs.get(0), op); + _params.put(uuid, values); + return this; + } + + protected GenericQueryBuilder left(Object field, Op op, Object... values) { + String uuid = UUID.randomUUID().toString(); + constructCondition(uuid, " ( ", _specifiedAttrs.get(0), op); + _params.put(uuid, values); + return this; + } + + /** + * Adds search condition that starts with an open parenthesis. Call cp() + * to close the parenthesis. + * + * @param field the field of the entity to perform the search on. + * @param op operator + * @param values parameterized values + * @return this + */ + public GenericQueryBuilder op(Object field, Op op, Object... values) { + return left(field, op, values); + } + + /** + * If the query is supposed to return a list, use this. 
+ * @return List of result objects + */ + @SuppressWarnings("unchecked") + public List list() { + finalize(); + if (isSelectAll()) { + @SuppressWarnings("rawtypes") + SearchCriteria sc1 = create(); + return (List)_dao.search(sc1, null); + } else { + SearchCriteria sc1 = create(); + return _dao.customSearch(sc1, null); + } + } + + /** + * Creates a SearchCriteria to be used with dao objects. + */ + @Override + public SearchCriteria create() { + SearchCriteria sc = super.create(); + sc.setParameters(_params); + return sc; + } + + private boolean isSelectAll() { + return _selects == null || _selects.size() == 0; + } + + /** + * Convenience method to find the result so the result won't be a list. + * @return result as specified. + */ + @SuppressWarnings("unchecked") + public K find() { + finalize(); + if (isSelectAll()) { + @SuppressWarnings("rawtypes") + SearchCriteria sc1 = create(); + return (K)_dao.findOneBy(sc1); + } else { + List lst = list(); + return lst.get(0); + } + } +} diff --git a/framework/db/src/com/cloud/utils/db/GenericSearchBuilder.java b/framework/db/src/com/cloud/utils/db/GenericSearchBuilder.java index bf28144236f..72ad278730b 100755 --- a/framework/db/src/com/cloud/utils/db/GenericSearchBuilder.java +++ b/framework/db/src/com/cloud/utils/db/GenericSearchBuilder.java @@ -16,516 +16,216 @@ // under the License. 
package com.cloud.utils.db; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; import java.util.UUID; -import javax.persistence.Column; -import javax.persistence.Transient; - -import net.sf.cglib.proxy.Factory; -import net.sf.cglib.proxy.MethodInterceptor; -import net.sf.cglib.proxy.MethodProxy; - -import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria.SelectType; /** - * GenericSearchBuilder is used to build a search based on a VO object - * a convenience class provided called SearchBuilder that provides - * exactly that functionality. + * GenericSearchBuilder is used to build a search based on a VO object. It + * can select the result into a native type, the entity object, or a composite + * object depending on what's needed. + * + * The way to use GenericSearchBuilder is to use it to build a search at load + * time so it should be declared at class constructions. It allows queries to + * be constructed completely in Java and parameters have String tokens that + * can be replaced during runtime with SearchCriteria. Because + * GenericSearchBuilder is created at load time and SearchCriteria is used + * at runtime, the search query creation and the parameter value setting are + * separated in the code. While that's tougher on the coder to maintain, what + * you gain is that all string constructions are done at load time rather than + * runtime and, more importantly, the proper construction can be checked when + * components are being loaded. However, if you prefer to just construct + * the entire search at runtime, you can use GenericQueryBuilder. + * + * + * // To specify the GenericSearchBuilder, you should do this at load time. + * // Note that in the following search, it selects a func COUNT to be the + * // return result so for the second parameterized type is long. 
It also + * // presets the type in the search and declares created to be set during + * // runtime. Note the entity object itself must have came from search and + * // it uses the getters of the object to retrieve the field used in the search. + * + * GenericSearchBuilder CountSearch = _hostDao.createSearchBuilder(Long.class); + * HostVO entity = CountSearch.entity(); + * CountSearch.select(null, FUNC.COUNT, null, null).where(entity.getType(), Op.EQ).value(Host.Type.Routing); + * CountSearch.and(entity.getCreated(), Op.LT, "create_date").done(); + * + * // Later in the code during runtime + * SearchCriteria sc = CountSearch.create(); + * sc.setParameter("create_date", new Date()); + * Long count = _hostDao.customizedSearch(sc, null); + * + * + * @see GenericQueryBuilder for runtime construction of search query + * @see SearchBuilder for returning VO objects itself * * @param VO object this Search is build for. * @param Result object that should contain the results. */ -public class GenericSearchBuilder implements MethodInterceptor { - final protected Map _attrs; - - protected ArrayList _conditions; - protected HashMap>> _joins; - protected ArrayList(); - } - - for (Attribute attr : _specifiedAttrs) { - Field field = null; - try { - field = _resultType.getDeclaredField(attr.field.getName()); - field.setAccessible(true); - } catch (SecurityException e) { - } catch (NoSuchFieldException e) { - } - _selects.add(new Select(Func.NATIVE, attr, field, null)); - } - - _specifiedAttrs.clear(); - - return this; - } - -// public GenericSearchBuilder selectField(String joinName, Object... entityFields) { -// JoinBuilder> jb = _joins.get(joinName); -// -// } - - /** - * Specifies the field to select. - * - * @param fieldName The field name of the result object to put the value of the field selected. This can be null if you're selecting only one field and the result is not a complex object. - * @param func function to place. - * @param useless column to select. 
Call this with this.entity() method. - * @param params parameters to the function. - * @return a SearchBuilder to build more search parts. - */ - public GenericSearchBuilder select(String fieldName, Func func, Object useless, Object... params) { - if (_entity == null) { - throw new RuntimeException("SearchBuilder cannot be modified once it has been setup"); - } - if (_specifiedAttrs.size() > 1) { - throw new RuntimeException("You can't specify more than one field to search on"); - } - if (func.getCount() != -1 && (func.getCount() != (params.length + 1))) { - throw new RuntimeException("The number of parameters does not match the function param count for " + func); - } - - if (_selects == null) { - _selects = new ArrayList _selects; + protected GroupBy _groupBy = null; + protected SelectType _selectType; + T _entity; + + SearchBase(Class entityType, Class resultType) { + _dao = (GenericDaoBase)GenericDaoBase.getDao(entityType); + if (_dao == null) { + throw new CloudRuntimeException("Unable to find DAO for " + entityType); + } + + _entityBeanType = entityType; + _resultType = resultType; + _attrs = _dao.getAllAttributes(); + + _entity = _dao.createSearchEntity(new Interceptor()); + _conditions = new ArrayList(); + _joins = null; + _specifiedAttrs = new ArrayList(); + } + + /** + * Specifies how the search query should be grouped + * + * @param fields fields of the entity object that should be grouped on. The order is important. + * @return GroupBy object to perform more operations on. + * @see GroupBy + */ + @SuppressWarnings("unchecked") + public GroupBy groupBy(Object... fields) { + assert _groupBy == null : "Can't do more than one group bys"; + _groupBy = new GroupBy((J)this); + return _groupBy; + } + + /** + * Specifies what to select in the search. + * + * @param fieldName The field name of the result object to put the value of the field selected. This can be null if you're selecting only one field and the result is not a complex object. 
+ * @param func function to place. + * @param field column to select. Call this with this.entity() method. + * @param params parameters to the function. + * @return itself to build more search parts. + */ + @SuppressWarnings("unchecked") + public J select(String fieldName, Func func, Object field, Object... params) { + if (_entity == null) { + throw new RuntimeException("SearchBuilder cannot be modified once it has been setup"); + } + if (_specifiedAttrs.size() > 1) { + throw new RuntimeException("You can't specify more than one field to search on"); + } + if (func.getCount() != -1 && (func.getCount() != (params.length + 1))) { + throw new RuntimeException("The number of parameters does not match the function param count for " + func); + } + + if (_selects == null) { + _selects = new ArrayList(); + } + + for (Attribute attr : _specifiedAttrs) { + Field field = null; + try { + field = _resultType.getDeclaredField(attr.field.getName()); + field.setAccessible(true); + } catch (SecurityException e) { + } catch (NoSuchFieldException e) { + } + _selects.add(new Select(Func.NATIVE, attr, field, null)); + } + + _specifiedAttrs.clear(); + + return (J)this; + } + + /** + * joins this search with another search + * + * @param name name given to the other search. used for setJoinParameters. 
+ * @param builder The other search + * @param joinField1 field of the first table used to perform the join + * @param joinField2 field of the second table used to perform the join + * @param joinType type of join + * @return itself + */ + @SuppressWarnings("unchecked") + public J join(String name, SearchBase builder, Object joinField1, Object joinField2, JoinBuilder.JoinType joinType) { + assert _entity != null : "SearchBuilder cannot be modified once it has been setup"; + assert _specifiedAttrs.size() == 1 : "You didn't select the attribute."; + assert builder._entity != null : "SearchBuilder cannot be modified once it has been setup"; + assert builder._specifiedAttrs.size() == 1 : "You didn't select the attribute."; + assert builder != this : "You can't add yourself, can you? Really think about it!"; + + JoinBuilder> t = new JoinBuilder>(builder, _specifiedAttrs.get(0), builder._specifiedAttrs.get(0), joinType); + if (_joins == null) { + _joins = new HashMap>>(); + } + _joins.put(name, t); + + builder._specifiedAttrs.clear(); + _specifiedAttrs.clear(); + return (J)this; + } + + public SelectType getSelectType() { + return _selectType; + } + + protected void set(String name) { + Attribute attr = _attrs.get(name); + assert (attr != null) : "Searching for a field that's not there: " + name; + _specifiedAttrs.add(attr); + } + + /** + * @return entity object. This allows the caller to use the entity return + * to specify the field to be selected in many of the search parameters. 
+ */ + public T entity() { + return _entity; + } + + protected Attribute getSpecifiedAttribute() { + if (_entity == null || _specifiedAttrs == null || _specifiedAttrs.size() != 1) { + throw new RuntimeException("Now now, better specify an attribute or else we can't help you"); + } + return _specifiedAttrs.get(0); + } + + protected List getSpecifiedAttributes() { + return _specifiedAttrs; + } + + protected Condition constructCondition(String conditionName, String cond, Attribute attr, Op op) { + assert _entity != null : "SearchBuilder cannot be modified once it has been setup"; + assert op == null || _specifiedAttrs.size() == 1 : "You didn't select the attribute."; + assert op != Op.SC : "Call join"; + + Condition condition = new Condition(conditionName, cond, attr, op); + _conditions.add(condition); + _specifiedAttrs.clear(); + return condition; + } + + /** + * creates the SearchCriteria so the actual values can be filled in. + * + * @return SearchCriteria + */ + public SearchCriteria create() { + if (_entity != null) { + finalize(); + } + return new SearchCriteria(this); + } + + /** + * Adds an OR condition to the search. Normally you should use this to + * perform an 'OR' with a big conditional in parenthesis. For example, + * + * search.or().op(entity.getId(), Op.Eq, "abc").cp() + * + * The above fragment produces something similar to + * + * "OR (id = $abc) where abc is the token to be replaced by a value later. + * + * @return this + */ + @SuppressWarnings("unchecked") + public J or() { + constructCondition(null, " OR ", null, null); + return (J)this; + } + + /** + * Adds an AND condition to the search. Normally you should use this to + * perform an 'AND' with a big conditional in parenthesis. For example, + * + * search.and().op(entity.getId(), Op.Eq, "abc").cp() + * + * The above fragment produces something similar to + * + * "AND (id = $abc) where abc is the token to be replaced by a value later. 
+ * + * @return this + */ + @SuppressWarnings("unchecked") + public J and() { + constructCondition(null, " AND ", null, null); + return (J)this; + } + + /** + * Closes a parenthesis that's started by op() + * @return this + */ + @SuppressWarnings("unchecked") + public J cp() { + Condition condition = new Condition(null, " ) ", null, Op.RP); + _conditions.add(condition); + return (J)this; + } + + /** + * Writes an open parenthesis into the search + * @return this + */ + @SuppressWarnings("unchecked") + public J op() { + Condition condition = new Condition(null, " ( ", null, Op.RP); + _conditions.add(condition); + return (J)this; + } + + /** + * Marks the SearchBuilder as completed in building the search conditions. + */ + @Override + protected synchronized void finalize() { + if (_entity != null) { + Factory factory = (Factory)_entity; + factory.setCallback(0, null); + _entity = null; + } + + if (_joins != null) { + for (JoinBuilder> join : _joins.values()) { + join.getT().finalize(); + } + } + + if (_selects == null || _selects.size() == 0) { + _selectType = SelectType.Entity; + assert _entityBeanType.equals(_resultType) : "Expecting " + _entityBeanType + " because you didn't specify any selects but instead got " + _resultType; + return; + } + + for (Select select : _selects) { + if (select.field == null) { + assert (_selects.size() == 1) : "You didn't specify any fields to put the result in but you're specifying more than one select so where should I put the selects?"; + _selectType = SelectType.Single; + return; + } + if (select.func != null) { + _selectType = SelectType.Result; + return; + } + } + + _selectType = SelectType.Fields; + } + + protected static class Condition { + protected final String name; + protected final String cond; + protected final Op op; + protected final Attribute attr; + protected Object[] presets; + + protected Condition(String name) { + this(name, null, null, null); + } + + public Condition(String name, String cond, Attribute attr, Op 
op) { + this.name = name; + this.attr = attr; + this.cond = cond; + this.op = op; + this.presets = null; + } + + public boolean isPreset() { + return presets != null; + } + + public void setPresets(Object... presets) { + this.presets = presets; + } + + public Object[] getPresets() { + return presets; + } + + public void toSql(StringBuilder sql, Object[] params, int count) { + if (count > 0) { + sql.append(cond); + } + + if (op == null) { + return; + } + + if (op == Op.SC) { + sql.append(" (").append(((SearchCriteria)params[0]).getWhereClause()).append(") "); + return; + } + + if (attr == null) { + return; + } + + sql.append(attr.table).append(".").append(attr.columnName).append(op.toString()); + if (op == Op.IN && params.length == 1) { + sql.delete(sql.length() - op.toString().length(), sql.length()); + sql.append("=?"); + } else if (op == Op.NIN && params.length == 1) { + sql.delete(sql.length() - op.toString().length(), sql.length()); + sql.append("!=?"); + } else if (op.getParams() == -1) { + for (int i = 0; i < params.length; i++) { + sql.insert(sql.length() - 2, "?,"); + } + sql.delete(sql.length() - 3, sql.length() - 2); // remove the last , + } else if (op == Op.EQ && (params == null || params.length == 0 || params[0] == null)) { + sql.delete(sql.length() - 4, sql.length()); + sql.append(" IS NULL "); + } else if (op == Op.NEQ && (params == null || params.length == 0 || params[0] == null)) { + sql.delete(sql.length() - 5, sql.length()); + sql.append(" IS NOT NULL "); + } else { + if ((op.getParams() != 0 || params != null) && (params.length != op.getParams())) { + throw new RuntimeException("Problem with condition: " + name); + } + } + } + + @Override + public int hashCode() { + return name.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Condition)) { + return false; + } + + Condition condition = (Condition)obj; + return name.equals(condition.name); + } + } + + protected static class Select { + public Func func; + 
public Attribute attr; + public Object[] params; + public Field field; + + protected Select() { + } + + public Select(Func func, Attribute attr, Field field, Object[] params) { + this.func = func; + this.attr = attr; + this.params = params; + this.field = field; + } + } + + protected class Interceptor implements MethodInterceptor { + @Override + public Object intercept(Object object, Method method, Object[] args, MethodProxy methodProxy) throws Throwable { + String name = method.getName(); + if (method.getAnnotation(Transient.class) == null) { + if (name.startsWith("get")) { + String fieldName = Character.toLowerCase(name.charAt(3)) + name.substring(4); + set(fieldName); + return null; + } else if (name.startsWith("is")) { + String fieldName = Character.toLowerCase(name.charAt(2)) + name.substring(3); + set(fieldName); + return null; + } else { + Column ann = method.getAnnotation(Column.class); + if (ann != null) { + String colName = ann.name(); + for (Map.Entry attr : _attrs.entrySet()) { + if (colName.equals(attr.getValue().columnName)) { + set(attr.getKey()); + return null; + } + } + } + throw new RuntimeException("Perhaps you need to make the method start with get or is: " + method); + } + } + return methodProxy.invokeSuper(object, args); + } + + } +} \ No newline at end of file diff --git a/framework/db/src/com/cloud/utils/db/SearchBuilder.java b/framework/db/src/com/cloud/utils/db/SearchBuilder.java index c177e209860..a56ec1bd74b 100755 --- a/framework/db/src/com/cloud/utils/db/SearchBuilder.java +++ b/framework/db/src/com/cloud/utils/db/SearchBuilder.java @@ -16,7 +16,6 @@ // under the License. package com.cloud.utils.db; -import java.util.Map; /** * SearchBuilder is meant as a static query construct. 
Often times in DAO code, @@ -57,8 +56,7 @@ import java.util.Map; */ public class SearchBuilder extends GenericSearchBuilder { - @SuppressWarnings("unchecked") - public SearchBuilder(T entity, Map attrs) { - super(entity, (Class)entity.getClass(), attrs); + public SearchBuilder(Class entityType) { + super(entityType, entityType); } } diff --git a/framework/db/src/com/cloud/utils/db/SearchCriteria.java b/framework/db/src/com/cloud/utils/db/SearchCriteria.java index 22bccd36b7d..3b20686fe3f 100755 --- a/framework/db/src/com/cloud/utils/db/SearchCriteria.java +++ b/framework/db/src/com/cloud/utils/db/SearchCriteria.java @@ -24,8 +24,8 @@ import java.util.List; import java.util.Map; import com.cloud.utils.Pair; -import com.cloud.utils.db.GenericSearchBuilder.Condition; -import com.cloud.utils.db.GenericSearchBuilder.Select; +import com.cloud.utils.db.SearchBase.Condition; +import com.cloud.utils.db.SearchBase.Select; /** * big joins or high performance searches, it is much better to @@ -56,6 +56,7 @@ public class SearchCriteria { private final String op; int params; + Op(String op, int params) { this.op = op; this.params = params; @@ -113,26 +114,12 @@ public class SearchCriteria { private int _counter; private HashMap>> _joins; private final ArrayList selects, SelectType selectType, Class resultType, HashMap params) { - this._attrs = attrs; - this._conditions = conditions; - this._selects = selects; - this._selectType = selectType; - this._resultType = resultType; - this._params = params; - this._additionals = new ArrayList(); - this._counter = 0; - this._joins = null; - this._groupBy = null; - this._groupByValues = null; - } - - protected SearchCriteria(GenericSearchBuilder sb) { + protected SearchCriteria(SearchBase sb) { this._attrs = sb._attrs; this._conditions = sb._conditions; this._additionals = new ArrayList(); @@ -140,9 +127,9 @@ public class SearchCriteria { this._joins = null; if (sb._joins != null) { _joins = new HashMap>>(sb._joins.size()); - for 
(Map.Entry>> entry : sb._joins.entrySet()) { - JoinBuilder> value = entry.getValue(); - _joins.put(entry.getKey(), new JoinBuilder>(value.getT().create(),value.getFirstAttribute(), value.getSecondAttribute(), value.getType())); + for (Map.Entry>> entry : sb._joins.entrySet()) { + JoinBuilder> value = entry.getValue(); + _joins.put(entry.getKey(), new JoinBuilder>(value.getT().create(), value.getFirstAttribute(), value.getSecondAttribute(), value.getType())); } } _selects = sb._selects; @@ -156,6 +143,10 @@ public class SearchCriteria { _selectType = sb._selectType; } + protected void setParameters(HashMap parameters) { + _params = parameters; + } + public SelectType getSelectType() { return _selectType; } @@ -201,22 +192,22 @@ public class SearchCriteria { } protected JoinBuilder> findJoin(Map>> jbmap, String joinName) { - JoinBuilder> jb = jbmap.get(joinName); - if (jb != null) { - return jb; - } - - for (JoinBuilder> j2 : jbmap.values()) { - SearchCriteria sc = j2.getT(); - if(sc._joins != null) - jb = findJoin(sc._joins, joinName); - if (jb != null) { - return jb; - } - } - - assert (false) : "Unable to find a join by the name " + joinName; - return null; + JoinBuilder> jb = jbmap.get(joinName); + if (jb != null) { + return jb; + } + + for (JoinBuilder> j2 : jbmap.values()) { + SearchCriteria sc = j2.getT(); + if (sc._joins != null) + jb = findJoin(sc._joins, joinName); + if (jb != null) { + return jb; + } + } + + assert (false) : "Unable to find a join by the name " + joinName; + return null; } public void setJoinParameters(String joinName, String conditionName, Object... params) { @@ -226,24 +217,12 @@ public class SearchCriteria { } - public void addJoinAnd(String joinName, String field, Op op, Object... values) { - JoinBuilder> join = _joins.get(joinName); - assert (join != null) : "Incorrect join name specified: " + joinName; - join.getT().addAnd(field, op, values); - } - - public void addJoinOr(String joinName, String field, Op op, Object... 
values) { - JoinBuilder> join = _joins.get(joinName); - assert (join != null) : "Incorrect join name specified: " + joinName; - join.getT().addOr(field, op, values); - } - public SearchCriteria getJoin(String joinName) { return _joins.get(joinName).getT(); } - public Pair, List> getGroupBy() { - return _groupBy == null ? null : new Pair, List>(_groupBy, _groupByValues); + public Pair, List> getGroupBy() { + return _groupBy == null ? null : new Pair, List>(_groupBy, _groupByValues); } public void setGroupByValues(Object... values) { @@ -256,30 +235,27 @@ public class SearchCriteria { return _resultType; } + @Deprecated public void addAnd(String field, Op op, Object... values) { String name = Integer.toString(_counter++); addCondition(name, " AND ", field, op); setParameters(name, values); } + @Deprecated public void addAnd(Attribute attr, Op op, Object... values) { String name = Integer.toString(_counter++); addCondition(name, " AND ", attr, op); setParameters(name, values); } + @Deprecated public void addOr(String field, Op op, Object... values) { String name = Integer.toString(_counter++); addCondition(name, " OR ", field, op); setParameters(name, values); } - public void addOr(Attribute attr, Op op, Object... values) { - String name = Integer.toString(_counter++); - addCondition(name, " OR ", attr, op); - setParameters(name, values); - } - protected void addCondition(String conditionName, String cond, String fieldName, Op op) { Attribute attr = _attrs.get(fieldName); assert attr != null : "Unable to find field: " + fieldName; diff --git a/framework/db/src/com/cloud/utils/db/SearchCriteria2.java b/framework/db/src/com/cloud/utils/db/SearchCriteria2.java deleted file mode 100755 index 67e95b09244..00000000000 --- a/framework/db/src/com/cloud/utils/db/SearchCriteria2.java +++ /dev/null @@ -1,213 +0,0 @@ -// Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. 
See the NOTICE file -// distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file -// to you under the Apache License, Version 2.0 (the -// "License"); you may not use this file except in compliance -// the License. You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. -package com.cloud.utils.db; - -import java.io.Serializable; -import java.lang.reflect.Field; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import javax.persistence.Transient; - -import net.sf.cglib.proxy.Factory; -import net.sf.cglib.proxy.MethodInterceptor; -import net.sf.cglib.proxy.MethodProxy; - -import com.cloud.utils.db.GenericSearchBuilder.Condition; -import com.cloud.utils.db.GenericSearchBuilder.Select; -import com.cloud.utils.db.SearchCriteria.Func; -import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria.SelectType; - -public class SearchCriteria2 implements SearchCriteriaService, MethodInterceptor{ - GenericDao _dao; - final protected Map _attrs; - protected ArrayList _specifiedAttrs; - protected T _entity; - protected ArrayList _conditions; - protected ArrayList(); - } - - for (Attribute attr : _specifiedAttrs) { - Field field = null; - try { - field = _resultType.getDeclaredField(attr.field.getName()); - field.setAccessible(true); - } catch (SecurityException e) { - } catch (NoSuchFieldException e) { - } - _selects.add(new Select(Func.NATIVE, attr, field, null)); - } - - _specifiedAttrs.clear(); - } - - private void 
constructCondition(String conditionName, String cond, Attribute attr, Op op) { - assert _entity != null : "SearchBuilder cannot be modified once it has been setup"; - assert op == null || _specifiedAttrs.size() == 1 : "You didn't select the attribute."; - assert op != Op.SC : "Call join"; - - GenericSearchBuilder.Condition condition = new GenericSearchBuilder.Condition(conditionName, cond, attr, op); - _conditions.add(condition); - _specifiedAttrs.clear(); - } - - private void setParameters(String conditionName, Object... params) { - assert _conditions.contains(new Condition(conditionName)) : "Couldn't find " + conditionName; - _params.put(conditionName, params); - } - - @Override - public void addAnd(Object useless, Op op, Object...values) { - String uuid = UUID.randomUUID().toString(); - constructCondition(uuid, " AND ", _specifiedAttrs.get(0), op); - setParameters(uuid, values); - } - - @Override - public List list() { - done(); - SearchCriteria sc1 = createSearchCriteria(); - if (isSelectAll()) { - return (List)_dao.search(sc1, null); - } else { - return _dao.customSearch(sc1, null); - } - } - - private boolean isSelectAll() { - return _selects == null || _selects.size() == 0; - } - - @Override - public T getEntity() { - return _entity; - } - - private SearchCriteria createSearchCriteria() { - return new SearchCriteria(_attrs, _conditions, _selects, _selectType, _resultType, _params); - } - - private void set(String name) { - Attribute attr = _attrs.get(name); - assert (attr != null) : "Searching for a field that's not there: " + name; - _specifiedAttrs.add(attr); - } - - private void done() { - if (_entity != null) { - Factory factory = (Factory)_entity; - factory.setCallback(0, null); - _entity = null; - } - - if (_selects == null || _selects.size() == 0) { - _selectType = SelectType.Entity; - assert _entityBeanType.equals(_resultType) : "Expecting " + _entityBeanType + " because you didn't specify any selects but instead got " + _resultType; - return; - } - 
- for (Select select : _selects) { - if (select.field == null) { - assert (_selects.size() == 1) : "You didn't specify any fields to put the result in but you're specifying more than one select so where should I put the selects?"; - _selectType = SelectType.Single; - return; - } - if (select.func != null) { - _selectType = SelectType.Result; - return; - } - } - - _selectType = SelectType.Fields; - } - - @Override - public Object intercept(Object object, Method method, Object[] args, MethodProxy methodProxy) throws Throwable { - String name = method.getName(); - if (method.getAnnotation(Transient.class) == null) { - if (name.startsWith("get")) { - String fieldName = Character.toLowerCase(name.charAt(3)) + name.substring(4); - set(fieldName); - return null; - } else if (name.startsWith("is")) { - String fieldName = Character.toLowerCase(name.charAt(2)) + name.substring(3); - set(fieldName); - return null; - } else { - name = name.toLowerCase(); - for (String fieldName : _attrs.keySet()) { - if (name.endsWith(fieldName.toLowerCase())) { - set(fieldName); - return null; - } - } - assert false : "Perhaps you need to make the method start with get or is?"; - } - } - return methodProxy.invokeSuper(object, args); - } - - @Override - public K find() { - assert isSelectAll() : "find doesn't support select search"; - done(); - SearchCriteria sc1 = createSearchCriteria(); - return (K)_dao.findOneBy(sc1); - } - -} diff --git a/framework/db/src/com/cloud/utils/db/SequenceFetcher.java b/framework/db/src/com/cloud/utils/db/SequenceFetcher.java index 88235527fc2..bb45847a7bb 100644 --- a/framework/db/src/com/cloud/utils/db/SequenceFetcher.java +++ b/framework/db/src/com/cloud/utils/db/SequenceFetcher.java @@ -98,7 +98,7 @@ public class SequenceFetcher { sql.append(_tg.valueColumnName()).append(" FROM ").append(_tg.table()); sql.append(" WHERE ").append(_tg.pkColumnName()).append(" = ? 
FOR UPDATE"); - Transaction txn = Transaction.open("Sequence"); + TransactionLegacy txn = TransactionLegacy.open("Sequence"); PreparedStatement selectStmt = txn.prepareStatement(sql.toString()); if (_key == null) { diff --git a/framework/db/src/com/cloud/utils/db/Transaction.java b/framework/db/src/com/cloud/utils/db/Transaction.java index a5da4b3575c..4b19be088c8 100755 --- a/framework/db/src/com/cloud/utils/db/Transaction.java +++ b/framework/db/src/com/cloud/utils/db/Transaction.java @@ -16,1165 +16,39 @@ // under the License. package com.cloud.utils.db; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Savepoint; -import java.sql.Statement; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; -import java.util.Properties; import java.util.concurrent.atomic.AtomicLong; -import javax.sql.DataSource; - -import org.apache.commons.dbcp.ConnectionFactory; -import org.apache.commons.dbcp.DriverManagerConnectionFactory; -import org.apache.commons.dbcp.PoolableConnectionFactory; -import org.apache.commons.dbcp.PoolingDataSource; -import org.apache.commons.pool.KeyedObjectPoolFactory; -import org.apache.commons.pool.impl.GenericObjectPool; -import org.apache.commons.pool.impl.StackKeyedObjectPoolFactory; -import org.apache.log4j.Logger; -import org.jasypt.encryption.pbe.StandardPBEStringEncryptor; -import org.jasypt.properties.EncryptableProperties; - -import com.cloud.utils.Pair; -import com.cloud.utils.PropertiesUtil; -import com.cloud.utils.crypt.EncryptionSecretKeyChecker; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.mgmt.JmxUtil; - -/** - * Transaction abstracts away the Connection object in JDBC. It allows the - * following things that the Connection object does not. - * - * 1. 
Transaction can be started at an entry point and whether the DB - * actions should be auto-commit or not determined at that point. - * 2. DB Connection is allocated only when it is needed. - * 3. Code does not need to know if a transaction has been started or not. - * It just starts/ends a transaction and we resolve it correctly with - * the previous actions. - * - * Note that this class is not synchronous but it doesn't need to be because - * it is stored with TLS and is one per thread. Use appropriately. - */ public class Transaction { - private static final Logger s_logger = Logger.getLogger(Transaction.class.getName() + "." + "Transaction"); - private static final Logger s_stmtLogger = Logger.getLogger(Transaction.class.getName() + "." + "Statement"); - private static final Logger s_lockLogger = Logger.getLogger(Transaction.class.getName() + "." + "Lock"); - private static final Logger s_connLogger = Logger.getLogger(Transaction.class.getName() + "." + "Connection"); + private final static AtomicLong counter = new AtomicLong(0); + private final static TransactionStatus STATUS = new TransactionStatus() { + }; - private static final ThreadLocal tls = new ThreadLocal(); - private static final String START_TXN = "start_txn"; - private static final String CURRENT_TXN = "current_txn"; - private static final String CREATE_TXN = "create_txn"; - private static final String CREATE_CONN = "create_conn"; - private static final String STATEMENT = "statement"; - private static final String ATTACHMENT = "attachment"; - - public static final short CLOUD_DB = 0; - public static final short USAGE_DB = 1; - public static final short AWSAPI_DB = 2; - public static final short SIMULATOR_DB = 3; - public static final short CONNECTED_DB = -1; - public static int s_region_id; - - private static AtomicLong s_id = new AtomicLong(); - private static final TransactionMBeanImpl s_mbean = new TransactionMBeanImpl(); - static { + @SuppressWarnings("deprecation") + public static T 
execute(TransactionCallbackWithException callback) throws E { + String name = "tx-" + counter.incrementAndGet(); + short databaseId = TransactionLegacy.CLOUD_DB; + TransactionLegacy currentTxn = TransactionLegacy.currentTxn(false); + if ( currentTxn != null ) { + databaseId = currentTxn.getDatabaseId(); + } + TransactionLegacy txn = TransactionLegacy.open(name, databaseId, false); try { - JmxUtil.registerMBean("Transaction", "Transaction", s_mbean); - } catch (Exception e) { - s_logger.error("Unable to register mbean for transaction", e); + txn.start(); + T result = callback.doInTransaction(STATUS); + txn.commit(); + return result; + } finally { + txn.close(); } - - /* FIXME: We need a better solution for this - * Initialize encryption if we need it for db.properties - */ - EncryptionSecretKeyChecker enc = new EncryptionSecretKeyChecker(); - enc.check(); } - private final LinkedList _stack; - private long _id; - - private final LinkedList> _lockTimes = new LinkedList>(); - - private String _name; - private Connection _conn; - private boolean _txn; - private short _dbId; - private long _txnTime; - private Statement _stmt; - private String _creator; - - private Transaction _prev = null; - - public static Transaction currentTxn() { - Transaction txn = tls.get(); - assert txn != null : "No Transaction on stack. Did you mark the method with @DB?"; - - assert checkAnnotation(3, txn) : "Did you even read the guide to use Transaction...IOW...other people's code? Try method can't be private. What about @DB? hmmm... could that be it? " + txn; - return txn; - } - - public static Transaction open(final short databaseId) { - String name = buildName(); - if (name == null) { - name = CURRENT_TXN; - } - return open(name, databaseId, true); - } - - // - // Usage of this transaction setup should be limited, it will always open a new transaction context regardless of whether or not there is other - // transaction context in the stack. 
It is used in special use cases that we want to control DB connection explicitly and in the mean time utilize - // the existing DAO features - // - public void transitToUserManagedConnection(Connection conn) { - assert(_conn == null /*&& _stack.size() <= 1*/) : "Can't change to a user managed connection unless the stack is empty and the db connection is null, you may have forgotten to invoke transitToAutoManagedConnection to close out the DB connection: " + toString(); - _conn = conn; - _dbId = CONNECTED_DB; - } - - public void transitToAutoManagedConnection(short dbId) { - // assert(_stack.size() <= 1) : "Can't change to auto managed connection unless your stack is empty"; - _dbId = dbId; - _conn = null; - } - - public static Transaction open(final String name) { - return open(name, CLOUD_DB, false); - } - - public static Transaction open(final String name, final short databaseId, final boolean forceDbChange) { - Transaction txn = tls.get(); - boolean isNew = false; - if (txn == null) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Creating the transaction: " + name); + public static T execute(final TransactionCallback callback) { + return execute(new TransactionCallbackWithException() { + @Override + public T doInTransaction(TransactionStatus status) throws RuntimeException { + return callback.doInTransaction(status); } - txn = new Transaction(name, false, databaseId); - tls.set(txn); - isNew = true; - } else if (forceDbChange) { - final short currentDbId = txn.getDatabaseId(); - if (currentDbId != databaseId) { - // we need to end the current transaction and switch databases - txn.close(txn.getName()); - - txn = new Transaction(name, false, databaseId); - tls.set(txn); - isNew = true; - } - } - - txn.takeOver(name, false); - if (isNew) { - s_mbean.addTransaction(txn); - } - return txn; + }); } - protected StackElement peekInStack(Object obj) { - final Iterator it = _stack.iterator(); - while (it.hasNext()) { - StackElement next = it.next(); - if 
(next.type == obj) { - return next; - } - } - return null; - } - - public void registerLock(String sql) { - if (_txn && s_lockLogger.isDebugEnabled()) { - Pair time = new Pair(sql, System.currentTimeMillis()); - _lockTimes.add(time); - } - } - - public boolean dbTxnStarted() { - return _txn; - } - - public static Connection getStandaloneConnectionWithException() throws SQLException { - Connection conn = s_ds.getConnection(); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Retrieving a standalone connection: dbconn" + System.identityHashCode(conn)); - } - return conn; - } - - public static Connection getStandaloneConnection() { - try { - return getStandaloneConnectionWithException(); - } catch (SQLException e) { - s_logger.error("Unexpected exception: ", e); - return null; - } - } - - public static Connection getStandaloneUsageConnection() { - try { - Connection conn = s_usageDS.getConnection(); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Retrieving a standalone connection for usage: dbconn" + System.identityHashCode(conn)); - } - return conn; - } catch (SQLException e) { - s_logger.warn("Unexpected exception: ", e); - return null; - } - } - - public static Connection getStandaloneAwsapiConnection() { - try { - Connection conn = s_awsapiDS.getConnection(); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Retrieving a standalone connection for usage: dbconn" + System.identityHashCode(conn)); - } - return conn; - } catch (SQLException e) { - s_logger.warn("Unexpected exception: ", e); - return null; - } - } - - public static Connection getStandaloneSimulatorConnection() { - try { - Connection conn = s_simulatorDS.getConnection(); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Retrieving a standalone connection for simulator: dbconn" + System.identityHashCode(conn)); - } - return conn; - } catch (SQLException e) { - s_logger.warn("Unexpected exception: ", e); - return null; - } - } - - protected void 
attach(TransactionAttachment value) { - _stack.push(new StackElement(ATTACHMENT, value)); - } - - protected TransactionAttachment detach(String name) { - Iterator it = _stack.descendingIterator(); - while (it.hasNext()) { - StackElement element = it.next(); - if (element.type == ATTACHMENT) { - TransactionAttachment att = (TransactionAttachment)element.ref; - if (name.equals(att.getName())) { - it.remove(); - return att; - } - } - } - assert false : "Are you sure you attached this: " + name; - return null; - } - - public static void attachToTxn(TransactionAttachment value) { - Transaction txn = tls.get(); - assert txn != null && txn.peekInStack(CURRENT_TXN) != null: "Come on....how can we attach something to the transaction if you haven't started it?"; - - txn.attach(value); - } - - public static TransactionAttachment detachFromTxn(String name) { - Transaction txn = tls.get(); - assert txn != null : "No Transaction in TLS"; - return txn.detach(name); - } - - protected static boolean checkAnnotation(int stack, Transaction txn) { - final StackTraceElement[] stacks = Thread.currentThread().getStackTrace(); - StackElement se = txn.peekInStack(CURRENT_TXN); - if (se == null) { - return false; - } - - StringBuffer sb = new StringBuffer(); - for (; stack < stacks.length; stack++) { - String methodName = stacks[stack].getMethodName(); - sb.append(" ").append(methodName); - if (methodName.equals(se.ref)){ - return true; - } - } - - // relax stack structure for several places that @DB required injection is not in place - s_logger.warn("Non-standard stack context that Transaction context is manaully placed into the calling chain. 
Stack chain: " + sb); - return true; - } - - protected static String buildName() { - if (s_logger.isDebugEnabled()) { - final StackTraceElement[] stacks = Thread.currentThread().getStackTrace(); - final StringBuilder str = new StringBuilder(); - int i = 3, j = 3; - while (j < 15 && i < stacks.length) { - StackTraceElement element = stacks[i]; - String filename = element.getFileName(); - String method = element.getMethodName(); - if ((filename != null && filename.equals("")) || (method != null && method.equals("invokeSuper"))) { - i++; - continue; - } - - str.append("-").append(stacks[i].getClassName().substring(stacks[i].getClassName().lastIndexOf(".") + 1)).append(".").append(stacks[i].getMethodName()).append(":").append(stacks[i].getLineNumber()); - j++; - i++; - } - return str.toString(); - } - - return ""; - } - - public Transaction(final String name, final boolean forLocking, final short databaseId) { - _name = name; - _conn = null; - _stack = new LinkedList(); - _txn = false; - _dbId = databaseId; - _id = s_id.incrementAndGet(); - _creator = Thread.currentThread().getName(); - } - - public String getCreator() { - return _creator; - } - - public long getId() { - return _id; - } - - public String getName() { - return _name; - } - - public Short getDatabaseId() { - return _dbId; - } - - @Override - public String toString() { - final StringBuilder str = new StringBuilder((_name != null ? 
_name : "")); - str.append(" : "); - for (final StackElement se : _stack) { - if (se.type == CURRENT_TXN) { - str.append(se.ref).append(", "); - } - } - - return str.toString(); - } - - protected void mark(final String name) { - _stack.push(new StackElement(CURRENT_TXN, name)); - } - - public boolean lock(final String name, final int timeoutSeconds) { - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster == null) { - throw new CloudRuntimeException("There's no support for locking yet"); - } - return lockMaster.acquire(name, timeoutSeconds); - } - - public boolean release(final String name) { - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster == null) { - throw new CloudRuntimeException("There's no support for locking yet"); - } - return lockMaster.release(name); - } - - public void start() { - if (s_logger.isTraceEnabled()) { - s_logger.trace("txn: start requested by: " + buildName()); - } - - _stack.push(new StackElement(START_TXN, null)); - - if (_txn) { - s_logger.trace("txn: has already been started."); - return; - } - - _txn = true; - - _txnTime = System.currentTimeMillis(); - if (_conn != null) { - try { - s_logger.trace("txn: set auto commit to false"); - _conn.setAutoCommit(false); - } catch (final SQLException e) { - s_logger.warn("Unable to set auto commit: ", e); - throw new CloudRuntimeException("Unable to set auto commit: ", e); - } - } - } - - protected void closePreviousStatement() { - if (_stmt != null) { - try { - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Closing: " + _stmt.toString()); - } - try { - ResultSet rs = _stmt.getResultSet(); - if (rs != null && _stmt.getResultSetHoldability() != ResultSet.HOLD_CURSORS_OVER_COMMIT) { - rs.close(); - } - } catch(SQLException e) { - s_stmtLogger.trace("Unable to close resultset"); - } - _stmt.close(); - } catch (final SQLException e) { - s_stmtLogger.trace("Unable to close statement: " + _stmt.toString()); - } finally { - _stmt = null; - } - } - 
} - - /** - * Prepares an auto close statement. The statement is closed automatically if it is - * retrieved with this method. - * - * @param sql sql String - * @return PreparedStatement - * @throws SQLException if problem with JDBC layer. - * - * @see java.sql.Connection - */ - public PreparedStatement prepareAutoCloseStatement(final String sql) throws SQLException { - PreparedStatement stmt = prepareStatement(sql); - closePreviousStatement(); - _stmt = stmt; - return stmt; - } - - public PreparedStatement prepareStatement(final String sql) throws SQLException { - final Connection conn = getConnection(); - final PreparedStatement pstmt = conn.prepareStatement(sql); - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Preparing: " + sql); - } - return pstmt; - } - - /** - * Prepares an auto close statement. The statement is closed automatically if it is - * retrieved with this method. - * - * @param sql sql String - * @param autoGeneratedKeys keys that are generated - * @return PreparedStatement - * @throws SQLException if problem with JDBC layer. - * - * @see java.sql.Connection - */ - public PreparedStatement prepareAutoCloseStatement(final String sql, final int autoGeneratedKeys) throws SQLException { - final Connection conn = getConnection(); - final PreparedStatement pstmt = conn.prepareStatement(sql, autoGeneratedKeys); - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Preparing: " + sql); - } - closePreviousStatement(); - _stmt = pstmt; - return pstmt; - } - - /** - * Prepares an auto close statement. The statement is closed automatically if it is - * retrieved with this method. - * - * @param sql sql String - * @param columnNames names of the columns - * @return PreparedStatement - * @throws SQLException if problem with JDBC layer. 
- * - * @see java.sql.Connection - */ - public PreparedStatement prepareAutoCloseStatement(final String sql, final String[] columnNames) throws SQLException { - final Connection conn = getConnection(); - final PreparedStatement pstmt = conn.prepareStatement(sql, columnNames); - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Preparing: " + sql); - } - closePreviousStatement(); - _stmt = pstmt; - return pstmt; - } - - /** - * Prepares an auto close statement. The statement is closed automatically if it is - * retrieved with this method. - * - * @param sql sql String - * @return PreparedStatement - * @throws SQLException if problem with JDBC layer. - * - * @see java.sql.Connection - */ - public PreparedStatement prepareAutoCloseStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { - final Connection conn = getConnection(); - final PreparedStatement pstmt = conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability); - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Preparing: " + sql); - } - closePreviousStatement(); - _stmt = pstmt; - return pstmt; - } - - /** - * Returns the db connection. - * - * Note: that you can call getConnection() but beaware that - * all prepare statements from the Connection are not garbage - * collected! - * - * @return DB Connection but make sure you understand that - * you are responsible for closing the PreparedStatement. 
- * @throws SQLException - */ - public Connection getConnection() throws SQLException { - if (_conn == null) { - switch (_dbId) { - case CLOUD_DB: - if(s_ds != null) { - _conn = s_ds.getConnection(); - } else { - s_logger.warn("A static-initialized variable becomes null, process is dying?"); - throw new CloudRuntimeException("Database is not initialized, process is dying?"); - } - break; - case USAGE_DB: - if(s_usageDS != null) { - _conn = s_usageDS.getConnection(); - } else { - s_logger.warn("A static-initialized variable becomes null, process is dying?"); - throw new CloudRuntimeException("Database is not initialized, process is dying?"); - } - break; - case AWSAPI_DB: - if(s_awsapiDS != null) { - _conn = s_awsapiDS.getConnection(); - } else { - s_logger.warn("A static-initialized variable becomes null, process is dying?"); - throw new CloudRuntimeException("Database is not initialized, process is dying?"); - } - break; - - case SIMULATOR_DB: - if(s_simulatorDS != null) { - _conn = s_simulatorDS.getConnection(); - } else { - s_logger.warn("A static-initialized variable becomes null, process is dying?"); - throw new CloudRuntimeException("Database is not initialized, process is dying?"); - } - break; - default: - - throw new CloudRuntimeException("No database selected for the transaction"); - } - _conn.setAutoCommit(!_txn); - - // - // MySQL default transaction isolation level is REPEATABLE READ, - // to reduce chances of DB deadlock, we will use READ COMMITED isolation level instead - // see http://dev.mysql.com/doc/refman/5.0/en/innodb-deadlocks.html - // - _stack.push(new StackElement(CREATE_CONN, null)); - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Creating a DB connection with " + (_txn ? " txn: " : " no txn: ") + " for " + _dbId + ": dbconn" + System.identityHashCode(_conn) + ". 
Stack: " + buildName()); - } - } else { - s_logger.trace("conn: Using existing DB connection"); - } - - return _conn; - } - - protected boolean takeOver(final String name, final boolean create) { - if (_stack.size() != 0) { - if (!create) { - // If it is not a create transaction, then let's just use the current one. - if (s_logger.isTraceEnabled()) { - s_logger.trace("Using current transaction: " + toString()); - } - mark(name); - return false; - } - - final StackElement se = _stack.getFirst(); - if (se.type == CREATE_TXN) { - // This create is called inside of another create. Which is ok? - // We will let that create be responsible for cleaning up. - if (s_logger.isTraceEnabled()) { - s_logger.trace("Create using current transaction: " + toString()); - } - mark(name); - return false; - } - - s_logger.warn("Encountered a transaction that has leaked. Cleaning up. " + toString()); - cleanup(); - } - - if (s_logger.isTraceEnabled()) { - s_logger.trace("Took over the transaction: " + name); - } - _stack.push(new StackElement(create ? CREATE_TXN : CURRENT_TXN, name)); - _name = name; - return true; - } - - public void cleanup() { - closePreviousStatement(); - - removeUpTo(null, null); - if (_txn) { - rollbackTransaction(); - } - _txn = false; - _name = null; - - closeConnection(); - - _stack.clear(); - Merovingian2 lockMaster = Merovingian2.getLockMaster(); - if (lockMaster != null) { - lockMaster.cleanupThread(); - } - } - - public void close() { - removeUpTo(CURRENT_TXN, null); - - if (_stack.size() == 0) { - s_logger.trace("Transaction is done"); - cleanup(); - } - } - - /** - * close() is used by endTxn to close the connection. This method only - * closes the connection if the name is the same as what's stored. - * - * @param name - * @return true if this close actually closes the connection. false if not. - */ - public boolean close(final String name) { - if (_name == null) { // Already cleaned up. 
- if (s_logger.isTraceEnabled()) { - s_logger.trace("Already cleaned up." + buildName()); - } - return true; - } - - if (!_name.equals(name)) { - close(); - return false; - } - - if (s_logger.isDebugEnabled() && _stack.size() > 2) { - s_logger.debug("Transaction is not closed properly: " + toString() + ". Called by " + buildName()); - } - - cleanup(); - - s_logger.trace("All done"); - return true; - } - - protected boolean hasTxnInStack() { - return peekInStack(START_TXN) != null; - } - - protected void clearLockTimes() { - if (s_lockLogger.isDebugEnabled()) { - for (Pair time : _lockTimes) { - s_lockLogger.trace("SQL " + time.first() + " took " + (System.currentTimeMillis() - time.second())); - } - _lockTimes.clear(); - } - } - - public boolean commit() { - if (!_txn) { - s_logger.warn("txn: Commit called when it is not a transaction: " + buildName()); - return false; - } - - Iterator it = _stack.iterator(); - while (it.hasNext()) { - StackElement st = it.next(); - if (st.type == START_TXN) { - it.remove(); - break; - } - } - - if (hasTxnInStack()) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("txn: Not committing because transaction started elsewhere: " + buildName() + " / " + toString()); - } - return false; - } - - _txn = false; - try { - if (_conn != null) { - _conn.commit(); - s_logger.trace("txn: DB Changes committed. Time = " + (System.currentTimeMillis() - _txnTime)); - clearLockTimes(); - closeConnection(); - } - return true; - } catch (final SQLException e) { - rollbackTransaction(); - throw new CloudRuntimeException("Unable to commit or close the connection. 
", e); - } - } - - protected void closeConnection() { - closePreviousStatement(); - - if (_conn == null) { - return; - } - - if (_txn) { - s_connLogger.trace("txn: Not closing DB connection because we're still in a transaction."); - return; - } - - try { - // we should only close db connection when it is not user managed - if (this._dbId != CONNECTED_DB) { - if (s_connLogger.isTraceEnabled()) { - s_connLogger.trace("Closing DB connection: dbconn" + System.identityHashCode(_conn)); - } - _conn.close(); - _conn = null; - } - - } catch (final SQLException e) { - s_logger.warn("Unable to close connection", e); - } - } - - protected void removeUpTo(String type, Object ref) { - boolean rollback = false; - Iterator it = _stack.iterator(); - while (it.hasNext()) { - StackElement item = it.next(); - - it.remove(); - - try { - if (item.type == type && (ref == null || item.ref == ref)) { - break; - } - - if (item.type == CURRENT_TXN) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Releasing the current txn: " + (item.ref != null ? 
item.ref : "")); - } - } else if (item.type == CREATE_CONN) { - closeConnection(); - } else if (item.type == START_TXN) { - if (item.ref == null) { - rollback = true; - } else { - try { - _conn.rollback((Savepoint)ref); - rollback = false; - } catch (final SQLException e) { - s_logger.warn("Unable to rollback Txn.", e); - } - } - } else if (item.type == STATEMENT) { - try { - if (s_stmtLogger.isTraceEnabled()) { - s_stmtLogger.trace("Closing: " + ref.toString()); - } - Statement stmt = (Statement)ref; - try { - ResultSet rs = stmt.getResultSet(); - if (rs != null) { - rs.close(); - } - } catch(SQLException e) { - s_stmtLogger.trace("Unable to close resultset"); - } - stmt.close(); - } catch (final SQLException e) { - s_stmtLogger.trace("Unable to close statement: " + item); - } - } else if (item.type == ATTACHMENT) { - TransactionAttachment att = (TransactionAttachment)item.ref; - if (s_logger.isTraceEnabled()) { - s_logger.trace("Cleaning up " + att.getName()); - } - att.cleanup(); - } - } catch(Exception e) { - s_logger.error("Unable to clean up " + item, e); - } - } - - if (rollback) { - rollback(); - } - } - - protected void rollbackTransaction() { - closePreviousStatement(); - if (!_txn) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Rollback called for " + _name + " when there's no transaction: " + buildName()); - } - return; - } - assert (!hasTxnInStack()) : "Who's rolling back transaction when there's still txn in stack?"; - _txn = false; - try { - if (_conn != null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Rolling back the transaction: Time = " + (System.currentTimeMillis() - _txnTime) + " Name = " + _name + "; called by " + buildName()); - } - _conn.rollback(); - } - clearLockTimes(); - closeConnection(); - } catch(final SQLException e) { - s_logger.warn("Unable to rollback", e); - } - } - - protected void rollbackSavepoint(Savepoint sp) { - try { - if (_conn != null) { - _conn.rollback(sp); - } - } catch (SQLException e) { - 
s_logger.warn("Unable to rollback to savepoint " + sp); - } - - if (!hasTxnInStack()) { - _txn = false; - closeConnection(); - } - } - - public void rollback() { - Iterator it = _stack.iterator(); - while (it.hasNext()) { - StackElement st = it.next(); - if (st.type == START_TXN) { - if (st.ref == null) { - it.remove(); - } else { - rollback((Savepoint)st.ref); - return; - } - } - } - - rollbackTransaction(); - } - - public Savepoint setSavepoint() throws SQLException { - _txn = true; - StackElement st = new StackElement(START_TXN, null); - _stack.push(st); - final Connection conn = getConnection(); - final Savepoint sp = conn.setSavepoint(); - st.ref = sp; - - return sp; - } - - public Savepoint setSavepoint(final String name) throws SQLException { - _txn = true; - StackElement st = new StackElement(START_TXN, null); - _stack.push(st); - final Connection conn = getConnection(); - final Savepoint sp = conn.setSavepoint(name); - st.ref = sp; - - return sp; - } - - public void releaseSavepoint(final Savepoint sp) throws SQLException { - removeTxn(sp); - if (_conn != null) { - _conn.releaseSavepoint(sp); - } - - if (!hasTxnInStack()) { - _txn = false; - closeConnection(); - } - } - - protected boolean hasSavepointInStack(Savepoint sp) { - Iterator it = _stack.iterator(); - while (it.hasNext()) { - StackElement se = it.next(); - if (se.type == START_TXN && se.ref == sp) { - return true; - } - } - return false; - } - - protected void removeTxn(Savepoint sp) { - assert hasSavepointInStack(sp) : "Removing a save point that's not in the stack"; - - if (!hasSavepointInStack(sp)) { - return; - } - - Iterator it = _stack.iterator(); - while (it.hasNext()) { - StackElement se = it.next(); - if (se.type == START_TXN) { - it.remove(); - if (se.ref == sp) { - return; - } - } - } - } - - public void rollback(final Savepoint sp) { - removeTxn(sp); - - rollbackSavepoint(sp); - } - - public Connection getCurrentConnection() { - return _conn; - } - - public List getStack() { - return 
_stack; - } - - protected Transaction() { - _name = null; - _conn = null; - _stack = null; - _txn = false; - _dbId = -1; - } - - @Override - protected void finalize() throws Throwable { - if (!(_conn == null && (_stack == null || _stack.size() == 0))) { - assert (false) : "Oh Alex oh alex...something is wrong with how we're doing this"; - s_logger.error("Something went wrong that a transaction is orphaned before db connection is closed"); - cleanup(); - } - } - - protected class StackElement { - public String type; - public Object ref; - - public StackElement (String type, Object ref) { - this.type = type; - this.ref = ref; - } - - @Override - public String toString() { - return type + "-" + ref; - } - } - - private static DataSource s_ds; - private static DataSource s_usageDS; - private static DataSource s_awsapiDS; - private static DataSource s_simulatorDS; - - static { - // Initialize with assumed db.properties file - initDataSource("db.properties"); - } - - public static void initDataSource(String propsFileName) { - try { - File dbPropsFile = PropertiesUtil.findConfigFile(propsFileName); - final Properties dbProps; - if (EncryptionSecretKeyChecker.useEncryption()) { - StandardPBEStringEncryptor encryptor = EncryptionSecretKeyChecker.getEncryptor(); - dbProps = new EncryptableProperties(encryptor); - } else { - dbProps = new Properties(); - } - try { - dbProps.load(new FileInputStream(dbPropsFile)); - } catch (IOException e) { - s_logger.fatal("Unable to load db properties file, pl. check the classpath and file path configuration", e); - return; - } catch (NullPointerException e) { - s_logger.fatal("Unable to locate db properties file within classpath or absolute path: " + propsFileName); - return; - } - - // FIXME: If params are missing...default them???? 
- final int cloudMaxActive = Integer.parseInt(dbProps.getProperty("db.cloud.maxActive")); - final int cloudMaxIdle = Integer.parseInt(dbProps.getProperty("db.cloud.maxIdle")); - final long cloudMaxWait = Long.parseLong(dbProps.getProperty("db.cloud.maxWait")); - final String cloudUsername = dbProps.getProperty("db.cloud.username"); - final String cloudPassword = dbProps.getProperty("db.cloud.password"); - final String cloudHost = dbProps.getProperty("db.cloud.host"); - final int cloudPort = Integer.parseInt(dbProps.getProperty("db.cloud.port")); - final String cloudDbName = dbProps.getProperty("db.cloud.name"); - final boolean cloudAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.cloud.autoReconnect")); - final String cloudValidationQuery = dbProps.getProperty("db.cloud.validationQuery"); - final String cloudIsolationLevel = dbProps.getProperty("db.cloud.isolation.level"); - - int isolationLevel = Connection.TRANSACTION_READ_COMMITTED; - if (cloudIsolationLevel == null) { - isolationLevel = Connection.TRANSACTION_READ_COMMITTED; - } else if (cloudIsolationLevel.equalsIgnoreCase("readcommitted")) { - isolationLevel = Connection.TRANSACTION_READ_COMMITTED; - } else if (cloudIsolationLevel.equalsIgnoreCase("repeatableread")) { - isolationLevel = Connection.TRANSACTION_REPEATABLE_READ; - } else if (cloudIsolationLevel.equalsIgnoreCase("serializable")) { - isolationLevel = Connection.TRANSACTION_SERIALIZABLE; - } else if (cloudIsolationLevel.equalsIgnoreCase("readuncommitted")) { - isolationLevel = Connection.TRANSACTION_READ_UNCOMMITTED; - } else { - s_logger.warn("Unknown isolation level " + cloudIsolationLevel + ". 
Using read uncommitted"); - } - - final boolean cloudTestOnBorrow = Boolean.parseBoolean(dbProps.getProperty("db.cloud.testOnBorrow")); - final boolean cloudTestWhileIdle = Boolean.parseBoolean(dbProps.getProperty("db.cloud.testWhileIdle")); - final long cloudTimeBtwEvictionRunsMillis = Long.parseLong(dbProps.getProperty("db.cloud.timeBetweenEvictionRunsMillis")); - final long cloudMinEvcitableIdleTimeMillis = Long.parseLong(dbProps.getProperty("db.cloud.minEvictableIdleTimeMillis")); - final boolean cloudPoolPreparedStatements = Boolean.parseBoolean(dbProps.getProperty("db.cloud.poolPreparedStatements")); - final String url = dbProps.getProperty("db.cloud.url.params"); - - final boolean useSSL = Boolean.parseBoolean(dbProps.getProperty("db.cloud.useSSL")); - if (useSSL) { - System.setProperty("javax.net.ssl.keyStore", dbProps.getProperty("db.cloud.keyStore")); - System.setProperty("javax.net.ssl.keyStorePassword", dbProps.getProperty("db.cloud.keyStorePassword")); - System.setProperty("javax.net.ssl.trustStore", dbProps.getProperty("db.cloud.trustStore")); - System.setProperty("javax.net.ssl.trustStorePassword", dbProps.getProperty("db.cloud.trustStorePassword")); - } - - String regionId = dbProps.getProperty("region.id"); - if(regionId == null){ - s_region_id = 1; - } else { - s_region_id = Integer.parseInt(regionId); - } - final GenericObjectPool cloudConnectionPool = new GenericObjectPool(null, cloudMaxActive, GenericObjectPool.DEFAULT_WHEN_EXHAUSTED_ACTION, - cloudMaxWait, cloudMaxIdle, cloudTestOnBorrow, false, cloudTimeBtwEvictionRunsMillis, 1, cloudMinEvcitableIdleTimeMillis, cloudTestWhileIdle); - - final ConnectionFactory cloudConnectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://" + cloudHost + ":" + cloudPort + "/" + cloudDbName + - "?autoReconnect=" + cloudAutoReconnect + (url != null ? "&" + url : "") + (useSSL ? 
"&useSSL=true" : ""), cloudUsername, cloudPassword); - - final KeyedObjectPoolFactory poolableObjFactory = (cloudPoolPreparedStatements ? new StackKeyedObjectPoolFactory() : null); - - final PoolableConnectionFactory cloudPoolableConnectionFactory = new PoolableConnectionFactory(cloudConnectionFactory, cloudConnectionPool, poolableObjFactory, - cloudValidationQuery, false, false, isolationLevel); - - // Default Data Source for CloudStack - s_ds = new PoolingDataSource(cloudPoolableConnectionFactory.getPool()); - - // Configure the usage db - final int usageMaxActive = Integer.parseInt(dbProps.getProperty("db.usage.maxActive")); - final int usageMaxIdle = Integer.parseInt(dbProps.getProperty("db.usage.maxIdle")); - final long usageMaxWait = Long.parseLong(dbProps.getProperty("db.usage.maxWait")); - final String usageUsername = dbProps.getProperty("db.usage.username"); - final String usagePassword = dbProps.getProperty("db.usage.password"); - final String usageHost = dbProps.getProperty("db.usage.host"); - final int usagePort = Integer.parseInt(dbProps.getProperty("db.usage.port")); - final String usageDbName = dbProps.getProperty("db.usage.name"); - final boolean usageAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.usage.autoReconnect")); - final String usageUrl = dbProps.getProperty("db.usage.url.params"); - - final GenericObjectPool usageConnectionPool = new GenericObjectPool(null, usageMaxActive, GenericObjectPool.DEFAULT_WHEN_EXHAUSTED_ACTION, - usageMaxWait, usageMaxIdle); - - final ConnectionFactory usageConnectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://" + usageHost + ":" + usagePort + "/" + usageDbName + - "?autoReconnect=" + usageAutoReconnect + (usageUrl != null ? 
"&" + usageUrl : ""), usageUsername, usagePassword); - - final PoolableConnectionFactory usagePoolableConnectionFactory = new PoolableConnectionFactory(usageConnectionFactory, usageConnectionPool, - new StackKeyedObjectPoolFactory(), null, false, false); - - // Data Source for usage server - s_usageDS = new PoolingDataSource(usagePoolableConnectionFactory.getPool()); - - // Configure awsapi db - final String awsapiDbName = dbProps.getProperty("db.awsapi.name"); - final GenericObjectPool awsapiConnectionPool = new GenericObjectPool(null, usageMaxActive, GenericObjectPool.DEFAULT_WHEN_EXHAUSTED_ACTION, - usageMaxWait, usageMaxIdle); - final ConnectionFactory awsapiConnectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://" + cloudHost + ":" + cloudPort + "/" + awsapiDbName + - "?autoReconnect=" + usageAutoReconnect, cloudUsername, cloudPassword); - final PoolableConnectionFactory awsapiPoolableConnectionFactory = new PoolableConnectionFactory(awsapiConnectionFactory, awsapiConnectionPool, - new StackKeyedObjectPoolFactory(), null, false, false); - - // Data Source for awsapi - s_awsapiDS = new PoolingDataSource(awsapiPoolableConnectionFactory.getPool()); - - try { - // Configure the simulator db - final int simulatorMaxActive = Integer.parseInt(dbProps.getProperty("db.simulator.maxActive")); - final int simulatorMaxIdle = Integer.parseInt(dbProps.getProperty("db.simulator.maxIdle")); - final long simulatorMaxWait = Long.parseLong(dbProps.getProperty("db.simulator.maxWait")); - final String simulatorUsername = dbProps.getProperty("db.simulator.username"); - final String simulatorPassword = dbProps.getProperty("db.simulator.password"); - final String simulatorHost = dbProps.getProperty("db.simulator.host"); - final int simulatorPort = Integer.parseInt(dbProps.getProperty("db.simulator.port")); - final String simulatorDbName = dbProps.getProperty("db.simulator.name"); - final boolean simulatorAutoReconnect = 
Boolean.parseBoolean(dbProps.getProperty("db.simulator.autoReconnect")); - - final GenericObjectPool simulatorConnectionPool = new GenericObjectPool(null, simulatorMaxActive, GenericObjectPool.DEFAULT_WHEN_EXHAUSTED_ACTION, - simulatorMaxWait, simulatorMaxIdle); - - final ConnectionFactory simulatorConnectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://" + simulatorHost + ":" + simulatorPort + "/" + simulatorDbName + - "?autoReconnect=" + simulatorAutoReconnect, simulatorUsername, simulatorPassword); - - final PoolableConnectionFactory simulatorPoolableConnectionFactory = new PoolableConnectionFactory(simulatorConnectionFactory, simulatorConnectionPool, - new StackKeyedObjectPoolFactory(), null, false, false); - s_simulatorDS = new PoolingDataSource(simulatorPoolableConnectionFactory.getPool()); - } catch (Exception e) { - s_logger.debug("Simulator DB properties are not available. Not initializing simulator DS"); - } - } catch (final Exception e) { - s_ds = getDefaultDataSource("cloud"); - s_usageDS = getDefaultDataSource("cloud_usage"); - s_simulatorDS = getDefaultDataSource("cloud_simulator"); - s_logger.warn("Unable to load db configuration, using defaults with 5 connections. Falling back on assumed datasource on localhost:3306 using username:password=cloud:cloud. 
Please check your configuration", e); - } - } - - private static DataSource getDefaultDataSource(final String database) { - final GenericObjectPool connectionPool = new GenericObjectPool(null, 5); - final ConnectionFactory connectionFactory = new DriverManagerConnectionFactory( - "jdbc:mysql://localhost:3306/" + database, "cloud", "cloud"); - final PoolableConnectionFactory poolableConnectionFactory = new PoolableConnectionFactory( - connectionFactory, connectionPool, null, null, false, true); - return new PoolingDataSource( - /* connectionPool */poolableConnectionFactory.getPool()); - } - } diff --git a/framework/db/src/com/cloud/utils/db/TransactionCallback.java b/framework/db/src/com/cloud/utils/db/TransactionCallback.java new file mode 100644 index 00000000000..df07fa696aa --- /dev/null +++ b/framework/db/src/com/cloud/utils/db/TransactionCallback.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.utils.db; + +public interface TransactionCallback { + + public T doInTransaction(TransactionStatus status); + +} diff --git a/docs/publican-cloudstack/en-US/css/overrides.css b/framework/db/src/com/cloud/utils/db/TransactionCallbackNoReturn.java similarity index 50% rename from docs/publican-cloudstack/en-US/css/overrides.css rename to framework/db/src/com/cloud/utils/db/TransactionCallbackNoReturn.java index 5d31cec2706..65e378b7658 100644 --- a/docs/publican-cloudstack/en-US/css/overrides.css +++ b/framework/db/src/com/cloud/utils/db/TransactionCallbackNoReturn.java @@ -1,13 +1,14 @@ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information# + * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 - * + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -15,60 +16,16 @@ * specific language governing permissions and limitations * under the License. 
*/ -a:link { - color:#0066cc; +package com.cloud.utils.db; + +public abstract class TransactionCallbackNoReturn implements TransactionCallback { + + @Override + public final Object doInTransaction(TransactionStatus status) { + doInTransactionWithoutResult(status); + return null; + } + + public abstract void doInTransactionWithoutResult(TransactionStatus status); + } - -a:visited { - color:#6699cc; -} - -h1 { - color:#3c6eb4; -} - -.producttitle { - background: #3c6eb4 url(../images/h1-bg.png) top left repeat; -} - -.section h1.title { - color:#3c6eb4; -} - - -h2,h3,h4,h5,h6 { - color:#3c6eb4 -} - -table { - border:1px solid #3c6eb4; -} - -table th { - background-color:#3c6eb4; -} - -table tr.even td { - background-color:#f5f5f5; -} - -#title a { - height:54px; -} - -.term{ - color:#a70000; -} - -.revhistory table th { - color:#3c6eb4; -} - -.edition { - color: #3c6eb4; -} - -span.remark{ - background-color: #ffff00; -} - diff --git a/framework/db/src/com/cloud/utils/db/TransactionCallbackWithException.java b/framework/db/src/com/cloud/utils/db/TransactionCallbackWithException.java new file mode 100644 index 00000000000..a2f829db238 --- /dev/null +++ b/framework/db/src/com/cloud/utils/db/TransactionCallbackWithException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.utils.db; + +public interface TransactionCallbackWithException { + + public T doInTransaction(TransactionStatus status) throws E; + +} diff --git a/framework/db/src/com/cloud/utils/db/TransactionCallbackWithExceptionNoReturn.java b/framework/db/src/com/cloud/utils/db/TransactionCallbackWithExceptionNoReturn.java new file mode 100644 index 00000000000..d5bc44fe7b3 --- /dev/null +++ b/framework/db/src/com/cloud/utils/db/TransactionCallbackWithExceptionNoReturn.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.utils.db; + +public abstract class TransactionCallbackWithExceptionNoReturn implements TransactionCallbackWithException { + + @Override + public final Boolean doInTransaction(TransactionStatus status) throws E { + doInTransactionWithoutResult(status); + return true; + } + + public abstract void doInTransactionWithoutResult(TransactionStatus status) throws E; + +} + diff --git a/framework/db/src/com/cloud/utils/db/TransactionContextBuilder.java b/framework/db/src/com/cloud/utils/db/TransactionContextBuilder.java index 40fcbbf5593..d60ab7b7f43 100644 --- a/framework/db/src/com/cloud/utils/db/TransactionContextBuilder.java +++ b/framework/db/src/com/cloud/utils/db/TransactionContextBuilder.java @@ -46,19 +46,19 @@ public class TransactionContextBuilder implements ComponentMethodInterceptor { @Override public Object interceptStart(Method method, Object target) { - return Transaction.open(method.getName()); + return TransactionLegacy.open(method.getName()); } @Override public void interceptComplete(Method method, Object target, Object objReturnedInInterceptStart) { - Transaction txn = (Transaction)objReturnedInInterceptStart; + TransactionLegacy txn = (TransactionLegacy)objReturnedInInterceptStart; if(txn != null) txn.close(); } @Override public void interceptException(Method method, Object target, Object objReturnedInInterceptStart) { - Transaction txn = (Transaction)objReturnedInInterceptStart; + TransactionLegacy txn = (TransactionLegacy)objReturnedInInterceptStart; if(txn != null) txn.close(); } diff --git a/framework/db/src/com/cloud/utils/db/TransactionContextInterceptor.java b/framework/db/src/com/cloud/utils/db/TransactionContextInterceptor.java new file mode 100644 index 00000000000..3f5d6d27db5 --- /dev/null +++ b/framework/db/src/com/cloud/utils/db/TransactionContextInterceptor.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.utils.db; + +import org.aopalliance.intercept.MethodInterceptor; +import org.aopalliance.intercept.MethodInvocation; + +public class TransactionContextInterceptor implements MethodInterceptor { + + public TransactionContextInterceptor() { + + } + @Override + public Object invoke(MethodInvocation m) throws Throwable { + TransactionLegacy txn = TransactionLegacy.open(m.getMethod().getName()); + try { + return m.proceed(); + } finally { + txn.close(); + } + } + +} diff --git a/framework/db/src/com/cloud/utils/db/TransactionContextListener.java b/framework/db/src/com/cloud/utils/db/TransactionContextListener.java new file mode 100644 index 00000000000..db0a706cb9f --- /dev/null +++ b/framework/db/src/com/cloud/utils/db/TransactionContextListener.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.utils.db; + +import org.apache.cloudstack.managed.context.ManagedContextListener; + +public class TransactionContextListener implements ManagedContextListener { + + @Override + public TransactionLegacy onEnterContext(boolean reentry) { + if ( ! reentry ) { + return TransactionLegacy.open(Thread.currentThread().getName()); + } + + return null; + } + + @Override + public void onLeaveContext(TransactionLegacy data, boolean reentry) { + if ( ! reentry ) { + data.close(); + } + } + +} diff --git a/framework/db/src/com/cloud/utils/db/TransactionLegacy.java b/framework/db/src/com/cloud/utils/db/TransactionLegacy.java new file mode 100755 index 00000000000..a318d83f92b --- /dev/null +++ b/framework/db/src/com/cloud/utils/db/TransactionLegacy.java @@ -0,0 +1,1195 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.utils.db; + +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Savepoint; +import java.sql.Statement; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.atomic.AtomicLong; + +import javax.sql.DataSource; + +import org.apache.commons.dbcp.ConnectionFactory; +import org.apache.commons.dbcp.DriverManagerConnectionFactory; +import org.apache.commons.dbcp.PoolableConnectionFactory; +import org.apache.commons.dbcp.PoolingDataSource; +import org.apache.commons.pool.KeyedObjectPoolFactory; +import org.apache.commons.pool.impl.GenericObjectPool; +import org.apache.commons.pool.impl.StackKeyedObjectPoolFactory; +import org.apache.log4j.Logger; +import org.jasypt.encryption.pbe.StandardPBEStringEncryptor; +import org.jasypt.properties.EncryptableProperties; + +import com.cloud.utils.Pair; +import com.cloud.utils.PropertiesUtil; +import com.cloud.utils.crypt.EncryptionSecretKeyChecker; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.mgmt.JmxUtil; + +/** + * Transaction abstracts away the Connection object in JDBC. It allows the + * following things that the Connection object does not. + * + * 1. Transaction can be started at an entry point and whether the DB + * actions should be auto-commit or not determined at that point. + * 2. DB Connection is allocated only when it is needed. + * 3. Code does not need to know if a transaction has been started or not. + * It just starts/ends a transaction and we resolve it correctly with + * the previous actions. 
+ * + * Note that this class is not synchronous but it doesn't need to be because + * it is stored with TLS and is one per thread. Use appropriately. + */ +public class TransactionLegacy { + private static final Logger s_logger = Logger.getLogger(Transaction.class.getName() + "." + "Transaction"); + private static final Logger s_stmtLogger = Logger.getLogger(Transaction.class.getName() + "." + "Statement"); + private static final Logger s_lockLogger = Logger.getLogger(Transaction.class.getName() + "." + "Lock"); + private static final Logger s_connLogger = Logger.getLogger(Transaction.class.getName() + "." + "Connection"); + + private static final ThreadLocal tls = new ThreadLocal(); + private static final String START_TXN = "start_txn"; + private static final String CURRENT_TXN = "current_txn"; + private static final String CREATE_TXN = "create_txn"; + private static final String CREATE_CONN = "create_conn"; + private static final String STATEMENT = "statement"; + private static final String ATTACHMENT = "attachment"; + + public static final short CLOUD_DB = 0; + public static final short USAGE_DB = 1; + public static final short AWSAPI_DB = 2; + public static final short SIMULATOR_DB = 3; + + public static final short CONNECTED_DB = -1; + + private static AtomicLong s_id = new AtomicLong(); + private static final TransactionMBeanImpl s_mbean = new TransactionMBeanImpl(); + static { + try { + JmxUtil.registerMBean("Transaction", "Transaction", s_mbean); + } catch (Exception e) { + s_logger.error("Unable to register mbean for transaction", e); + } + + /* FIXME: We need a better solution for this + * Initialize encryption if we need it for db.properties + */ + EncryptionSecretKeyChecker enc = new EncryptionSecretKeyChecker(); + enc.check(); + } + + private final LinkedList _stack; + private long _id; + + private final LinkedList> _lockTimes = new LinkedList>(); + + private String _name; + private Connection _conn; + private boolean _txn; + private short _dbId; + 
private long _txnTime; + private Statement _stmt; + private String _creator; + + private TransactionLegacy _prev = null; + + public static TransactionLegacy currentTxn() { + return currentTxn(true); + } + + protected static TransactionLegacy currentTxn(boolean check) { + TransactionLegacy txn = tls.get(); + if (check) { + assert txn != null : "No Transaction on stack. Did you mark the method with @DB?"; + + assert checkAnnotation(4, txn) : "Did you even read the guide to use Transaction...IOW...other people's code? Try method can't be private. What about @DB? hmmm... could that be it? " + txn; + } + return txn; + } + + public static TransactionLegacy open(final short databaseId) { + String name = buildName(); + if (name == null) { + name = CURRENT_TXN; + } + return open(name, databaseId, true); + } + + // + // Usage of this transaction setup should be limited, it will always open a new transaction context regardless of whether or not there is other + // transaction context in the stack. 
It is used in special use cases that we want to control DB connection explicitly and in the mean time utilize + // the existing DAO features + // + public void transitToUserManagedConnection(Connection conn) { + if ( _conn != null ) + throw new IllegalStateException("Can't change to a user managed connection unless the db connection is null"); + + _conn = conn; + _dbId = CONNECTED_DB; + } + + public void transitToAutoManagedConnection(short dbId) { + // assert(_stack.size() <= 1) : "Can't change to auto managed connection unless your stack is empty"; + _dbId = dbId; + _conn = null; + } + + public static TransactionLegacy open(final String name) { + return open(name, TransactionLegacy.CLOUD_DB, false); + } + + public static TransactionLegacy open(final String name, final short databaseId, final boolean forceDbChange) { + TransactionLegacy txn = tls.get(); + boolean isNew = false; + if (txn == null) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("Creating the transaction: " + name); + } + txn = new TransactionLegacy(name, false, databaseId); + tls.set(txn); + isNew = true; + } else if (forceDbChange) { + final short currentDbId = txn.getDatabaseId(); + if (currentDbId != databaseId) { + // we need to end the current transaction and switch databases + txn.close(txn.getName()); + + txn = new TransactionLegacy(name, false, databaseId); + tls.set(txn); + isNew = true; + } + } + + txn.takeOver(name, false); + if (isNew) { + s_mbean.addTransaction(txn); + } + return txn; + } + + protected StackElement peekInStack(Object obj) { + final Iterator it = _stack.iterator(); + while (it.hasNext()) { + StackElement next = it.next(); + if (next.type == obj) { + return next; + } + } + return null; + } + + public void registerLock(String sql) { + if (_txn && s_lockLogger.isDebugEnabled()) { + Pair time = new Pair(sql, System.currentTimeMillis()); + _lockTimes.add(time); + } + } + + public boolean dbTxnStarted() { + return _txn; + } + + public static Connection 
getStandaloneConnectionWithException() throws SQLException { + Connection conn = s_ds.getConnection(); + if (s_connLogger.isTraceEnabled()) { + s_connLogger.trace("Retrieving a standalone connection: dbconn" + System.identityHashCode(conn)); + } + return conn; + } + + public static Connection getStandaloneConnection() { + try { + return getStandaloneConnectionWithException(); + } catch (SQLException e) { + s_logger.error("Unexpected exception: ", e); + return null; + } + } + + public static Connection getStandaloneUsageConnection() { + try { + Connection conn = s_usageDS.getConnection(); + if (s_connLogger.isTraceEnabled()) { + s_connLogger.trace("Retrieving a standalone connection for usage: dbconn" + System.identityHashCode(conn)); + } + return conn; + } catch (SQLException e) { + s_logger.warn("Unexpected exception: ", e); + return null; + } + } + + public static Connection getStandaloneAwsapiConnection() { + try { + Connection conn = s_awsapiDS.getConnection(); + if (s_connLogger.isTraceEnabled()) { + s_connLogger.trace("Retrieving a standalone connection for usage: dbconn" + System.identityHashCode(conn)); + } + return conn; + } catch (SQLException e) { + s_logger.warn("Unexpected exception: ", e); + return null; + } + } + + public static Connection getStandaloneSimulatorConnection() { + try { + Connection conn = s_simulatorDS.getConnection(); + if (s_connLogger.isTraceEnabled()) { + s_connLogger.trace("Retrieving a standalone connection for simulator: dbconn" + System.identityHashCode(conn)); + } + return conn; + } catch (SQLException e) { + s_logger.warn("Unexpected exception: ", e); + return null; + } + } + + protected void attach(TransactionAttachment value) { + _stack.push(new StackElement(ATTACHMENT, value)); + } + + protected TransactionAttachment detach(String name) { + Iterator it = _stack.descendingIterator(); + while (it.hasNext()) { + StackElement element = it.next(); + if (element.type == ATTACHMENT) { + TransactionAttachment att = 
(TransactionAttachment)element.ref; + if (name.equals(att.getName())) { + it.remove(); + return att; + } + } + } + assert false : "Are you sure you attached this: " + name; + return null; + } + + public static void attachToTxn(TransactionAttachment value) { + TransactionLegacy txn = tls.get(); + assert txn != null && txn.peekInStack(CURRENT_TXN) != null: "Come on....how can we attach something to the transaction if you haven't started it?"; + + txn.attach(value); + } + + public static TransactionAttachment detachFromTxn(String name) { + TransactionLegacy txn = tls.get(); + assert txn != null : "No Transaction in TLS"; + return txn.detach(name); + } + + protected static boolean checkAnnotation(int stack, TransactionLegacy txn) { + final StackTraceElement[] stacks = Thread.currentThread().getStackTrace(); + StackElement se = txn.peekInStack(CURRENT_TXN); + if (se == null) { + return false; + } + + StringBuffer sb = new StringBuffer(); + for (; stack < stacks.length; stack++) { + String methodName = stacks[stack].getMethodName(); + sb.append(" ").append(methodName); + if (methodName.equals(se.ref)){ + return true; + } + } + + // relax stack structure for several places that @DB required injection is not in place + s_logger.warn("Non-standard stack context that Transaction context is manaully placed into the calling chain. 
Stack chain: " + sb); + return true; + } + + protected static String buildName() { + if (s_logger.isDebugEnabled()) { + final StackTraceElement[] stacks = Thread.currentThread().getStackTrace(); + final StringBuilder str = new StringBuilder(); + int i = 3, j = 3; + while (j < 15 && i < stacks.length) { + StackTraceElement element = stacks[i]; + String filename = element.getFileName(); + String method = element.getMethodName(); + if ((filename != null && filename.equals("")) || (method != null && method.equals("invokeSuper"))) { + i++; + continue; + } + + str.append("-").append(stacks[i].getClassName().substring(stacks[i].getClassName().lastIndexOf(".") + 1)).append(".").append(stacks[i].getMethodName()).append(":").append(stacks[i].getLineNumber()); + j++; + i++; + } + return str.toString(); + } + + return ""; + } + + public TransactionLegacy(final String name, final boolean forLocking, final short databaseId) { + _name = name; + _conn = null; + _stack = new LinkedList(); + _txn = false; + _dbId = databaseId; + _id = s_id.incrementAndGet(); + _creator = Thread.currentThread().getName(); + } + + public String getCreator() { + return _creator; + } + + public long getId() { + return _id; + } + + public String getName() { + return _name; + } + + public Short getDatabaseId() { + return _dbId; + } + + @Override + public String toString() { + final StringBuilder str = new StringBuilder((_name != null ? 
_name : "")); + str.append(" : "); + for (final StackElement se : _stack) { + if (se.type == CURRENT_TXN) { + str.append(se.ref).append(", "); + } + } + + return str.toString(); + } + + protected void mark(final String name) { + _stack.push(new StackElement(CURRENT_TXN, name)); + } + + public boolean lock(final String name, final int timeoutSeconds) { + Merovingian2 lockMaster = Merovingian2.getLockMaster(); + if (lockMaster == null) { + throw new CloudRuntimeException("There's no support for locking yet"); + } + return lockMaster.acquire(name, timeoutSeconds); + } + + public boolean release(final String name) { + Merovingian2 lockMaster = Merovingian2.getLockMaster(); + if (lockMaster == null) { + throw new CloudRuntimeException("There's no support for locking yet"); + } + return lockMaster.release(name); + } + + /** + * @deprecated Use {@link Transaction} for new code + */ + @Deprecated + public void start() { + if (s_logger.isTraceEnabled()) { + s_logger.trace("txn: start requested by: " + buildName()); + } + + _stack.push(new StackElement(START_TXN, null)); + + if (_txn) { + s_logger.trace("txn: has already been started."); + return; + } + + _txn = true; + + _txnTime = System.currentTimeMillis(); + if (_conn != null) { + try { + s_logger.trace("txn: set auto commit to false"); + _conn.setAutoCommit(false); + } catch (final SQLException e) { + s_logger.warn("Unable to set auto commit: ", e); + throw new CloudRuntimeException("Unable to set auto commit: ", e); + } + } + } + + protected void closePreviousStatement() { + if (_stmt != null) { + try { + if (s_stmtLogger.isTraceEnabled()) { + s_stmtLogger.trace("Closing: " + _stmt.toString()); + } + try { + ResultSet rs = _stmt.getResultSet(); + if (rs != null && _stmt.getResultSetHoldability() != ResultSet.HOLD_CURSORS_OVER_COMMIT) { + rs.close(); + } + } catch(SQLException e) { + s_stmtLogger.trace("Unable to close resultset"); + } + _stmt.close(); + } catch (final SQLException e) { + s_stmtLogger.trace("Unable to 
close statement: " + _stmt.toString()); + } finally { + _stmt = null; + } + } + } + + /** + * Prepares an auto close statement. The statement is closed automatically if it is + * retrieved with this method. + * + * @param sql sql String + * @return PreparedStatement + * @throws SQLException if problem with JDBC layer. + * + * @see java.sql.Connection + */ + public PreparedStatement prepareAutoCloseStatement(final String sql) throws SQLException { + PreparedStatement stmt = prepareStatement(sql); + closePreviousStatement(); + _stmt = stmt; + return stmt; + } + + public PreparedStatement prepareStatement(final String sql) throws SQLException { + final Connection conn = getConnection(); + final PreparedStatement pstmt = conn.prepareStatement(sql); + if (s_stmtLogger.isTraceEnabled()) { + s_stmtLogger.trace("Preparing: " + sql); + } + return pstmt; + } + + /** + * Prepares an auto close statement. The statement is closed automatically if it is + * retrieved with this method. + * + * @param sql sql String + * @param autoGeneratedKeys keys that are generated + * @return PreparedStatement + * @throws SQLException if problem with JDBC layer. + * + * @see java.sql.Connection + */ + public PreparedStatement prepareAutoCloseStatement(final String sql, final int autoGeneratedKeys) throws SQLException { + final Connection conn = getConnection(); + final PreparedStatement pstmt = conn.prepareStatement(sql, autoGeneratedKeys); + if (s_stmtLogger.isTraceEnabled()) { + s_stmtLogger.trace("Preparing: " + sql); + } + closePreviousStatement(); + _stmt = pstmt; + return pstmt; + } + + /** + * Prepares an auto close statement. The statement is closed automatically if it is + * retrieved with this method. + * + * @param sql sql String + * @param columnNames names of the columns + * @return PreparedStatement + * @throws SQLException if problem with JDBC layer. 
+ * + * @see java.sql.Connection + */ + public PreparedStatement prepareAutoCloseStatement(final String sql, final String[] columnNames) throws SQLException { + final Connection conn = getConnection(); + final PreparedStatement pstmt = conn.prepareStatement(sql, columnNames); + if (s_stmtLogger.isTraceEnabled()) { + s_stmtLogger.trace("Preparing: " + sql); + } + closePreviousStatement(); + _stmt = pstmt; + return pstmt; + } + + /** + * Prepares an auto close statement. The statement is closed automatically if it is + * retrieved with this method. + * + * @param sql sql String + * @return PreparedStatement + * @throws SQLException if problem with JDBC layer. + * + * @see java.sql.Connection + */ + public PreparedStatement prepareAutoCloseStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException { + final Connection conn = getConnection(); + final PreparedStatement pstmt = conn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability); + if (s_stmtLogger.isTraceEnabled()) { + s_stmtLogger.trace("Preparing: " + sql); + } + closePreviousStatement(); + _stmt = pstmt; + return pstmt; + } + + /** + * Returns the db connection. + * + * Note: that you can call getConnection() but beaware that + * all prepare statements from the Connection are not garbage + * collected! + * + * @return DB Connection but make sure you understand that + * you are responsible for closing the PreparedStatement. 
+ * @throws SQLException + */ + public Connection getConnection() throws SQLException { + if (_conn == null) { + switch (_dbId) { + case CLOUD_DB: + if(s_ds != null) { + _conn = s_ds.getConnection(); + } else { + s_logger.warn("A static-initialized variable becomes null, process is dying?"); + throw new CloudRuntimeException("Database is not initialized, process is dying?"); + } + break; + case USAGE_DB: + if(s_usageDS != null) { + _conn = s_usageDS.getConnection(); + } else { + s_logger.warn("A static-initialized variable becomes null, process is dying?"); + throw new CloudRuntimeException("Database is not initialized, process is dying?"); + } + break; + case AWSAPI_DB: + if(s_awsapiDS != null) { + _conn = s_awsapiDS.getConnection(); + } else { + s_logger.warn("A static-initialized variable becomes null, process is dying?"); + throw new CloudRuntimeException("Database is not initialized, process is dying?"); + } + break; + + case SIMULATOR_DB: + if(s_simulatorDS != null) { + _conn = s_simulatorDS.getConnection(); + } else { + s_logger.warn("A static-initialized variable becomes null, process is dying?"); + throw new CloudRuntimeException("Database is not initialized, process is dying?"); + } + break; + default: + + throw new CloudRuntimeException("No database selected for the transaction"); + } + _conn.setAutoCommit(!_txn); + + // + // MySQL default transaction isolation level is REPEATABLE READ, + // to reduce chances of DB deadlock, we will use READ COMMITED isolation level instead + // see http://dev.mysql.com/doc/refman/5.0/en/innodb-deadlocks.html + // + _stack.push(new StackElement(CREATE_CONN, null)); + if (s_connLogger.isTraceEnabled()) { + s_connLogger.trace("Creating a DB connection with " + (_txn ? " txn: " : " no txn: ") + " for " + _dbId + ": dbconn" + System.identityHashCode(_conn) + ". 
Stack: " + buildName()); + } + } else { + s_logger.trace("conn: Using existing DB connection"); + } + + return _conn; + } + + protected boolean takeOver(final String name, final boolean create) { + if (_stack.size() != 0) { + if (!create) { + // If it is not a create transaction, then let's just use the current one. + if (s_logger.isTraceEnabled()) { + s_logger.trace("Using current transaction: " + toString()); + } + mark(name); + return false; + } + + final StackElement se = _stack.getFirst(); + if (se.type == CREATE_TXN) { + // This create is called inside of another create. Which is ok? + // We will let that create be responsible for cleaning up. + if (s_logger.isTraceEnabled()) { + s_logger.trace("Create using current transaction: " + toString()); + } + mark(name); + return false; + } + + s_logger.warn("Encountered a transaction that has leaked. Cleaning up. " + toString()); + cleanup(); + } + + if (s_logger.isTraceEnabled()) { + s_logger.trace("Took over the transaction: " + name); + } + _stack.push(new StackElement(create ? CREATE_TXN : CURRENT_TXN, name)); + _name = name; + return true; + } + + public void cleanup() { + closePreviousStatement(); + + removeUpTo(null, null); + if (_txn) { + rollbackTransaction(); + } + _txn = false; + _name = null; + + closeConnection(); + + _stack.clear(); + Merovingian2 lockMaster = Merovingian2.getLockMaster(); + if (lockMaster != null) { + lockMaster.cleanupThread(); + } + } + + public void close() { + removeUpTo(CURRENT_TXN, null); + + if (_stack.size() == 0) { + s_logger.trace("Transaction is done"); + cleanup(); + } + } + + /** + * close() is used by endTxn to close the connection. This method only + * closes the connection if the name is the same as what's stored. + * + * @param name + * @return true if this close actually closes the connection. false if not. + */ + public boolean close(final String name) { + if (_name == null) { // Already cleaned up. 
+ if (s_logger.isTraceEnabled()) { + s_logger.trace("Already cleaned up." + buildName()); + } + return true; + } + + if (!_name.equals(name)) { + close(); + return false; + } + + if (s_logger.isDebugEnabled() && _stack.size() > 2) { + s_logger.debug("Transaction is not closed properly: " + toString() + ". Called by " + buildName()); + } + + cleanup(); + + s_logger.trace("All done"); + return true; + } + + protected boolean hasTxnInStack() { + return peekInStack(START_TXN) != null; + } + + protected void clearLockTimes() { + if (s_lockLogger.isDebugEnabled()) { + for (Pair time : _lockTimes) { + s_lockLogger.trace("SQL " + time.first() + " took " + (System.currentTimeMillis() - time.second())); + } + _lockTimes.clear(); + } + } + + public boolean commit() { + if (!_txn) { + s_logger.warn("txn: Commit called when it is not a transaction: " + buildName()); + return false; + } + + Iterator it = _stack.iterator(); + while (it.hasNext()) { + StackElement st = it.next(); + if (st.type == START_TXN) { + it.remove(); + break; + } + } + + if (hasTxnInStack()) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("txn: Not committing because transaction started elsewhere: " + buildName() + " / " + toString()); + } + return false; + } + + _txn = false; + try { + if (_conn != null) { + _conn.commit(); + s_logger.trace("txn: DB Changes committed. Time = " + (System.currentTimeMillis() - _txnTime)); + clearLockTimes(); + closeConnection(); + } + return true; + } catch (final SQLException e) { + rollbackTransaction(); + throw new CloudRuntimeException("Unable to commit or close the connection. 
", e); + } + } + + protected void closeConnection() { + closePreviousStatement(); + + if (_conn == null) { + return; + } + + if (_txn) { + s_connLogger.trace("txn: Not closing DB connection because we're still in a transaction."); + return; + } + + try { + // we should only close db connection when it is not user managed + if (this._dbId != CONNECTED_DB) { + if (s_connLogger.isTraceEnabled()) { + s_connLogger.trace("Closing DB connection: dbconn" + System.identityHashCode(_conn)); + } + _conn.close(); + _conn = null; + } + + } catch (final SQLException e) { + s_logger.warn("Unable to close connection", e); + } + } + + protected void removeUpTo(String type, Object ref) { + boolean rollback = false; + Iterator it = _stack.iterator(); + while (it.hasNext()) { + StackElement item = it.next(); + + it.remove(); + + try { + if (item.type == type && (ref == null || item.ref == ref)) { + break; + } + + if (item.type == CURRENT_TXN) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("Releasing the current txn: " + (item.ref != null ? 
item.ref : "")); + } + } else if (item.type == CREATE_CONN) { + closeConnection(); + } else if (item.type == START_TXN) { + if (item.ref == null) { + rollback = true; + } else { + try { + _conn.rollback((Savepoint)ref); + rollback = false; + } catch (final SQLException e) { + s_logger.warn("Unable to rollback Txn.", e); + } + } + } else if (item.type == STATEMENT) { + try { + if (s_stmtLogger.isTraceEnabled()) { + s_stmtLogger.trace("Closing: " + ref.toString()); + } + Statement stmt = (Statement)ref; + try { + ResultSet rs = stmt.getResultSet(); + if (rs != null) { + rs.close(); + } + } catch(SQLException e) { + s_stmtLogger.trace("Unable to close resultset"); + } + stmt.close(); + } catch (final SQLException e) { + s_stmtLogger.trace("Unable to close statement: " + item); + } + } else if (item.type == ATTACHMENT) { + TransactionAttachment att = (TransactionAttachment)item.ref; + if (s_logger.isTraceEnabled()) { + s_logger.trace("Cleaning up " + att.getName()); + } + att.cleanup(); + } + } catch(Exception e) { + s_logger.error("Unable to clean up " + item, e); + } + } + + if (rollback) { + rollback(); + } + } + + protected void rollbackTransaction() { + closePreviousStatement(); + if (!_txn) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("Rollback called for " + _name + " when there's no transaction: " + buildName()); + } + return; + } + assert (!hasTxnInStack()) : "Who's rolling back transaction when there's still txn in stack?"; + _txn = false; + try { + if (_conn != null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Rolling back the transaction: Time = " + (System.currentTimeMillis() - _txnTime) + " Name = " + _name + "; called by " + buildName()); + } + _conn.rollback(); + } + clearLockTimes(); + closeConnection(); + } catch(final SQLException e) { + s_logger.warn("Unable to rollback", e); + } + } + + protected void rollbackSavepoint(Savepoint sp) { + try { + if (_conn != null) { + _conn.rollback(sp); + } + } catch (SQLException e) { + 
s_logger.warn("Unable to rollback to savepoint " + sp); + } + + if (!hasTxnInStack()) { + _txn = false; + closeConnection(); + } + } + + public void rollback() { + Iterator it = _stack.iterator(); + while (it.hasNext()) { + StackElement st = it.next(); + if (st.type == START_TXN) { + if (st.ref == null) { + it.remove(); + } else { + rollback((Savepoint)st.ref); + return; + } + } + } + + rollbackTransaction(); + } + + public Savepoint setSavepoint() throws SQLException { + _txn = true; + StackElement st = new StackElement(START_TXN, null); + _stack.push(st); + final Connection conn = getConnection(); + final Savepoint sp = conn.setSavepoint(); + st.ref = sp; + + return sp; + } + + public Savepoint setSavepoint(final String name) throws SQLException { + _txn = true; + StackElement st = new StackElement(START_TXN, null); + _stack.push(st); + final Connection conn = getConnection(); + final Savepoint sp = conn.setSavepoint(name); + st.ref = sp; + + return sp; + } + + public void releaseSavepoint(final Savepoint sp) throws SQLException { + removeTxn(sp); + if (_conn != null) { + _conn.releaseSavepoint(sp); + } + + if (!hasTxnInStack()) { + _txn = false; + closeConnection(); + } + } + + protected boolean hasSavepointInStack(Savepoint sp) { + Iterator it = _stack.iterator(); + while (it.hasNext()) { + StackElement se = it.next(); + if (se.type == START_TXN && se.ref == sp) { + return true; + } + } + return false; + } + + protected void removeTxn(Savepoint sp) { + assert hasSavepointInStack(sp) : "Removing a save point that's not in the stack"; + + if (!hasSavepointInStack(sp)) { + return; + } + + Iterator it = _stack.iterator(); + while (it.hasNext()) { + StackElement se = it.next(); + if (se.type == START_TXN) { + it.remove(); + if (se.ref == sp) { + return; + } + } + } + } + + public void rollback(final Savepoint sp) { + removeTxn(sp); + + rollbackSavepoint(sp); + } + + public Connection getCurrentConnection() { + return _conn; + } + + public List getStack() { + return 
_stack; + } + + protected TransactionLegacy() { + _name = null; + _conn = null; + _stack = null; + _txn = false; + _dbId = -1; + } + + @Override + protected void finalize() throws Throwable { + if (!(_conn == null && (_stack == null || _stack.size() == 0))) { + assert (false) : "Oh Alex oh alex...something is wrong with how we're doing this"; + s_logger.error("Something went wrong that a transaction is orphaned before db connection is closed"); + cleanup(); + } + } + + protected class StackElement { + public String type; + public Object ref; + + public StackElement (String type, Object ref) { + this.type = type; + this.ref = ref; + } + + @Override + public String toString() { + return type + "-" + ref; + } + } + + private static DataSource s_ds; + private static DataSource s_usageDS; + private static DataSource s_awsapiDS; + private static DataSource s_simulatorDS; + + static { + // Initialize with assumed db.properties file + initDataSource("db.properties"); + } + + public static void initDataSource(String propsFileName) { + try { + File dbPropsFile = PropertiesUtil.findConfigFile(propsFileName); + final Properties dbProps; + if (EncryptionSecretKeyChecker.useEncryption()) { + StandardPBEStringEncryptor encryptor = EncryptionSecretKeyChecker.getEncryptor(); + dbProps = new EncryptableProperties(encryptor); + } else { + dbProps = new Properties(); + } + try { + PropertiesUtil.loadFromFile(dbProps, dbPropsFile); + dbProps.load(new FileInputStream(dbPropsFile)); + } catch (IOException e) { + s_logger.fatal("Unable to load db properties file, pl. check the classpath and file path configuration", e); + return; + } catch (NullPointerException e) { + s_logger.fatal("Unable to locate db properties file within classpath or absolute path: " + propsFileName); + return; + } + + // FIXME: If params are missing...default them???? 
+ final int cloudMaxActive = Integer.parseInt(dbProps.getProperty("db.cloud.maxActive")); + final int cloudMaxIdle = Integer.parseInt(dbProps.getProperty("db.cloud.maxIdle")); + final long cloudMaxWait = Long.parseLong(dbProps.getProperty("db.cloud.maxWait")); + final String cloudUsername = dbProps.getProperty("db.cloud.username"); + final String cloudPassword = dbProps.getProperty("db.cloud.password"); + final String cloudHost = dbProps.getProperty("db.cloud.host"); + final int cloudPort = Integer.parseInt(dbProps.getProperty("db.cloud.port")); + final String cloudDbName = dbProps.getProperty("db.cloud.name"); + final boolean cloudAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.cloud.autoReconnect")); + final String cloudValidationQuery = dbProps.getProperty("db.cloud.validationQuery"); + final String cloudIsolationLevel = dbProps.getProperty("db.cloud.isolation.level"); + + int isolationLevel = Connection.TRANSACTION_READ_COMMITTED; + if (cloudIsolationLevel == null) { + isolationLevel = Connection.TRANSACTION_READ_COMMITTED; + } else if (cloudIsolationLevel.equalsIgnoreCase("readcommitted")) { + isolationLevel = Connection.TRANSACTION_READ_COMMITTED; + } else if (cloudIsolationLevel.equalsIgnoreCase("repeatableread")) { + isolationLevel = Connection.TRANSACTION_REPEATABLE_READ; + } else if (cloudIsolationLevel.equalsIgnoreCase("serializable")) { + isolationLevel = Connection.TRANSACTION_SERIALIZABLE; + } else if (cloudIsolationLevel.equalsIgnoreCase("readuncommitted")) { + isolationLevel = Connection.TRANSACTION_READ_UNCOMMITTED; + } else { + s_logger.warn("Unknown isolation level " + cloudIsolationLevel + ". 
Using read committed");
new StackKeyedObjectPoolFactory() : null); + + final PoolableConnectionFactory cloudPoolableConnectionFactory = new PoolableConnectionFactory(cloudConnectionFactory, cloudConnectionPool, poolableObjFactory, + cloudValidationQuery, false, false, isolationLevel); + + // Default Data Source for CloudStack + s_ds = new PoolingDataSource(cloudPoolableConnectionFactory.getPool()); + + // Configure the usage db + final int usageMaxActive = Integer.parseInt(dbProps.getProperty("db.usage.maxActive")); + final int usageMaxIdle = Integer.parseInt(dbProps.getProperty("db.usage.maxIdle")); + final long usageMaxWait = Long.parseLong(dbProps.getProperty("db.usage.maxWait")); + final String usageUsername = dbProps.getProperty("db.usage.username"); + final String usagePassword = dbProps.getProperty("db.usage.password"); + final String usageHost = dbProps.getProperty("db.usage.host"); + final int usagePort = Integer.parseInt(dbProps.getProperty("db.usage.port")); + final String usageDbName = dbProps.getProperty("db.usage.name"); + final boolean usageAutoReconnect = Boolean.parseBoolean(dbProps.getProperty("db.usage.autoReconnect")); + final String usageUrl = dbProps.getProperty("db.usage.url.params"); + + final GenericObjectPool usageConnectionPool = new GenericObjectPool(null, usageMaxActive, GenericObjectPool.DEFAULT_WHEN_EXHAUSTED_ACTION, + usageMaxWait, usageMaxIdle); + + final ConnectionFactory usageConnectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://" + usageHost + ":" + usagePort + "/" + usageDbName + + "?autoReconnect=" + usageAutoReconnect + (usageUrl != null ? 
"&" + usageUrl : ""), usageUsername, usagePassword); + + final PoolableConnectionFactory usagePoolableConnectionFactory = new PoolableConnectionFactory(usageConnectionFactory, usageConnectionPool, + new StackKeyedObjectPoolFactory(), null, false, false); + + // Data Source for usage server + s_usageDS = new PoolingDataSource(usagePoolableConnectionFactory.getPool()); + + // Configure awsapi db + final String awsapiDbName = dbProps.getProperty("db.awsapi.name"); + final GenericObjectPool awsapiConnectionPool = new GenericObjectPool(null, usageMaxActive, GenericObjectPool.DEFAULT_WHEN_EXHAUSTED_ACTION, + usageMaxWait, usageMaxIdle); + final ConnectionFactory awsapiConnectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://" + cloudHost + ":" + cloudPort + "/" + awsapiDbName + + "?autoReconnect=" + usageAutoReconnect, cloudUsername, cloudPassword); + final PoolableConnectionFactory awsapiPoolableConnectionFactory = new PoolableConnectionFactory(awsapiConnectionFactory, awsapiConnectionPool, + new StackKeyedObjectPoolFactory(), null, false, false); + + // Data Source for awsapi + s_awsapiDS = new PoolingDataSource(awsapiPoolableConnectionFactory.getPool()); + + try { + // Configure the simulator db + final int simulatorMaxActive = Integer.parseInt(dbProps.getProperty("db.simulator.maxActive")); + final int simulatorMaxIdle = Integer.parseInt(dbProps.getProperty("db.simulator.maxIdle")); + final long simulatorMaxWait = Long.parseLong(dbProps.getProperty("db.simulator.maxWait")); + final String simulatorUsername = dbProps.getProperty("db.simulator.username"); + final String simulatorPassword = dbProps.getProperty("db.simulator.password"); + final String simulatorHost = dbProps.getProperty("db.simulator.host"); + final int simulatorPort = Integer.parseInt(dbProps.getProperty("db.simulator.port")); + final String simulatorDbName = dbProps.getProperty("db.simulator.name"); + final boolean simulatorAutoReconnect = 
Boolean.parseBoolean(dbProps.getProperty("db.simulator.autoReconnect")); + + final GenericObjectPool simulatorConnectionPool = new GenericObjectPool(null, simulatorMaxActive, GenericObjectPool.DEFAULT_WHEN_EXHAUSTED_ACTION, + simulatorMaxWait, simulatorMaxIdle); + + final ConnectionFactory simulatorConnectionFactory = new DriverManagerConnectionFactory("jdbc:mysql://" + simulatorHost + ":" + simulatorPort + "/" + simulatorDbName + + "?autoReconnect=" + simulatorAutoReconnect, simulatorUsername, simulatorPassword); + + final PoolableConnectionFactory simulatorPoolableConnectionFactory = new PoolableConnectionFactory(simulatorConnectionFactory, simulatorConnectionPool, + new StackKeyedObjectPoolFactory(), null, false, false); + s_simulatorDS = new PoolingDataSource(simulatorPoolableConnectionFactory.getPool()); + } catch (Exception e) { + s_logger.debug("Simulator DB properties are not available. Not initializing simulator DS"); + } + } catch (final Exception e) { + s_ds = getDefaultDataSource("cloud"); + s_usageDS = getDefaultDataSource("cloud_usage"); + s_simulatorDS = getDefaultDataSource("cloud_simulator"); + s_logger.warn("Unable to load db configuration, using defaults with 5 connections. Falling back on assumed datasource on localhost:3306 using username:password=cloud:cloud. 
Please check your configuration", e); + } + } + + private static DataSource getDefaultDataSource(final String database) { + final GenericObjectPool connectionPool = new GenericObjectPool(null, 5); + final ConnectionFactory connectionFactory = new DriverManagerConnectionFactory( + "jdbc:mysql://localhost:3306/" + database, "cloud", "cloud"); + final PoolableConnectionFactory poolableConnectionFactory = new PoolableConnectionFactory( + connectionFactory, connectionPool, null, null, false, true); + return new PoolingDataSource( + /* connectionPool */poolableConnectionFactory.getPool()); + } + + /** + * Used for unit testing primarily + * + * @param conn + */ + protected void setConnection(Connection conn) { + this._conn = conn; + } +} diff --git a/framework/db/src/com/cloud/utils/db/TransactionMBeanImpl.java b/framework/db/src/com/cloud/utils/db/TransactionMBeanImpl.java index d51a9bd0cc7..73511b17ce4 100644 --- a/framework/db/src/com/cloud/utils/db/TransactionMBeanImpl.java +++ b/framework/db/src/com/cloud/utils/db/TransactionMBeanImpl.java @@ -25,21 +25,21 @@ import java.util.concurrent.ConcurrentHashMap; import javax.management.StandardMBean; -import com.cloud.utils.db.Transaction.StackElement; +import com.cloud.utils.db.TransactionLegacy.StackElement; public class TransactionMBeanImpl extends StandardMBean implements TransactionMBean { - Map _txns = new ConcurrentHashMap(); + Map _txns = new ConcurrentHashMap(); public TransactionMBeanImpl() { super(TransactionMBean.class, false); } - public void addTransaction(Transaction txn) { + public void addTransaction(TransactionLegacy txn) { _txns.put(txn.getId(), txn); } - public void removeTransaction(Transaction txn) { + public void removeTransaction(TransactionLegacy txn) { _txns.remove(txn.getId()); } @@ -53,7 +53,7 @@ public class TransactionMBeanImpl extends StandardMBean implements TransactionMB int[] count = new int[2]; count[0] = 0; count[1] = 0; - for (Transaction txn : _txns.values()) { + for (TransactionLegacy 
txn : _txns.values()) { if (txn.getStack().size() > 0) { count[0]++; } @@ -67,7 +67,7 @@ public class TransactionMBeanImpl extends StandardMBean implements TransactionMB @Override public List> getTransactions() { ArrayList> txns = new ArrayList>(); - for (Transaction info : _txns.values()) { + for (TransactionLegacy info : _txns.values()) { txns.add(toMap(info)); } return txns; @@ -76,7 +76,7 @@ public class TransactionMBeanImpl extends StandardMBean implements TransactionMB @Override public List> getActiveTransactions() { ArrayList> txns = new ArrayList>(); - for (Transaction txn : _txns.values()) { + for (TransactionLegacy txn : _txns.values()) { if (txn.getStack().size() > 0 || txn.getCurrentConnection() != null) { txns.add(toMap(txn)); } @@ -84,7 +84,7 @@ public class TransactionMBeanImpl extends StandardMBean implements TransactionMB return txns; } - protected Map toMap(Transaction txn) { + protected Map toMap(TransactionLegacy txn) { Map map = new HashMap(); map.put("name", txn.getName()); map.put("id", Long.toString(txn.getId())); @@ -103,7 +103,7 @@ public class TransactionMBeanImpl extends StandardMBean implements TransactionMB @Override public List> getTransactionsWithDatabaseConnection() { ArrayList> txns = new ArrayList>(); - for (Transaction txn : _txns.values()) { + for (TransactionLegacy txn : _txns.values()) { if (txn.getCurrentConnection() != null) { txns.add(toMap(txn)); } diff --git a/framework/db/src/com/cloud/utils/db/TransactionStatus.java b/framework/db/src/com/cloud/utils/db/TransactionStatus.java new file mode 100644 index 00000000000..616603c0932 --- /dev/null +++ b/framework/db/src/com/cloud/utils/db/TransactionStatus.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package com.cloud.utils.db; + +/** + * Placeholder for possible future features + */ +public interface TransactionStatus { +} diff --git a/framework/db/test/com/cloud/utils/db/DbTestDao.java b/framework/db/test/com/cloud/utils/db/DbTestDao.java index 9530b3b2d44..7db5ba867db 100644 --- a/framework/db/test/com/cloud/utils/db/DbTestDao.java +++ b/framework/db/test/com/cloud/utils/db/DbTestDao.java @@ -29,7 +29,7 @@ public class DbTestDao extends GenericDaoBase implements Generic @DB public void create(int fldInt, long fldLong, String fldString) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { txn.start(); @@ -48,7 +48,7 @@ public class DbTestDao extends GenericDaoBase implements Generic @DB public void update(int fldInt, long fldLong, String fldString) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { txn.start(); diff --git a/framework/db/test/com/cloud/utils/db/DbTestUtils.java b/framework/db/test/com/cloud/utils/db/DbTestUtils.java index 11ae1aa4d0b..2458b8c7e26 100644 --- a/framework/db/test/com/cloud/utils/db/DbTestUtils.java +++ b/framework/db/test/com/cloud/utils/db/DbTestUtils.java @@ -33,7 +33,7 @@ public class DbTestUtils { throw new RuntimeException("Unable to clean the database 
because I can't find " + file); } - Connection conn = Transaction.getStandaloneConnection(); + Connection conn = TransactionLegacy.getStandaloneConnection(); ScriptRunner runner = new ScriptRunner(conn, autoCommit, stopOnError); FileReader reader; @@ -63,7 +63,7 @@ public class DbTestUtils { throw new RuntimeException("Unable to clean the database because I can't find " + file); } - Connection conn = Transaction.getStandaloneUsageConnection(); + Connection conn = TransactionLegacy.getStandaloneUsageConnection(); ScriptRunner runner = new ScriptRunner(conn, autoCommit, stopOnError); FileReader reader; diff --git a/framework/db/test/com/cloud/utils/db/TestTransaction.java b/framework/db/test/com/cloud/utils/db/TestTransaction.java new file mode 100644 index 00000000000..4ee08f1f66f --- /dev/null +++ b/framework/db/test/com/cloud/utils/db/TestTransaction.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.utils.db; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.*; + +import java.io.FileNotFoundException; +import java.sql.Connection; +import java.util.concurrent.atomic.AtomicInteger; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +public class TestTransaction { + + TransactionLegacy txn; + Connection conn; + + @Before + public void setup() { + setup(TransactionLegacy.CLOUD_DB); + } + + public void setup(short db) { + txn = TransactionLegacy.open(db); + conn = Mockito.mock(Connection.class); + txn.setConnection(conn); + } + + @After + public void after() { + TransactionLegacy.currentTxn().close(); + } + + @Test + public void testCommit() throws Exception { + assertEquals(42L, Transaction.execute(new TransactionCallback() { + @Override + public Object doInTransaction(TransactionStatus status) { + return 42L; + } + })); + + verify(conn).setAutoCommit(false); + verify(conn, times(1)).commit(); + verify(conn, times(0)).rollback(); + verify(conn, times(1)).close(); + } + + @Test + public void testRollback() throws Exception { + try { + Transaction.execute(new TransactionCallback() { + @Override + public Object doInTransaction(TransactionStatus status) { + throw new RuntimeException("Panic!"); + } + }); + fail(); + } catch (RuntimeException e) { + assertEquals("Panic!", e.getMessage()); + } + + verify(conn).setAutoCommit(false); + verify(conn, times(0)).commit(); + verify(conn, times(1)).rollback(); + verify(conn, times(1)).close(); + } + + @Test + public void testRollbackWithException() throws Exception { + try { + Transaction.execute(new TransactionCallbackWithException() { + @Override + public Object doInTransaction(TransactionStatus status) throws FileNotFoundException { + assertEquals(TransactionLegacy.CLOUD_DB, TransactionLegacy.currentTxn().getDatabaseId().shortValue()); + + throw new FileNotFoundException("Panic!"); + } + }); + fail(); + } catch 
(FileNotFoundException e) { + assertEquals("Panic!", e.getMessage()); + } + + verify(conn).setAutoCommit(false); + verify(conn, times(0)).commit(); + verify(conn, times(1)).rollback(); + verify(conn, times(1)).close(); + } + + @Test + public void testWithExceptionNoReturn() throws Exception { + final AtomicInteger i = new AtomicInteger(0); + assertTrue(Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws FileNotFoundException { + i.incrementAndGet(); + } + })); + + assertEquals(1, i.get()); + verify(conn).setAutoCommit(false); + verify(conn, times(1)).commit(); + verify(conn, times(0)).rollback(); + verify(conn, times(1)).close(); + } + + @Test + public void testOtherdatabaseRollback() throws Exception { + after(); + setup(TransactionLegacy.AWSAPI_DB); + + try { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + assertEquals(TransactionLegacy.AWSAPI_DB, TransactionLegacy.currentTxn().getDatabaseId().shortValue()); + + throw new RuntimeException("Panic!"); + } + }); + fail(); + } catch (RuntimeException e) { + assertEquals("Panic!", e.getMessage()); + } + + + verify(conn).setAutoCommit(false); + verify(conn, times(0)).commit(); + verify(conn, times(1)).rollback(); + verify(conn, times(1)).close(); + } + +} diff --git a/framework/db/test/com/cloud/utils/db/TransactionTest.java b/framework/db/test/com/cloud/utils/db/TransactionTest.java index 101a533f836..92b2f36a2fc 100644 --- a/framework/db/test/com/cloud/utils/db/TransactionTest.java +++ b/framework/db/test/com/cloud/utils/db/TransactionTest.java @@ -41,7 +41,7 @@ public class TransactionTest { Connection conn = null; PreparedStatement pstmt = null; try { - conn = Transaction.getStandaloneConnection(); + conn = TransactionLegacy.getStandaloneConnection(); pstmt = conn.prepareStatement("CREATE TABLE `cloud`.`test` (" + 
"`id` bigint unsigned NOT NULL UNIQUE AUTO_INCREMENT," + "`fld_int` int unsigned," @@ -75,27 +75,27 @@ public class TransactionTest { */ public void testUserManagedConnection() { DbTestDao testDao = ComponentContext.inject(DbTestDao.class); - Transaction txn = Transaction.open("SingleConnectionThread"); + TransactionLegacy txn = TransactionLegacy.open("SingleConnectionThread"); Connection conn = null; try { - conn = Transaction.getStandaloneConnectionWithException(); + conn = TransactionLegacy.getStandaloneConnectionWithException(); txn.transitToUserManagedConnection(conn); // try two SQLs to make sure that they are using the same connection // acquired above. testDao.create(1, 1, "Record 1"); - Connection checkConn = Transaction.currentTxn().getConnection(); + Connection checkConn = TransactionLegacy.currentTxn().getConnection(); if (checkConn != conn) { Assert.fail("A new db connection is acquired instead of using old one after create sql"); } testDao.update(2, 2, "Record 1"); - Connection checkConn2 = Transaction.currentTxn().getConnection(); + Connection checkConn2 = TransactionLegacy.currentTxn().getConnection(); if (checkConn2 != conn) { Assert.fail("A new db connection is acquired instead of using old one after update sql"); } } catch (SQLException e) { Assert.fail(e.getMessage()); } finally { - txn.transitToAutoManagedConnection(Transaction.CLOUD_DB); + txn.transitToAutoManagedConnection(TransactionLegacy.CLOUD_DB); txn.close(); if (conn != null) { @@ -117,28 +117,28 @@ public class TransactionTest { // acquire a db connection and keep it Connection conn = null; try { - conn = Transaction.getStandaloneConnectionWithException(); + conn = TransactionLegacy.getStandaloneConnectionWithException(); } catch (SQLException ex) { throw new CloudRuntimeException("Problem with getting db connection", ex); } // start heartbeat loop, make sure that each loop still use the same // connection - Transaction txn = null; + TransactionLegacy txn = null; for (int i = 0; i < 3; 
i++) { - txn = Transaction.open("HeartbeatSimulator"); + txn = TransactionLegacy.open("HeartbeatSimulator"); try { txn.transitToUserManagedConnection(conn); testDao.create(i, i, "Record " + i); - Connection checkConn = Transaction.currentTxn().getConnection(); + Connection checkConn = TransactionLegacy.currentTxn().getConnection(); if (checkConn != conn) { Assert.fail("A new db connection is acquired instead of using old one in loop " + i); } } catch (SQLException e) { Assert.fail(e.getMessage()); } finally { - txn.transitToAutoManagedConnection(Transaction.CLOUD_DB); + txn.transitToAutoManagedConnection(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -161,7 +161,7 @@ public class TransactionTest { Connection conn = null; PreparedStatement pstmt = null; try { - conn = Transaction.getStandaloneConnection(); + conn = TransactionLegacy.getStandaloneConnection(); pstmt = conn.prepareStatement("truncate table `cloud`.`test`"); pstmt.execute(); @@ -189,7 +189,7 @@ public class TransactionTest { Connection conn = null; PreparedStatement pstmt = null; try { - conn = Transaction.getStandaloneConnection(); + conn = TransactionLegacy.getStandaloneConnection(); pstmt = conn.prepareStatement("DROP TABLE IF EXISTS `cloud`.`test`"); pstmt.execute(); diff --git a/agent/scripts/agent.sh b/framework/db/test/db.properties old mode 100755 new mode 100644 similarity index 80% rename from agent/scripts/agent.sh rename to framework/db/test/db.properties index 867571c3d62..cc1215ff35c --- a/agent/scripts/agent.sh +++ b/framework/db/test/db.properties @@ -1,4 +1,3 @@ -#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -15,17 +14,5 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. - -#run.sh runs the agent client. 
- -# set -x - -while true -do - ./run.sh "$@" - ex=$? - if [ $ex -eq 0 ] || [ $ex -eq 1 ] || [ $ex -eq 66 ] || [ $ex -gt 128 ]; then - exit $ex - fi -done +# Just here to make the unit test not blow up \ No newline at end of file diff --git a/framework/ipc/resources/META-INF/cloudstack/core/spring-framework-ipc-core-context.xml b/framework/ipc/resources/META-INF/cloudstack/core/spring-framework-ipc-core-context.xml new file mode 100644 index 00000000000..effa23efded --- /dev/null +++ b/framework/ipc/resources/META-INF/cloudstack/core/spring-framework-ipc-core-context.xml @@ -0,0 +1,59 @@ + + + + + + + + org.apache.cloudstack.framework + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/framework/ipc/src/org/apache/cloudstack/framework/client/ClientTransportProvider.java b/framework/ipc/src/org/apache/cloudstack/framework/client/ClientTransportProvider.java index 023b3181b20..923578cf5f1 100644 --- a/framework/ipc/src/org/apache/cloudstack/framework/client/ClientTransportProvider.java +++ b/framework/ipc/src/org/apache/cloudstack/framework/client/ClientTransportProvider.java @@ -27,6 +27,7 @@ import org.apache.cloudstack.framework.serializer.MessageSerializer; import org.apache.cloudstack.framework.transport.TransportEndpoint; import org.apache.cloudstack.framework.transport.TransportEndpointSite; import org.apache.cloudstack.framework.transport.TransportProvider; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -62,10 +63,9 @@ public class ClientTransportProvider implements TransportProvider { _executor = Executors.newFixedThreadPool(_poolSize, new NamedThreadFactory("Transport-Worker")); _connection = new ClientTransportConnection(this); - _executor.execute(new Runnable() { - - @Override - public void run() { + _executor.execute(new ManagedContextRunnable() { + @Override + protected void runInContext() { try { _connection.connect(_serverAddress, 
_serverPort); } catch(Throwable e) { diff --git a/framework/ipc/src/org/apache/cloudstack/framework/server/ServerTransportProvider.java b/framework/ipc/src/org/apache/cloudstack/framework/server/ServerTransportProvider.java index b19a7c9265f..45c3e2a41d7 100644 --- a/framework/ipc/src/org/apache/cloudstack/framework/server/ServerTransportProvider.java +++ b/framework/ipc/src/org/apache/cloudstack/framework/server/ServerTransportProvider.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.framework.transport.TransportEndpoint; import org.apache.cloudstack.framework.transport.TransportEndpointSite; import org.apache.cloudstack.framework.transport.TransportPdu; import org.apache.cloudstack.framework.transport.TransportProvider; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -132,11 +133,10 @@ public class ServerTransportProvider implements TransportProvider { @Override public void requestSiteOutput(final TransportEndpointSite site) { - _executor.execute(new Runnable() { - - @Override - public void run() { - try { + _executor.execute(new ManagedContextRunnable() { + @Override + protected void runInContext() { + try { site.processOutput(); site.ackOutputProcessSignal(); } catch(Throwable e) { diff --git a/framework/jobs/resources/META-INF/cloudstack/core/spring-framework-jobs-core-context.xml b/framework/jobs/resources/META-INF/cloudstack/core/spring-framework-jobs-core-context.xml new file mode 100644 index 00000000000..85cad0216c0 --- /dev/null +++ b/framework/jobs/resources/META-INF/cloudstack/core/spring-framework-jobs-core-context.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/AsyncJobExecutionContext.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/AsyncJobExecutionContext.java index 01365939127..595800d2524 100644 --- 
a/framework/jobs/src/org/apache/cloudstack/framework/jobs/AsyncJobExecutionContext.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/AsyncJobExecutionContext.java @@ -17,12 +17,12 @@ package org.apache.cloudstack.framework.jobs; import org.apache.log4j.Logger; - import org.apache.cloudstack.framework.jobs.dao.AsyncJobJoinMapDao; import org.apache.cloudstack.framework.jobs.impl.AsyncJobJoinMapVO; import org.apache.cloudstack.framework.jobs.impl.JobSerializerHelper; import org.apache.cloudstack.framework.jobs.impl.SyncQueueItem; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.managed.threadlocal.ManagedThreadLocal; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; @@ -39,7 +39,7 @@ public class AsyncJobExecutionContext { _joinMapDao = joinMapDao; } - private static ThreadLocal s_currentExectionContext = new ThreadLocal(); + private static ManagedThreadLocal s_currentExectionContext = new ManagedThreadLocal(); public AsyncJobExecutionContext() { } diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java index fb3845caa31..ed161e75d2c 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class AsyncJobDaoImpl extends GenericDaoBase implements AsyncJobDao { private static final Logger s_logger = Logger.getLogger(AsyncJobDaoImpl.class.getName()); @@ -182,7 +182,7 @@ public class AsyncJobDaoImpl extends GenericDaoBase implements + ", job_result='" + 
jobResultMessage + "' where job_status=" + JobInfo.Status.IN_PROGRESS.ordinal() + " AND (job_executing_msid=? OR (job_executing_msid IS NULL AND job_init_msid=?))"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java index 20d8ba69fdc..d4ca0d76cb7 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/AsyncJobJoinMapDaoImpl.java @@ -34,7 +34,7 @@ import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.UpdateBuilder; import com.cloud.utils.exception.CloudRuntimeException; @@ -157,7 +157,7 @@ public class AsyncJobJoinMapDaoImpl extends GenericDaoBase findJobsToWake(long joinedJobId) { // TODO: We should fix this. We shouldn't be crossing daos in a dao code. List standaloneList = new ArrayList(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); String sql = "SELECT job_id FROM async_job_join_map WHERE join_job_id = ? 
AND job_id NOT IN (SELECT content_id FROM sync_queue_item)"; try { PreparedStatement pstmt = txn.prepareStatement(sql); @@ -231,7 +231,7 @@ public class AsyncJobJoinMapDaoImpl extends GenericDaoBase findJobsToWakeBetween(Date cutDate) { List standaloneList = new ArrayList(); - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { String sql = "SELECT job_id FROM async_job_join_map WHERE next_wakeup < ? AND expiration > ? AND job_id NOT IN (SELECT content_id FROM sync_queue_item)"; PreparedStatement pstmt = txn.prepareStatement(sql); @@ -260,7 +260,7 @@ public class AsyncJobJoinMapDaoImpl extends GenericDaoBase wakeupByJoinedJobCompletion(long joinedJobId) { // List standaloneList = new ArrayList(); // -// Transaction txn = Transaction.currentTxn(); +// TransactionLegacy txn = TransactionLegacy.currentTxn(); // PreparedStatement pstmt = null; // try { // txn.start(); diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueDaoImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueDaoImpl.java index f7d9d72dc0b..01efc4e91d3 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueDaoImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class SyncQueueDaoImpl extends GenericDaoBase implements SyncQueueDao { private static final Logger s_logger = Logger.getLogger(SyncQueueDaoImpl.class.getName()); @@ -51,7 +51,7 @@ public class SyncQueueDaoImpl extends GenericDaoBase implemen String sql = "INSERT IGNORE INTO sync_queue(sync_objtype, sync_objid, created, last_updated)" + " values(?, ?, ?, ?)"; - Transaction txn = 
Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java index ccb7f103742..2f04a7cc890 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/dao/SyncQueueItemDaoImpl.java @@ -37,7 +37,7 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @DB public class SyncQueueItemDaoImpl extends GenericDaoBase implements SyncQueueItemDao { @@ -49,7 +49,7 @@ public class SyncQueueItemDaoImpl extends GenericDaoBase queueIdSearch = createSearchBuilder(Long.class); queueIdSearch.and("contentId", queueIdSearch.entity().getContentId(), Op.EQ); queueIdSearch.and("contentType", queueIdSearch.entity().getContentType(), Op.EQ); - queueIdSearch.selectField(queueIdSearch.entity().getId()); + queueIdSearch.selectFields(queueIdSearch.entity().getId()); queueIdSearch.done(); } @@ -83,7 +83,7 @@ public class SyncQueueItemDaoImpl extends GenericDaoBase " ORDER BY i.id " + " LIMIT 0, ?"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmt = null; try { pstmt = txn.prepareAutoCloseStatement(sql); diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java index 2a4a06cc958..29a299ff423 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java +++ 
b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobManagerImpl.java @@ -18,7 +18,6 @@ package org.apache.cloudstack.framework.jobs.impl; import java.io.File; -import java.io.FileInputStream; import java.util.Arrays; import java.util.Collections; import java.util.Date; @@ -36,7 +35,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; @@ -54,6 +52,7 @@ import org.apache.cloudstack.framework.messagebus.MessageDetector; import org.apache.cloudstack.framework.messagebus.PublishScope; import org.apache.cloudstack.jobs.JobInfo; import org.apache.cloudstack.jobs.JobInfo.Status; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.identity.ManagementServerNode; import com.cloud.cluster.ClusterManagerListener; @@ -70,8 +69,11 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExceptionUtil; import com.cloud.utils.mgmt.JmxUtil; @@ -177,19 +179,22 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @SuppressWarnings("unchecked") @Override @DB - public long submitAsyncJob(AsyncJob job, String syncObjType, long syncObjId) { - Transaction txt = Transaction.currentTxn(); + public long submitAsyncJob(final AsyncJob job, final String syncObjType, final long syncObjId) { try { @SuppressWarnings("rawtypes") - GenericDao dao = 
GenericDaoBase.getDao(job.getClass()); + final GenericDao dao = GenericDaoBase.getDao(job.getClass()); - txt.start(); - job.setInitMsid(getMsid()); - dao.persist(job); + return Transaction.execute(new TransactionCallback() { + @Override + public Long doInTransaction(TransactionStatus status) { + job.setInitMsid(getMsid()); + dao.persist(job); - syncAsyncJobExecution(job, syncObjType, syncObjId, 1); - txt.commit(); - return job.getId(); + syncAsyncJobExecution(job, syncObjType, syncObjId, 1); + + return job.getId(); + } + }); } catch (Exception e) { String errMsg = "Unable to schedule async job for command " + job.getCmd() + ", unexpected exception."; s_logger.warn(errMsg, e); @@ -199,123 +204,110 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @Override @DB - public void completeAsyncJob(long jobId, Status jobStatus, int resultCode, String resultObject) { + public void completeAsyncJob(final long jobId, final Status jobStatus, final int resultCode, final String resultObject) { if (s_logger.isDebugEnabled()) { s_logger.debug("Complete async job-" + jobId + ", jobStatus: " + jobStatus + ", resultCode: " + resultCode + ", result: " + resultObject); } - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - AsyncJobVO job = _jobDao.findById(jobId); - if (job == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. " + jobStatus + ", resultCode: " + resultCode + ", result: " + - resultObject); - } - - txn.rollback(); - return; + final AsyncJobVO job = _jobDao.findById(jobId); + if (job == null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("job-" + jobId + " no longer exists, we just log completion info here. 
" + jobStatus + ", resultCode: " + resultCode + ", result: " + + resultObject); } - if (job.getStatus() != JobInfo.Status.IN_PROGRESS) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " is already completed."); - } - - txn.rollback(); - return; - } - - job.setCompleteMsid(getMsid()); - job.setStatus(jobStatus); - job.setResultCode(resultCode); - - // reset attached object - job.setInstanceType(null); - job.setInstanceId(null); - - if (resultObject != null) { - job.setResult(resultObject); - } - - job.setLastUpdated(DateUtil.currentGMTTime()); - _jobDao.update(jobId, job); - - List wakeupList = wakeupByJoinedJobCompletion(jobId); - _joinMapDao.disjoinAllJobs(jobId); - - txn.commit(); - - for (Long id : wakeupList) { - // TODO, we assume that all jobs in this category is API job only - AsyncJobVO jobToWakeup = _jobDao.findById(id); - if (jobToWakeup != null && (jobToWakeup.getPendingSignals() & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0) - scheduleExecution(jobToWakeup, false); - } - - _messageBus.publish(null, AsyncJob.Topics.JOB_STATE, PublishScope.GLOBAL, jobId); - } catch (Exception e) { - s_logger.error("Unexpected exception while completing async job-" + jobId, e); - txn.rollback(); + return; } + + if (job.getStatus() != JobInfo.Status.IN_PROGRESS) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("job-" + jobId + " is already completed."); + } + + return; + } + + List wakeupList = Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + job.setCompleteMsid(getMsid()); + job.setStatus(jobStatus); + job.setResultCode(resultCode); + + // reset attached object + job.setInstanceType(null); + job.setInstanceId(null); + + if (resultObject != null) { + job.setResult(resultObject); + } + + job.setLastUpdated(DateUtil.currentGMTTime()); + _jobDao.update(jobId, job); + + List wakeupList = wakeupByJoinedJobCompletion(jobId); + _joinMapDao.disjoinAllJobs(jobId); + + return 
wakeupList; + } + }); + + for (Long id : wakeupList) { + // TODO, we assume that all jobs in this category is API job only + AsyncJobVO jobToWakeup = _jobDao.findById(id); + if (jobToWakeup != null && (jobToWakeup.getPendingSignals() & AsyncJob.Constants.SIGNAL_MASK_WAKEUP) != 0) + scheduleExecution(jobToWakeup, false); + } + + _messageBus.publish(null, AsyncJob.Topics.JOB_STATE, PublishScope.GLOBAL, jobId); } @Override @DB - public void updateAsyncJobStatus(long jobId, int processStatus, String resultObject) { + public void updateAsyncJobStatus(final long jobId, final int processStatus, final String resultObject) { if (s_logger.isDebugEnabled()) { s_logger.debug("Update async-job progress, job-" + jobId + ", processStatus: " + processStatus + ", result: " + resultObject); } - Transaction txt = Transaction.currentTxn(); - try { - txt.start(); - AsyncJobVO job = _jobDao.findById(jobId); - if (job == null) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("job-" + jobId + " no longer exists, we just log progress info here. progress status: " + processStatus); - } - - txt.rollback(); - return; + final AsyncJobVO job = _jobDao.findById(jobId); + if (job == null) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("job-" + jobId + " no longer exists, we just log progress info here. 
progress status: " + processStatus); } - job.setProcessStatus(processStatus); - if (resultObject != null) { - job.setResult(resultObject); - } - job.setLastUpdated(DateUtil.currentGMTTime()); - _jobDao.update(jobId, job); - txt.commit(); - } catch (Exception e) { - s_logger.error("Unexpected exception while updating async job-" + jobId + " status: ", e); - txt.rollback(); + return; } + + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + job.setProcessStatus(processStatus); + if (resultObject != null) { + job.setResult(resultObject); + } + job.setLastUpdated(DateUtil.currentGMTTime()); + _jobDao.update(jobId, job); + } + }); } @Override @DB - public void updateAsyncJobAttachment(long jobId, String instanceType, Long instanceId) { + public void updateAsyncJobAttachment(final long jobId, final String instanceType, final Long instanceId) { if (s_logger.isDebugEnabled()) { s_logger.debug("Update async-job attachment, job-" + jobId + ", instanceType: " + instanceType + ", instanceId: " + instanceId); } - Transaction txt = Transaction.currentTxn(); - try { - txt.start(); - - AsyncJobVO job = _jobDao.createForUpdate(); - job.setInstanceType(instanceType); - job.setInstanceId(instanceId); - job.setLastUpdated(DateUtil.currentGMTTime()); - _jobDao.update(jobId, job); - - txt.commit(); - } catch (Exception e) { - s_logger.error("Unexpected exception while updating async job-" + jobId + " attachment: ", e); - txt.rollback(); - } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + AsyncJobVO job = _jobDao.createForUpdate(); + job.setInstanceType(instanceType); + job.setInstanceId(instanceId); + job.setLastUpdated(DateUtil.currentGMTTime()); + _jobDao.update(jobId, job); + } + }); } @Override @@ -490,18 +482,15 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, } 
private Runnable getExecutorRunnable(final AsyncJob job) { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { - Transaction txn = null; + protected void runInContext() { long runNumber = getJobRunNumber(); try { // // setup execution environment // - txn = Transaction.open(Transaction.CLOUD_DB); - try { JmxUtil.registerMBean("AsyncJobManager", "Active Job " + job.getId(), new AsyncJobMBeanImpl(job)); } catch (Exception e) { @@ -564,9 +553,6 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, s_logger.trace("Unable to unregister job " + job.getId() + " to JMX monitoring due to exception " + ExceptionUtil.toString(e)); } - if (txn != null) - txn.close(); - // // clean execution environment // @@ -687,10 +673,9 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, } private Runnable getHeartbeatTask() { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { - Transaction txn = Transaction.open("AsyncJobManagerImpl.getHeartbeatTask"); + protected void runInContext() { try { List l = _queueMgr.dequeueFromAny(getMsid(), MAX_ONETIME_SCHEDULE_SIZE); if (l != null && l.size() > 0) { @@ -711,12 +696,6 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, } } catch (Throwable e) { s_logger.error("Unexpected exception when trying to execute queue item, ", e); - } finally { - try { - txn.close(); - } catch (Throwable e) { - s_logger.error("Unexpected exception", e); - } } } }; @@ -724,9 +703,9 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @DB private Runnable getGCTask() { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { + protected void runInContext() { GlobalLock scanLock = GlobalLock.getInternLock("AsyncJobManagerGC"); try { if (scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { @@ -785,13 +764,15 @@ 
public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, } @DB - protected void expungeAsyncJob(AsyncJobVO job) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - _jobDao.expunge(job.getId()); - //purge corresponding sync queue item - _queueMgr.purgeAsyncJobQueueItemId(job.getId()); - txn.commit(); + protected void expungeAsyncJob(final AsyncJobVO job) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _jobDao.expunge(job.getId()); + //purge corresponding sync queue item + _queueMgr.purgeAsyncJobQueueItemId(job.getId()); + } + }); } private long getMsid() { @@ -825,58 +806,60 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, Collections.sort(result); Long[] ids = result.toArray(new Long[result.size()]); - SearchCriteria jobsSC = JobIdsSearch.create("ids", ids); - SearchCriteria queueItemsSC = QueueJobIdsSearch.create("contentIds", ids); + final SearchCriteria jobsSC = JobIdsSearch.create("ids", ids); + final SearchCriteria queueItemsSC = QueueJobIdsSearch.create("contentIds", ids); - Transaction txn = Transaction.currentTxn(); - txn.start(); - AsyncJobVO job = _jobDao.createForUpdate(); - job.setPendingSignals(AsyncJob.Constants.SIGNAL_MASK_WAKEUP); - _jobDao.update(job, jobsSC); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + AsyncJobVO job = _jobDao.createForUpdate(); + job.setPendingSignals(AsyncJob.Constants.SIGNAL_MASK_WAKEUP); + _jobDao.update(job, jobsSC); - SyncQueueItemVO item = _queueItemDao.createForUpdate(); - item.setLastProcessNumber(null); - item.setLastProcessMsid(null); - _queueItemDao.update(item, queueItemsSC); - txn.commit(); + SyncQueueItemVO item = _queueItemDao.createForUpdate(); + item.setLastProcessNumber(null); + item.setLastProcessMsid(null); + _queueItemDao.update(item, 
queueItemsSC); + } + }); } return _joinMapDao.findJobsToWake(joinedJobId); } @DB protected List wakeupScan() { - Date cutDate = DateUtil.currentGMTTime(); - Transaction txn = Transaction.currentTxn(); + final Date cutDate = DateUtil.currentGMTTime(); SearchCriteria sc = JoinJobTimeSearch.create(); sc.setParameters("beginTime", cutDate); sc.setParameters("endTime", cutDate); - List result = _joinMapDao.customSearch(sc, null); + final List result = _joinMapDao.customSearch(sc, null); - txn.start(); - if (result.size() > 0) { - Collections.sort(result); - Long[] ids = result.toArray(new Long[result.size()]); + return Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + if (result.size() > 0) { + Collections.sort(result); + Long[] ids = result.toArray(new Long[result.size()]); - AsyncJobVO job = _jobDao.createForUpdate(); - job.setPendingSignals(AsyncJob.Constants.SIGNAL_MASK_WAKEUP); + AsyncJobVO job = _jobDao.createForUpdate(); + job.setPendingSignals(AsyncJob.Constants.SIGNAL_MASK_WAKEUP); - SearchCriteria sc2 = JobIdsSearch.create("ids", ids); - SearchCriteria queueItemsSC = QueueJobIdsSearch.create("contentIds", ids); + SearchCriteria sc2 = JobIdsSearch.create("ids", ids); + SearchCriteria queueItemsSC = QueueJobIdsSearch.create("contentIds", ids); - _jobDao.update(job, sc2); + _jobDao.update(job, sc2); - SyncQueueItemVO item = _queueItemDao.createForUpdate(); - item.setLastProcessNumber(null); - item.setLastProcessMsid(null); - _queueItemDao.update(item, queueItemsSC); - } + SyncQueueItemVO item = _queueItemDao.createForUpdate(); + item.setLastProcessNumber(null); + item.setLastProcessMsid(null); + _queueItemDao.update(item, queueItemsSC); + } - List wakupIds = _joinMapDao.findJobsToWakeBetween(cutDate); - txn.commit(); - - return wakupIds; + return _joinMapDao.findJobsToWakeBetween(cutDate); + } + }); } @Override @@ -884,7 +867,7 @@ public class AsyncJobManagerImpl extends ManagerBase implements 
AsyncJobManager, try { final File dbPropsFile = PropertiesUtil.findConfigFile("db.properties"); final Properties dbProps = new Properties(); - dbProps.load(new FileInputStream(dbPropsFile)); + PropertiesUtil.loadFromFile(dbProps, dbPropsFile); final int cloudMaxActive = Integer.parseInt(dbProps.getProperty("db.cloud.maxActive")); @@ -898,13 +881,13 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, JoinJobSearch = _joinMapDao.createSearchBuilder(Long.class); JoinJobSearch.and(JoinJobSearch.entity().getJoinJobId(), Op.EQ, "joinJobId"); - JoinJobSearch.selectField(JoinJobSearch.entity().getJobId()); + JoinJobSearch.selectFields(JoinJobSearch.entity().getJobId()); JoinJobSearch.done(); JoinJobTimeSearch = _joinMapDao.createSearchBuilder(Long.class); JoinJobTimeSearch.and(JoinJobTimeSearch.entity().getNextWakeupTime(), Op.LT, "beginTime"); JoinJobTimeSearch.and(JoinJobTimeSearch.entity().getExpiration(), Op.GT, "endTime"); - JoinJobTimeSearch.selectField(JoinJobTimeSearch.entity().getJobId()).done(); + JoinJobTimeSearch.selectFields(JoinJobTimeSearch.entity().getJobId()).done(); JobIdsSearch = _jobDao.createSearchBuilder(); JobIdsSearch.and(JobIdsSearch.entity().getId(), Op.IN, "ids").done(); @@ -913,13 +896,13 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, QueueJobIdsSearch.and(QueueJobIdsSearch.entity().getContentId(), Op.IN, "contentIds").done(); JoinJobIdsSearch = _joinMapDao.createSearchBuilder(Long.class); - JoinJobIdsSearch.selectField(JoinJobIdsSearch.entity().getJobId()); + JoinJobIdsSearch.selectFields(JoinJobIdsSearch.entity().getJobId()); JoinJobIdsSearch.and(JoinJobIdsSearch.entity().getJoinJobId(), Op.EQ, "joinJobId"); JoinJobIdsSearch.and(JoinJobIdsSearch.entity().getJobId(), Op.NIN, "jobIds"); JoinJobIdsSearch.done(); ContentIdsSearch = _queueItemDao.createSearchBuilder(Long.class); - ContentIdsSearch.selectField(ContentIdsSearch.entity().getContentId()).done(); + 
ContentIdsSearch.selectFields(ContentIdsSearch.entity().getContentId()).done(); AsyncJobExecutionContext.init(this, _joinMapDao); OutcomeImpl.init(this); @@ -933,18 +916,18 @@ public class AsyncJobManagerImpl extends ManagerBase implements AsyncJobManager, @Override public void onManagementNodeLeft(List nodeList, long selfNodeId) { - for (ManagementServerHost msHost : nodeList) { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + for (final ManagementServerHost msHost : nodeList) { try { - txn.start(); - List items = _queueMgr.getActiveQueueItems(msHost.getId(), true); - cleanupPendingJobs(items); - _jobDao.resetJobProcess(msHost.getId(), ApiErrorCode.INTERNAL_ERROR.getHttpCode(), "job cancelled because of management server restart"); - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + List items = _queueMgr.getActiveQueueItems(msHost.getId(), true); + cleanupPendingJobs(items); + _jobDao.resetJobProcess(msHost.getId(), ApiErrorCode.INTERNAL_ERROR.getHttpCode(), "job cancelled because of management server restart"); + } + }); } catch (Throwable e) { s_logger.warn("Unexpected exception ", e); - } finally { - txn.close(); } } } diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java index 3bf362251fc..8ea75289dfd 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/AsyncJobMonitor.java @@ -20,17 +20,18 @@ import java.util.HashMap; import java.util.Map; import java.util.Timer; import java.util.TimerTask; +import java.util.concurrent.atomic.AtomicInteger; import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.framework.jobs.AsyncJob; import 
org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.MessageDispatcher; import org.apache.cloudstack.framework.messagebus.MessageHandler; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import com.cloud.utils.component.ManagerBase; @@ -41,14 +42,14 @@ public class AsyncJobMonitor extends ManagerBase { private final Map _activeTasks = new HashMap(); private final Timer _timer = new Timer(); - - private volatile int _activePoolThreads = 0; - private volatile int _activeInplaceThreads = 0; - - // configuration - private long _inactivityCheckIntervalMs = 60000; - private long _inactivityWarningThresholdMs = 90000; - + + private final AtomicInteger _activePoolThreads = new AtomicInteger(); + private final AtomicInteger _activeInplaceThreads = new AtomicInteger(); + + // configuration + private long _inactivityCheckIntervalMs = 60000; + private long _inactivityWarningThresholdMs = 90000; + public AsyncJobMonitor() { } @@ -96,10 +97,9 @@ public class AsyncJobMonitor extends ManagerBase { throws ConfigurationException { _messageBus.subscribe(AsyncJob.Topics.JOB_HEARTBEAT, MessageDispatcher.getDispatcher(this)); - _timer.scheduleAtFixedRate(new TimerTask() { - + _timer.scheduleAtFixedRate(new ManagedContextTimerTask() { @Override - public void run() { + protected void runInContext() { heartbeat(); } @@ -118,9 +118,9 @@ public class AsyncJobMonitor extends ManagerBase { ActiveTaskRecord record = new ActiveTaskRecord(jobId, threadId, fromPoolThread); _activeTasks.put(runNumber, record); if(fromPoolThread) - _activePoolThreads++; + _activePoolThreads.incrementAndGet(); else - _activeInplaceThreads++; + _activeInplaceThreads.incrementAndGet(); } } @@ -132,23 +132,23 @@ public class AsyncJobMonitor extends ManagerBase { s_logger.info("Remove job-" + record.getJobId() + " from job monitoring"); if(record.isPoolThread()) - _activePoolThreads--; + 
_activePoolThreads.decrementAndGet(); else - _activeInplaceThreads--; + _activeInplaceThreads.decrementAndGet(); _activeTasks.remove(runNumber); } } } - + public int getActivePoolThreads() { - return _activePoolThreads; + return _activePoolThreads.get(); } - + public int getActiveInplaceThread() { - return _activeInplaceThreads; + return _activeInplaceThreads.get(); } - + private static class ActiveTaskRecord { long _jobId; long _threadId; diff --git a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java index b9b5d6bdabd..7fb02454c88 100644 --- a/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java +++ b/framework/jobs/src/org/apache/cloudstack/framework/jobs/impl/SyncQueueManagerImpl.java @@ -21,8 +21,8 @@ import java.util.Date; import java.util.List; import javax.inject.Inject; -import org.apache.log4j.Logger; +import org.apache.log4j.Logger; import org.apache.cloudstack.framework.jobs.dao.SyncQueueDao; import org.apache.cloudstack.framework.jobs.dao.SyncQueueItemDao; @@ -30,6 +30,9 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManager { @@ -40,84 +43,83 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage @Override @DB - public SyncQueueVO queue(String syncObjType, long syncObjId, String itemType, long itemId, long queueSizeLimit) { - Transaction txn = Transaction.currentTxn(); + public SyncQueueVO queue(final String syncObjType, final long syncObjId, final String itemType, final long itemId, final 
long queueSizeLimit) { try { - txn.start(); + return Transaction.execute(new TransactionCallback() { + @Override + public SyncQueueVO doInTransaction(TransactionStatus status) { + _syncQueueDao.ensureQueue(syncObjType, syncObjId); + SyncQueueVO queueVO = _syncQueueDao.find(syncObjType, syncObjId); + if(queueVO == null) + throw new CloudRuntimeException("Unable to queue item into DB, DB is full?"); - _syncQueueDao.ensureQueue(syncObjType, syncObjId); - SyncQueueVO queueVO = _syncQueueDao.find(syncObjType, syncObjId); - if(queueVO == null) - throw new CloudRuntimeException("Unable to queue item into DB, DB is full?"); - - queueVO.setQueueSizeLimit(queueSizeLimit); - _syncQueueDao.update(queueVO.getId(), queueVO); - - Date dt = DateUtil.currentGMTTime(); - SyncQueueItemVO item = new SyncQueueItemVO(); - item.setQueueId(queueVO.getId()); - item.setContentType(itemType); - item.setContentId(itemId); - item.setCreated(dt); - - _syncQueueItemDao.persist(item); - txn.commit(); - - return queueVO; - } catch(Exception e) { - s_logger.error("Unexpected exception: ", e); - txn.rollback(); - } - return null; - } - - @Override - @DB - public SyncQueueItemVO dequeueFromOne(long queueId, Long msid) { - Transaction txt = Transaction.currentTxn(); - try { - txt.start(); - - SyncQueueVO queueVO = _syncQueueDao.lockRow(queueId, true); - if(queueVO == null) { - s_logger.error("Sync queue(id: " + queueId + ") does not exist"); - txt.commit(); - return null; - } - - if(queueReadyToProcess(queueVO)) { - SyncQueueItemVO itemVO = _syncQueueItemDao.getNextQueueItem(queueVO.getId()); - if(itemVO != null) { - Long processNumber = queueVO.getLastProcessNumber(); - if(processNumber == null) - processNumber = new Long(1); - else - processNumber = processNumber + 1; - Date dt = DateUtil.currentGMTTime(); - queueVO.setLastProcessNumber(processNumber); - queueVO.setLastUpdated(dt); - queueVO.setQueueSize(queueVO.getQueueSize() + 1); + queueVO.setQueueSizeLimit(queueSizeLimit); 
_syncQueueDao.update(queueVO.getId(), queueVO); - itemVO.setLastProcessMsid(msid); - itemVO.setLastProcessNumber(processNumber); - itemVO.setLastProcessTime(dt); - _syncQueueItemDao.update(itemVO.getId(), itemVO); + Date dt = DateUtil.currentGMTTime(); + SyncQueueItemVO item = new SyncQueueItemVO(); + item.setQueueId(queueVO.getId()); + item.setContentType(itemType); + item.setContentId(itemId); + item.setCreated(dt); - txt.commit(); - return itemVO; - } else { - if(s_logger.isDebugEnabled()) - s_logger.debug("Sync queue (" + queueId + ") is currently empty"); + _syncQueueItemDao.persist(item); + return queueVO; } - } else { - if(s_logger.isDebugEnabled()) - s_logger.debug("There is a pending process in sync queue(id: " + queueId + ")"); - } - txt.commit(); + }); + } catch(Exception e) { + s_logger.error("Unexpected exception: ", e); + } + return null; + } + + @Override + @DB + public SyncQueueItemVO dequeueFromOne(final long queueId, final Long msid) { + try { + return Transaction.execute(new TransactionCallback() { + @Override + public SyncQueueItemVO doInTransaction(TransactionStatus status) { + SyncQueueVO queueVO = _syncQueueDao.lockRow(queueId, true); + if(queueVO == null) { + s_logger.error("Sync queue(id: " + queueId + ") does not exist"); + return null; + } + + if(queueReadyToProcess(queueVO)) { + SyncQueueItemVO itemVO = _syncQueueItemDao.getNextQueueItem(queueVO.getId()); + if(itemVO != null) { + Long processNumber = queueVO.getLastProcessNumber(); + if(processNumber == null) + processNumber = new Long(1); + else + processNumber = processNumber + 1; + Date dt = DateUtil.currentGMTTime(); + queueVO.setLastProcessNumber(processNumber); + queueVO.setLastUpdated(dt); + queueVO.setQueueSize(queueVO.getQueueSize() + 1); + _syncQueueDao.update(queueVO.getId(), queueVO); + + itemVO.setLastProcessMsid(msid); + itemVO.setLastProcessNumber(processNumber); + itemVO.setLastProcessTime(dt); + _syncQueueItemDao.update(itemVO.getId(), itemVO); + + return itemVO; + } 
else { + if(s_logger.isDebugEnabled()) + s_logger.debug("Sync queue (" + queueId + ") is currently empty"); + } + } else { + if(s_logger.isDebugEnabled()) + s_logger.debug("There is a pending process in sync queue(id: " + queueId + ")"); + } + + return null; + } + }); } catch(Exception e) { s_logger.error("Unexpected exception: ", e); - txt.rollback(); } return null; @@ -125,101 +127,104 @@ public class SyncQueueManagerImpl extends ManagerBase implements SyncQueueManage @Override @DB - public List dequeueFromAny(Long msid, int maxItems) { + public List dequeueFromAny(final Long msid, final int maxItems) { + + final List resultList = new ArrayList(); - List resultList = new ArrayList(); - Transaction txt = Transaction.currentTxn(); try { - txt.start(); - - List l = _syncQueueItemDao.getNextQueueItems(maxItems); - if(l != null && l.size() > 0) { - for(SyncQueueItemVO item : l) { - SyncQueueVO queueVO = _syncQueueDao.lockRow(item.getQueueId(), true); - SyncQueueItemVO itemVO = _syncQueueItemDao.lockRow(item.getId(), true); - if(queueReadyToProcess(queueVO) && itemVO.getLastProcessNumber() == null) { - Long processNumber = queueVO.getLastProcessNumber(); - if(processNumber == null) - processNumber = new Long(1); - else - processNumber = processNumber + 1; - - Date dt = DateUtil.currentGMTTime(); - queueVO.setLastProcessNumber(processNumber); - queueVO.setLastUpdated(dt); - queueVO.setQueueSize(queueVO.getQueueSize() + 1); - _syncQueueDao.update(queueVO.getId(), queueVO); - - itemVO.setLastProcessMsid(msid); - itemVO.setLastProcessNumber(processNumber); - itemVO.setLastProcessTime(dt); - _syncQueueItemDao.update(item.getId(), itemVO); - - resultList.add(item); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + List l = _syncQueueItemDao.getNextQueueItems(maxItems); + if(l != null && l.size() > 0) { + for(SyncQueueItemVO item : l) { + SyncQueueVO queueVO = 
_syncQueueDao.lockRow(item.getQueueId(), true); + SyncQueueItemVO itemVO = _syncQueueItemDao.lockRow(item.getId(), true); + if(queueReadyToProcess(queueVO) && itemVO.getLastProcessNumber() == null) { + Long processNumber = queueVO.getLastProcessNumber(); + if(processNumber == null) + processNumber = new Long(1); + else + processNumber = processNumber + 1; + + Date dt = DateUtil.currentGMTTime(); + queueVO.setLastProcessNumber(processNumber); + queueVO.setLastUpdated(dt); + queueVO.setQueueSize(queueVO.getQueueSize() + 1); + _syncQueueDao.update(queueVO.getId(), queueVO); + + itemVO.setLastProcessMsid(msid); + itemVO.setLastProcessNumber(processNumber); + itemVO.setLastProcessTime(dt); + _syncQueueItemDao.update(item.getId(), itemVO); + + resultList.add(item); + } + } } } - } - txt.commit(); + }); + return resultList; } catch(Exception e) { s_logger.error("Unexpected exception: ", e); - txt.rollback(); } + return null; } @Override @DB - public void purgeItem(long queueItemId) { - Transaction txt = Transaction.currentTxn(); + public void purgeItem(final long queueItemId) { try { - txt.start(); - - SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId); - if(itemVO != null) { - SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true); - - _syncQueueItemDao.expunge(itemVO.getId()); - - // if item is active, reset queue information - if (itemVO.getLastProcessMsid() != null) { - queueVO.setLastUpdated(DateUtil.currentGMTTime()); - // decrement the count - assert (queueVO.getQueueSize() > 0) : "Count reduce happens when it's already <= 0!"; - queueVO.setQueueSize(queueVO.getQueueSize() - 1); - _syncQueueDao.update(queueVO.getId(), queueVO); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId); + if(itemVO != null) { + SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true); + + 
_syncQueueItemDao.expunge(itemVO.getId()); + + // if item is active, reset queue information + if (itemVO.getLastProcessMsid() != null) { + queueVO.setLastUpdated(DateUtil.currentGMTTime()); + // decrement the count + assert (queueVO.getQueueSize() > 0) : "Count reduce happens when it's already <= 0!"; + queueVO.setQueueSize(queueVO.getQueueSize() - 1); + _syncQueueDao.update(queueVO.getId(), queueVO); + } + } } - } - txt.commit(); + }); } catch(Exception e) { s_logger.error("Unexpected exception: ", e); - txt.rollback(); } } @Override @DB - public void returnItem(long queueItemId) { - Transaction txt = Transaction.currentTxn(); + public void returnItem(final long queueItemId) { try { - txt.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId); + if(itemVO != null) { + SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true); - SyncQueueItemVO itemVO = _syncQueueItemDao.findById(queueItemId); - if(itemVO != null) { - SyncQueueVO queueVO = _syncQueueDao.lockRow(itemVO.getQueueId(), true); + itemVO.setLastProcessMsid(null); + itemVO.setLastProcessNumber(null); + itemVO.setLastProcessTime(null); + _syncQueueItemDao.update(queueItemId, itemVO); - itemVO.setLastProcessMsid(null); - itemVO.setLastProcessNumber(null); - itemVO.setLastProcessTime(null); - _syncQueueItemDao.update(queueItemId, itemVO); - - queueVO.setLastUpdated(DateUtil.currentGMTTime()); - _syncQueueDao.update(queueVO.getId(), queueVO); - } - txt.commit(); + queueVO.setLastUpdated(DateUtil.currentGMTTime()); + _syncQueueDao.update(queueVO.getId(), queueVO); + } + } + }); } catch(Exception e) { s_logger.error("Unexpected exception: ", e); - txt.rollback(); } } diff --git a/framework/managed-context/pom.xml b/framework/managed-context/pom.xml new file mode 100644 index 00000000000..b4a9d83a005 --- /dev/null +++ 
b/framework/managed-context/pom.xml @@ -0,0 +1,36 @@ + + + 4.0.0 + cloud-framework-managed-context + Apache CloudStack Framework - Managed Context + + org.apache.cloudstack + cloud-maven-standard + 4.3.0-SNAPSHOT + ../../maven-standard/pom.xml + + + + org.slf4j + slf4j-api + + + diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/AbstractManagedContextListener.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/AbstractManagedContextListener.java new file mode 100644 index 00000000000..21f63a68f81 --- /dev/null +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/AbstractManagedContextListener.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.context; + +public class AbstractManagedContextListener implements ManagedContextListener { + + @Override + public T onEnterContext(boolean reentry) { + return null; + } + + @Override + public void onLeaveContext(T data, boolean reentry) { + } + +} \ No newline at end of file diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContext.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContext.java new file mode 100644 index 00000000000..5023725d0de --- /dev/null +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContext.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.context; + +import java.util.concurrent.Callable; + +public interface ManagedContext { + + public void registerListener(ManagedContextListener listener); + + public void unregisterListener(ManagedContextListener listener); + + public void runWithContext(Runnable run); + + public T callWithContext(Callable callable) throws Exception; + +} \ No newline at end of file diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextListener.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextListener.java new file mode 100644 index 00000000000..2f85a5f69ab --- /dev/null +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextListener.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.context; + +public interface ManagedContextListener { + + /** + * @param reentry True if listener is being invoked in a nested context + * @return + */ + public T onEnterContext(boolean reentry); + + + /** + * @param data The data returned from the onEnterContext call + * @param reentry True if listener is being invoked in a nested context + */ + public void onLeaveContext(T data, boolean reentry); + +} diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextRunnable.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextRunnable.java new file mode 100644 index 00000000000..2f3d0c8a71f --- /dev/null +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextRunnable.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.context; + +import org.apache.cloudstack.managed.context.impl.DefaultManagedContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public abstract class ManagedContextRunnable implements Runnable { + + private static final int SLEEP_COUNT = 120; + + private static final Logger log = LoggerFactory.getLogger(ManagedContextRunnable.class); + private static final ManagedContext DEFAULT_MANAGED_CONTEXT = new DefaultManagedContext(); + private static ManagedContext context; + private static boolean managedContext = false; + + + /* This is slightly dirty, but the idea is that we only save the ManagedContext + * in a static global. Any ManagedContextListener can be a fully managed object + * and not have to rely on global statics + */ + public static ManagedContext initializeGlobalContext(ManagedContext context) { + setManagedContext(true); + return ManagedContextRunnable.context = context; + } + + @Override + final public void run() { + getContext().runWithContext(new Runnable() { + @Override + public void run() { + runInContext(); + } + }); + } + + protected abstract void runInContext(); + + protected ManagedContext getContext() { + if ( ! 
managedContext ) + return DEFAULT_MANAGED_CONTEXT; + + for ( int i = 0 ; i < SLEEP_COUNT ; i++ ) { + if ( context == null ) { + try { + Thread.sleep(1000); + + if ( context == null ) + log.info("Sleeping until ManagedContext becomes available"); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } else { + return context; + } + } + + throw new RuntimeException("Failed to obtain ManagedContext"); + } + + public static boolean isManagedContext() { + return managedContext; + } + + public static void setManagedContext(boolean managedContext) { + ManagedContextRunnable.managedContext = managedContext; + } + +} \ No newline at end of file diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextTimerTask.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextTimerTask.java new file mode 100644 index 00000000000..894d27c7c42 --- /dev/null +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextTimerTask.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.context; + +import java.util.TimerTask; + +public abstract class ManagedContextTimerTask extends TimerTask { + + @Override + public final void run() { + new ManagedContextRunnable() { + @Override + protected void runInContext() { + ManagedContextTimerTask.this.runInContext(); + } + }.run(); + } + + protected abstract void runInContext(); + +} diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextUtils.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextUtils.java new file mode 100644 index 00000000000..75bb2056f02 --- /dev/null +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/ManagedContextUtils.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.context; + +public class ManagedContextUtils { + + private static final ThreadLocal OWNER = new ThreadLocal(); + + public static boolean setAndCheckOwner(Object owner) { + if ( OWNER.get() == null ) { + OWNER.set(owner); + return true; + } + + return false; + } + + public static boolean clearOwner(Object owner) { + if ( OWNER.get() == owner ) { + OWNER.remove(); + return true; + } + + return false; + } + + public static boolean isInContext() { + return OWNER.get() != null; + } + + public static void rethrowException(Throwable t) { + if ( t instanceof RuntimeException ) { + throw (RuntimeException)t; + } else if ( t instanceof Error ) { + throw (Error)t; + } + } + +} diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContext.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContext.java new file mode 100644 index 00000000000..6f5cbc98bec --- /dev/null +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContext.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.context.impl; + +import java.util.List; +import java.util.Stack; +import java.util.concurrent.Callable; +import java.util.concurrent.CopyOnWriteArrayList; + +import org.apache.cloudstack.managed.context.ManagedContext; +import org.apache.cloudstack.managed.context.ManagedContextListener; +import org.apache.cloudstack.managed.context.ManagedContextUtils; +import org.apache.cloudstack.managed.threadlocal.ManagedThreadLocal; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DefaultManagedContext implements ManagedContext { + + private static final Logger log = LoggerFactory.getLogger(DefaultManagedContext.class); + + List> listeners = + new CopyOnWriteArrayList>(); + + @Override + public void registerListener(ManagedContextListener listener) { + listeners.add(listener); + } + + @Override + public void unregisterListener(ManagedContextListener listener) { + listeners.remove(listener); + } + + @Override + public void runWithContext(final Runnable run) { + try { + callWithContext(new Callable() { + @Override + public Object call() throws Exception { + run.run(); + return null; + } + }); + } catch (Exception e) { + /* Only care about non-checked exceptions + * as the nature of runnable prevents checked + * exceptions from happening + */ + ManagedContextUtils.rethrowException(e); + } + } + + @SuppressWarnings("unchecked") + @Override + public T callWithContext(Callable callable) throws Exception { + Object owner = new Object(); + + Stack invocations = new Stack(); + boolean reentry = ! 
ManagedContextUtils.setAndCheckOwner(owner); + Throwable firstError = null; + + try { + for ( ManagedContextListener listener : listeners ) { + Object data = null; + + try { + data = listener.onEnterContext(reentry); + } catch ( Throwable t ) { + /* If one listener fails, still call all other listeners + * and then we will call onLeaveContext for all + */ + if ( firstError == null ) { + firstError = t; + } + log.error("Failed onEnterContext for listener [{}]", listener, t); + } + + /* Stack data structure is used because in between onEnter and onLeave + * the listeners list could have changed + */ + invocations.push(new ListenerInvocation((ManagedContextListener) listener, data)); + } + + try { + if ( firstError == null ) { + /* Only call if all the listeners didn't blow up on onEnterContext */ + return callable.call(); + } else { + throwException(firstError); + return null; + } + } finally { + Throwable lastError = null; + + while ( ! invocations.isEmpty() ) { + ListenerInvocation invocation = invocations.pop(); + try { + invocation.listener.onLeaveContext(invocation.data, reentry); + } catch ( Throwable t ) { + lastError = t; + log.error("Failed onLeaveContext for listener [{}]", invocation.listener, t); + } + } + + if ( firstError == null && lastError != null ) { + throwException(lastError); + } + } + } finally { + if ( ManagedContextUtils.clearOwner(owner) ) + ManagedThreadLocal.reset(); + } + }; + + protected void throwException(Throwable t) throws Exception { + ManagedContextUtils.rethrowException(t); + if ( t instanceof Exception ) { + throw (Exception)t; + } + } + public List> getListeners() { + return listeners; + } + + public void setListeners(List> listeners) { + this.listeners = new CopyOnWriteArrayList>(listeners); + } + + private static class ListenerInvocation { + ManagedContextListener listener; + Object data; + + public ListenerInvocation(ManagedContextListener listener, Object data) { + super(); + this.listener = listener; + this.data = data; + } 
+ } +} diff --git a/framework/managed-context/src/main/java/org/apache/cloudstack/managed/threadlocal/ManagedThreadLocal.java b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/threadlocal/ManagedThreadLocal.java new file mode 100644 index 00000000000..bde535cc179 --- /dev/null +++ b/framework/managed-context/src/main/java/org/apache/cloudstack/managed/threadlocal/ManagedThreadLocal.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.threadlocal; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.managed.context.ManagedContextUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ManagedThreadLocal extends ThreadLocal { + + private static final ThreadLocal> MANAGED_THREAD_LOCAL = new ThreadLocal>() { + @Override + protected Map initialValue() { + return new HashMap(); + } + }; + + private static boolean VALIDATE_CONTEXT = false; + private static final Logger log = LoggerFactory.getLogger(ManagedThreadLocal.class); + + @SuppressWarnings("unchecked") + @Override + public T get() { + validateInContext(this); + Map map = MANAGED_THREAD_LOCAL.get(); + Object result = map.get(this); + if ( result == null ) { + result = initialValue(); + map.put(this, result); + } + return (T) result; + } + + @Override + public void set(T value) { + validateInContext(this); + Map map = MANAGED_THREAD_LOCAL.get(); + map.put(this, value); + } + + public static void reset() { + validateInContext(null); + MANAGED_THREAD_LOCAL.remove(); + } + + @Override + public void remove() { + Map map = MANAGED_THREAD_LOCAL.get(); + map.remove(this); + } + + private static void validateInContext(Object tl) { + if ( VALIDATE_CONTEXT && ! ManagedContextUtils.isInContext() ) { + String msg = "Using a managed thread local in a non managed context this WILL cause errors at runtime. 
TL [" + + tl + "]"; + log.error(msg, new IllegalStateException(msg)); + } + } + + public static void setValidateInContext(boolean validate) { + VALIDATE_CONTEXT = validate; + } +} diff --git a/framework/managed-context/src/test/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContextTest.java b/framework/managed-context/src/test/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContextTest.java new file mode 100644 index 00000000000..aa2d2e6dca0 --- /dev/null +++ b/framework/managed-context/src/test/java/org/apache/cloudstack/managed/context/impl/DefaultManagedContextTest.java @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.managed.context.impl; + +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +import org.apache.cloudstack.managed.context.ManagedContextListener; +import org.apache.cloudstack.managed.threadlocal.ManagedThreadLocal; +import org.junit.Before; +import org.junit.Test; + +public class DefaultManagedContextTest { + + DefaultManagedContext context; + + @Before + public void init() { + ManagedThreadLocal.setValidateInContext(false); + + context = new DefaultManagedContext(); + } + + @Test + public void testCallable() throws Exception { + assertEquals(5, context.callWithContext(new Callable() { + @Override + public Integer call() throws Exception { + return 5; + } + }).intValue()); + } + + @Test + public void testRunnable() throws Exception { + final List touch = new ArrayList(); + + context.runWithContext(new Runnable() { + @Override + public void run() { + touch.add(new Object()); + } + }); + + assertEquals(1, touch.size()); + } + + @Test + public void testGoodListeners() throws Exception { + final List touch = new ArrayList(); + + context.registerListener(new ManagedContextListener() { + @Override + public Object onEnterContext(boolean reentry) { + touch.add("enter"); + return "hi"; + } + + @Override + public void onLeaveContext(Object data, boolean reentry) { + touch.add("leave"); + assertEquals("hi", data); + } + }); + + context.registerListener(new ManagedContextListener() { + @Override + public Object onEnterContext(boolean reentry) { + touch.add("enter1"); + return "hi"; + } + + @Override + public void onLeaveContext(Object data, boolean reentry) { + touch.add("leave1"); + assertEquals("hi", data); + } + }); + + assertEquals(5, context.callWithContext(new Callable() { + @Override + public Integer call() throws Exception { + return 5; + } + }).intValue()); + + assertEquals("enter", touch.get(0)); + assertEquals("enter1", touch.get(1)); + 
assertEquals("leave1", touch.get(2)); + assertEquals("leave", touch.get(3)); + } + + @Test + public void testBadListeners() throws Exception { + final List touch = new ArrayList(); + + context.registerListener(new ManagedContextListener() { + @Override + public Object onEnterContext(boolean reentry) { + touch.add("enter"); + throw new RuntimeException("I'm a failure"); + } + + @Override + public void onLeaveContext(Object data, boolean reentry) { + touch.add("leave"); + assertNull(data); + } + }); + + context.registerListener(new ManagedContextListener() { + @Override + public Object onEnterContext(boolean reentry) { + touch.add("enter1"); + return "hi"; + } + + @Override + public void onLeaveContext(Object data, boolean reentry) { + touch.add("leave1"); + assertEquals("hi", data); + } + }); + + try { + context.callWithContext(new Callable() { + @Override + public Integer call() throws Exception { + return 5; + } + }).intValue(); + + fail(); + } catch ( Throwable t ) { + assertTrue(t instanceof RuntimeException); + assertEquals("I'm a failure", t.getMessage()); + } + + assertEquals("enter", touch.get(0)); + assertEquals("enter1", touch.get(1)); + assertEquals("leave1", touch.get(2)); + assertEquals("leave", touch.get(3)); + } + + @Test + public void testBadInvocation() throws Exception { + final List touch = new ArrayList(); + + context.registerListener(new ManagedContextListener() { + @Override + public Object onEnterContext(boolean reentry) { + touch.add("enter"); + return "hi"; + } + + @Override + public void onLeaveContext(Object data, boolean reentry) { + touch.add("leave"); + assertEquals("hi", data); + } + }); + + context.registerListener(new ManagedContextListener() { + @Override + public Object onEnterContext(boolean reentry) { + touch.add("enter1"); + return "hi1"; + } + + @Override + public void onLeaveContext(Object data, boolean reentry) { + touch.add("leave1"); + assertEquals("hi1", data); + } + }); + + try { + context.callWithContext(new Callable() { 
+ @Override + public Integer call() throws Exception { + throw new RuntimeException("I'm a failure"); + } + }).intValue(); + + fail(); + } catch ( Throwable t ) { + assertTrue(t.getMessage(), t instanceof RuntimeException); + assertEquals("I'm a failure", t.getMessage()); + } + + assertEquals("enter", touch.get(0)); + assertEquals("enter1", touch.get(1)); + assertEquals("leave1", touch.get(2)); + assertEquals("leave", touch.get(3)); + } + + @Test + public void testBadListernInExit() throws Exception { + final List touch = new ArrayList(); + + context.registerListener(new ManagedContextListener() { + @Override + public Object onEnterContext(boolean reentry) { + touch.add("enter"); + return "hi"; + } + + @Override + public void onLeaveContext(Object data, boolean reentry) { + touch.add("leave"); + assertEquals("hi", data); + + throw new RuntimeException("I'm a failure"); + } + }); + + context.registerListener(new ManagedContextListener() { + @Override + public Object onEnterContext(boolean reentry) { + touch.add("enter1"); + return "hi1"; + } + + @Override + public void onLeaveContext(Object data, boolean reentry) { + touch.add("leave1"); + assertEquals("hi1", data); + } + }); + + try { + context.callWithContext(new Callable() { + @Override + public Integer call() throws Exception { + return 5; + } + }).intValue(); + + fail(); + } catch ( Throwable t ) { + assertTrue(t.getMessage(), t instanceof RuntimeException); + assertEquals("I'm a failure", t.getMessage()); + } + + assertEquals("enter", touch.get(0)); + assertEquals("enter1", touch.get(1)); + assertEquals("leave1", touch.get(2)); + assertEquals("leave", touch.get(3)); + } +} diff --git a/framework/pom.xml b/framework/pom.xml index 1764076d498..14e3368d4ba 100644 --- a/framework/pom.xml +++ b/framework/pom.xml @@ -37,5 +37,8 @@ cluster db config + managed-context + spring/lifecycle + spring/module diff --git a/framework/spring/lifecycle/pom.xml b/framework/spring/lifecycle/pom.xml new file mode 100644 index 
00000000000..647101c69ca --- /dev/null +++ b/framework/spring/lifecycle/pom.xml @@ -0,0 +1,34 @@ + + + 4.0.0 + cloud-framework-spring-lifecycle + Apache CloudStack Framework - Spring Life Cycle + + org.apache.cloudstack + cloud-maven-standard + 4.3.0-SNAPSHOT + ../../../maven-standard/pom.xml + + + + org.apache.cloudstack + cloud-utils + ${project.version} + + + org.apache.cloudstack + cloud-framework-config + ${project.version} + + + diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractBeanCollector.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractBeanCollector.java new file mode 100644 index 00000000000..a3c0d600c71 --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractBeanCollector.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.lifecycle; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.springframework.beans.BeansException; +import org.springframework.beans.factory.config.BeanPostProcessor; + +/** + * This class provides a method to do basically the same as @Inject of a type, but + * it will only find the types in the current context and not the parent. This class + * should only be used for very specific Spring bootstrap logic. In general @Inject + * is infinitely better. Basically you need a very good reason to use this. + * + */ +public abstract class AbstractBeanCollector extends AbstractSmartLifeCycle implements BeanPostProcessor { + + Class[] typeClasses = new Class[] {}; + Map, Set> beans = new HashMap, Set>(); + + @Override + public int getPhase() { + return 2000; + } + + @Override + public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { + for ( Class typeClass : typeClasses ) { + if ( typeClass.isAssignableFrom(bean.getClass()) ) { + doPostProcessBeforeInitialization(bean, beanName); + break; + } + } + + return bean; + } + + protected void doPostProcessBeforeInitialization(Object bean, String beanName) throws BeansException { + } + + protected void doPostProcessAfterInitialization(Object bean, Class typeClass, String beanName) throws BeansException { + Set beansOfType = beans.get(typeClass); + + if ( beansOfType == null ) { + beansOfType = new HashSet(); + beans.put(typeClass, beansOfType); + } + + beansOfType.add(bean); + } + + @Override + public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { + for ( Class typeClass : typeClasses ) { + if ( typeClass.isAssignableFrom(bean.getClass()) ) { + doPostProcessAfterInitialization(bean, typeClass, beanName); + } + } + + return bean; + } + + protected Set getBeans(Class typeClass) { + 
@SuppressWarnings("unchecked") + Set result = (Set) beans.get(typeClass); + + if ( result == null ) + return Collections.emptySet(); + + return result; + } + + public Class getTypeClass() { + if ( typeClasses == null || typeClasses.length == 0 ) + return null; + + return typeClasses[0]; + } + + public void setTypeClass(Class typeClass) { + this.typeClasses = new Class[] { typeClass }; + } + + public Class[] getTypeClasses() { + return typeClasses; + } + + public void setTypeClasses(Class[] typeClasses) { + this.typeClasses = typeClasses; + } + +} diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractSmartLifeCycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractSmartLifeCycle.java new file mode 100644 index 00000000000..071817b0b66 --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/AbstractSmartLifeCycle.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.lifecycle; + +import org.springframework.context.SmartLifecycle; + +public abstract class AbstractSmartLifeCycle implements SmartLifecycle { + + boolean running = false; + + @Override + public void start() { + running = true; + } + + @Override + public void stop() { + running = false; + } + + @Override + public boolean isRunning() { + return running; + } + + @Override + public boolean isAutoStartup() { + return true; + } + + @Override + public void stop(Runnable callback) { + stop(); + callback.run(); + } + +} diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java new file mode 100644 index 00000000000..1b7ea513061 --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycle.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.lifecycle; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; + +import javax.management.InstanceAlreadyExistsException; +import javax.management.MBeanRegistrationException; +import javax.management.MalformedObjectNameException; +import javax.management.NotCompliantMBeanException; +import javax.naming.ConfigurationException; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.cloud.utils.component.ComponentLifecycle; +import com.cloud.utils.component.SystemIntegrityChecker; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.mgmt.JmxUtil; +import com.cloud.utils.mgmt.ManagementBean; + +public class CloudStackExtendedLifeCycle extends AbstractBeanCollector { + + private static final Logger log = LoggerFactory.getLogger(CloudStackExtendedLifeCycle.class); + + Map> sorted = new TreeMap>(); + + public CloudStackExtendedLifeCycle() { + super(); + setTypeClasses(new Class[] { + ComponentLifecycle.class, + SystemIntegrityChecker.class + }); + } + + @Override + public void start() { + sortBeans(); + checkIntegrity(); + configure(); + + super.start(); + } + + protected void checkIntegrity() { + for ( SystemIntegrityChecker checker : getBeans(SystemIntegrityChecker.class) ) { + log.info("Running system integrity checker {}", checker); + + checker.check(); + } + } + + public void startBeans() { + log.info("Starting CloudStack Components"); + + with(new WithComponentLifeCycle() { + @Override + public void with(ComponentLifecycle lifecycle) { + lifecycle.start(); + + if ( lifecycle instanceof ManagementBean ) { + ManagementBean mbean = (ManagementBean)lifecycle; + try { + JmxUtil.registerMBean(mbean); + } catch (MalformedObjectNameException e) { + log.warn("Unable to register MBean: " + mbean.getName(), e); + } catch (InstanceAlreadyExistsException e) { + log.warn("Unable to register MBean: " + mbean.getName(), e); + } catch 
(MBeanRegistrationException e) { + log.warn("Unable to register MBean: " + mbean.getName(), e); + } catch (NotCompliantMBeanException e) { + log.warn("Unable to register MBean: " + mbean.getName(), e); + } + log.info("Registered MBean: " + mbean.getName()); + } + } + }); + + log.info("Done Starting CloudStack Components"); + } + + public void stopBeans() { + with(new WithComponentLifeCycle() { + @Override + public void with(ComponentLifecycle lifecycle) { + lifecycle.stop(); + } + }); + } + + private void configure() { + log.info("Configuring CloudStack Components"); + + with(new WithComponentLifeCycle() { + @Override + public void with(ComponentLifecycle lifecycle) { + try { + lifecycle.configure(lifecycle.getName(), lifecycle.getConfigParams()); + } catch (ConfigurationException e) { + log.error("Failed to configure {}", lifecycle.getName(), e); + throw new CloudRuntimeException(e); + } + } + }); + + log.info("Done Configuring CloudStack Components"); + } + + private void sortBeans() { + for ( ComponentLifecycle lifecycle : getBeans(ComponentLifecycle.class) ) { + Set set = sorted.get(lifecycle.getRunLevel()); + + if ( set == null ) { + set = new HashSet(); + sorted.put(lifecycle.getRunLevel(), set); + } + + set.add(lifecycle); + } + } + + @Override + public void stop() { + with(new WithComponentLifeCycle() { + @Override + public void with(ComponentLifecycle lifecycle) { + lifecycle.stop(); + } + }); + + super.stop(); + } + + protected void with(WithComponentLifeCycle with) { + for ( Set lifecycles : sorted.values() ) { + for ( ComponentLifecycle lifecycle : lifecycles ) { + with.with(lifecycle); + } + } + } + + @Override + public int getPhase() { + return 2000; + } + + private static interface WithComponentLifeCycle { + public void with(ComponentLifecycle lifecycle); + } +} diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycleStart.java 
b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycleStart.java new file mode 100644 index 00000000000..33d4aea24e4 --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackExtendedLifeCycleStart.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.lifecycle; + +public class CloudStackExtendedLifeCycleStart extends AbstractSmartLifeCycle implements Runnable { + + CloudStackExtendedLifeCycle lifeCycle; + + @Override + public void stop() { + lifeCycle.stopBeans(); + super.stop(); + } + + @Override + public int getPhase() { + return 3000; + } + + public CloudStackExtendedLifeCycle getLifeCycle() { + return lifeCycle; + } + + public void setLifeCycle(CloudStackExtendedLifeCycle lifeCycle) { + this.lifeCycle = lifeCycle; + } + + @Override + public void run() { + lifeCycle.startBeans(); + } + +} diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackLog4jSetup.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackLog4jSetup.java new file mode 100644 index 00000000000..163703d0447 --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/CloudStackLog4jSetup.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.lifecycle; + +import org.springframework.context.SmartLifecycle; + +import com.cloud.utils.LogUtils; + +public class CloudStackLog4jSetup implements SmartLifecycle { + + @Override + public void start() { + LogUtils.initLog4j("log4j-cloud.xml"); + } + + @Override + public void stop() { + } + + @Override + public boolean isRunning() { + return false; + } + + @Override + public int getPhase() { + return 0; + } + + @Override + public boolean isAutoStartup() { + return true; + } + + @Override + public void stop(Runnable callback) { + callback.run(); + } + +} \ No newline at end of file diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/ConfigDepotLifeCycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/ConfigDepotLifeCycle.java new file mode 100644 index 00000000000..b380028964e --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/ConfigDepotLifeCycle.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.lifecycle; + +import javax.inject.Inject; + +import org.apache.cloudstack.framework.config.ConfigDepotAdmin; +import org.apache.cloudstack.framework.config.Configurable; +import org.springframework.beans.BeansException; +import org.springframework.beans.factory.config.BeanPostProcessor; + +public class ConfigDepotLifeCycle implements BeanPostProcessor { + + @Inject + ConfigDepotAdmin configDepotAdmin; + + @Override + public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { + if ( bean instanceof Configurable ) { + configDepotAdmin.populateConfiguration((Configurable)bean); + } + + return bean; + } + + @Override + public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { + return bean; + } + +} diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java new file mode 100644 index 00000000000..5614a32aee5 --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/DumpRegistry.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.lifecycle.registry; + +import java.util.List; + +import com.cloud.utils.component.ComponentLifecycleBase; +import com.cloud.utils.component.Named; +import com.cloud.utils.component.Registry; + +import javax.inject.Inject; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DumpRegistry extends ComponentLifecycleBase { + + private static final Logger log = LoggerFactory.getLogger(DumpRegistry.class); + + List> registries; + + public List> getRegistries() { + return registries; + } + + @Inject + public void setRegistries(List> registries) { + this.registries = registries; + } + + @Override + public boolean start() { + for ( Registry registry : registries ) { + StringBuilder buffer = new StringBuilder(); + + for ( Object o : registry.getRegistered() ) { + if ( buffer.length() > 0 ) + buffer.append(", "); + + buffer.append(getName(o)); + } + + log.info("Registry [{}] contains [{}]", registry.getName(), buffer); + } + + return super.start(); + } + + protected String getName(Object o) { + String name = null; + if (o instanceof Named) { + name = ((Named) o).getName(); + } + + if (name == null) { + name = o.getClass().getSimpleName(); + } + + return name; + } +} diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/ExtensionRegistry.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/ExtensionRegistry.java new file mode 100644 index 00000000000..2bd362eb8e3 --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/ExtensionRegistry.java @@ -0,0 +1,245 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.lifecycle.registry; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; + +import javax.annotation.PostConstruct; + +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.commons.lang.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.BeanNameAware; + +import com.cloud.utils.component.Named; +import com.cloud.utils.component.Registry; + +public class ExtensionRegistry implements Registry, Configurable, BeanNameAware { + + private static final Logger log = LoggerFactory.getLogger(ExtensionRegistry.class); + + String name; + String beanName; + + String orderConfigKey; + String orderConfigDefault; + ConfigKey orderConfigKeyObj; + + String excludeKey; + String excludeDefault; + ConfigKey excludeKeyObj; + + String configComponentName; + List preRegistered; + List registered = new CopyOnWriteArrayList(); + List readOnly = Collections.unmodifiableList(registered); + + @Override + public boolean register(Object item) { + if ( registered.contains(item) ) + return 
false; + + String[] order = new String[] {}; + Set exclude = new HashSet(); + + if ( orderConfigKeyObj != null ) { + Object value = orderConfigKeyObj.value(); + if ( value != null && value.toString().trim().length() > 0 ) { + order = value.toString().trim().split("\\s*,\\s*"); + } + } + + if ( excludeKeyObj != null ) { + Object value = excludeKeyObj.value(); + if ( value != null && value.toString().trim().length() > 0 ) { + for ( String e : value.toString().trim().split("\\s*,\\s*") ) { + exclude.add(e); + } + } + } + + String name = getName(item); + + if ( name != null && exclude.size() > 0 && exclude.contains(name) ) { + return false; + } + + if ( name == null && order.length > 0 ) { + throw new RuntimeException("getName() is null for [" + item + "]"); + } + + int i = 0; + for ( String orderTest : order ) { + if ( orderTest.equals(name) ) { + registered.add(i, item); + i = -1; + break; + } + + if ( registered.size() <= i ) { + break; + } + + if ( getName(registered.get(i)).equals(orderTest) ) { + i++; + } + } + + if ( i != -1 ) { + registered.add(item); + } + + log.debug("Registering extension [{}] in [{}]", name, this.name); + + return true; + } + + protected String getName(Object object) { + if ( object instanceof Named ) { + String name = ((Named)object).getName(); + if ( name != null ) + return name; + } + + return object == null ? null : object.getClass().getSimpleName(); + } + + @Override + public void unregister(Object type) { + registered.remove(type); + } + + @Override + public List getRegistered() { + return readOnly; + } + + @Override + public String getConfigComponentName() { + return configComponentName == null ? 
this.getClass().getSimpleName() : configComponentName; + } + + @Override + public ConfigKey[] getConfigKeys() { + List> result = new ArrayList>(); + + if ( orderConfigKey != null && orderConfigKeyObj == null ) { + orderConfigKeyObj = new ConfigKey("Advanced", String.class, orderConfigKey, orderConfigDefault, + "The order of precedence for the extensions", false); + } + + if ( orderConfigKeyObj != null ) + result.add(orderConfigKeyObj); + + if ( excludeKey != null && excludeKeyObj == null ) { + excludeKeyObj = new ConfigKey("Advanced", String.class, excludeKey, excludeDefault, + "Extensions to exclude from being registered", false); + } + + if ( excludeKeyObj != null ) { + result.add(excludeKeyObj); + } + + return result.toArray(new ConfigKey[result.size()]); + } + + @PostConstruct + public void init() { + if ( name == null ) { + for ( String part : beanName.replaceAll("([A-Z])", " $1").split("\\s+") ) { + part = StringUtils.capitalize(part.toLowerCase());; + + name = name == null ? part : name + " " + part; + } + } + + if ( preRegistered != null ) { + for ( Object o : preRegistered ) { + register(o); + } + } + } + + + public String getOrderConfigKey() { + return orderConfigKey; + } + + public void setOrderConfigKey(String orderConfigKey) { + this.orderConfigKey = orderConfigKey; + } + + public void setConfigComponentName(String configComponentName) { + this.configComponentName = configComponentName; + } + + public String getOrderConfigDefault() { + return orderConfigDefault; + } + + public void setOrderConfigDefault(String orderConfigDefault) { + this.orderConfigDefault = orderConfigDefault; + } + + public String getExcludeKey() { + return excludeKey; + } + + public void setExcludeKey(String excludeKey) { + this.excludeKey = excludeKey; + } + + public String getExcludeDefault() { + return excludeDefault; + } + + public void setExcludeDefault(String excludeDefault) { + this.excludeDefault = excludeDefault; + } + + @Override + public String getName() { + return name; 
+ } + + public void setName(String name) { + this.name = name; + } + + @Override + public void setBeanName(String name) { + this.beanName = name; + } + + public List getPreRegistered() { + return preRegistered; + } + + public void setPreRegistered(List preRegistered) { + this.preRegistered = preRegistered; + } + +} diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/PluggableServiceLifecycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/PluggableServiceLifecycle.java new file mode 100644 index 00000000000..3eeeed83c7b --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/PluggableServiceLifecycle.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.lifecycle.registry; + +import com.cloud.utils.component.ComponentContext; +import com.cloud.utils.component.PluggableService; + +public class PluggableServiceLifecycle extends RegistryLifecycle { + + @Override + public void start() { + super.start(); + + for (Object obj : beans) { + if (obj instanceof PluggableService) { + for (Class cmd : ((PluggableService) obj).getCommands()) { + ComponentContext.addDelegateContext(cmd, applicationContext); + } + } + } + } + + @Override + public void stop() { + for (Object obj : beans) { + if (obj instanceof PluggableService) { + for (Class cmd : ((PluggableService) obj).getCommands()) { + ComponentContext.removeDelegateContext(cmd); + } + } + } + + super.stop(); + } + + +} diff --git a/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java new file mode 100644 index 00000000000..bd7a0334a6f --- /dev/null +++ b/framework/spring/lifecycle/src/main/java/org/apache/cloudstack/spring/lifecycle/registry/RegistryLifecycle.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.lifecycle.registry; + +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.BeansException; +import org.springframework.beans.factory.config.BeanPostProcessor; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ApplicationContextAware; +import org.springframework.context.SmartLifecycle; + +import com.cloud.utils.component.Registry; + +public class RegistryLifecycle implements BeanPostProcessor, SmartLifecycle, ApplicationContextAware { + + private static final Logger log = LoggerFactory.getLogger(RegistryLifecycle.class); + + Registry registry; + + /* The bean name works around circular dependency issues in Spring. This shouldn't be + * needed if your beans are already nicely organized. If they look like spaghetti, then you + * can use this. 
+ */ + String registryBeanName; + Set beans = new HashSet(); + Class typeClass; + ApplicationContext applicationContext; + + @Override + public Object postProcessBeforeInitialization(Object bean, String beanName) throws BeansException { + if ( typeClass.isAssignableFrom(bean.getClass()) ) + beans.add(bean); + + return bean; + } + + @Override + public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { + return bean; + } + + @Override + public void start() { + Iterator iter = beans.iterator(); + Registry registry = lookupRegistry(); + + while ( iter.hasNext() ) { + Object next = iter.next(); + if ( registry.register(next) ) { + log.debug("Registered {}", next); + } else { + iter.remove(); + } + } + } + + @Override + public void stop() { + Registry registry = lookupRegistry(); + + for ( Object bean : beans ) { + registry.unregister(bean); + } + + beans.clear(); + } + + @Override + public boolean isRunning() { + return false; + } + + @Override + public int getPhase() { + return 2000; + } + + @Override + public boolean isAutoStartup() { + return true; + } + + @Override + public void stop(Runnable callback) { + stop(); + callback.run(); + } + + @Override + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + this.applicationContext = applicationContext; + } + + @SuppressWarnings("unchecked") + protected Registry lookupRegistry() { + return registry == null ? 
applicationContext.getBean(registryBeanName, Registry.class) : registry; + } + + public Registry getRegistry() { + return registry; + } + + public void setRegistry(Registry registry) { + this.registry = registry; + } + + public Class getTypeClass() { + return typeClass; + } + + public void setTypeClass(Class typeClass) { + this.typeClass = typeClass; + } + + public String getRegistryBeanName() { + return registryBeanName; + } + + public void setRegistryBeanName(String registryBeanName) { + this.registryBeanName = registryBeanName; + } + +} diff --git a/framework/spring/module/pom.xml b/framework/spring/module/pom.xml new file mode 100644 index 00000000000..b9d95a8447f --- /dev/null +++ b/framework/spring/module/pom.xml @@ -0,0 +1,50 @@ + + + 4.0.0 + cloud-framework-spring-module + + org.apache.cloudstack + cloud-maven-standard + 4.3.0-SNAPSHOT + ../../../maven-standard/pom.xml + + + + org.apache.cloudstack + cloud-utils + ${project.version} + + + commons-io + commons-io + + + org.springframework + spring-web + + + javax.servlet + servlet-api + provided + true + + + diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/context/ResourceApplicationContext.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/context/ResourceApplicationContext.java new file mode 100644 index 00000000000..60d0262ab36 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/context/ResourceApplicationContext.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.context; + +import org.springframework.context.support.AbstractXmlApplicationContext; +import org.springframework.core.io.Resource; + +public class ResourceApplicationContext extends AbstractXmlApplicationContext { + + Resource[] configResources; + String applicationName = ""; + + public ResourceApplicationContext() { + } + + public ResourceApplicationContext(Resource... configResources) { + super(); + this.configResources = configResources; + } + + @Override + protected Resource[] getConfigResources() { + return configResources; + } + + public void setConfigResources(Resource[] configResources) { + this.configResources = configResources; + } + + @Override + public String getApplicationName() { + return applicationName; + } + + public void setApplicationName(String applicationName) { + this.applicationName = applicationName; + } + +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java new file mode 100644 index 00000000000..e624a5b04d4 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/CloudStackSpringContext.java @@ -0,0 +1,137 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.factory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import org.apache.cloudstack.spring.module.locator.ModuleDefinitionLocator; +import org.apache.cloudstack.spring.module.locator.impl.ClasspathModuleDefinitionLocator; +import org.apache.cloudstack.spring.module.model.ModuleDefinition; +import org.apache.cloudstack.spring.module.model.ModuleDefinitionSet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.ApplicationContext; +import org.springframework.context.ConfigurableApplicationContext; +import org.springframework.core.io.Resource; + +public class CloudStackSpringContext { + + private static final Logger log = LoggerFactory.getLogger(CloudStackSpringContext.class); + + public static final String CLOUDSTACK_CONTEXT = "META-INF/cloudstack"; + public static final String CLOUDSTACK_BASE = "bootstrap"; + + ModuleBasedContextFactory factory = new ModuleBasedContextFactory(); + ModuleDefinitionLocator loader = new ClasspathModuleDefinitionLocator(); + ModuleDefinitionSet moduleDefinitionSet; + String baseName; + String contextName; + + public CloudStackSpringContext(String context, String base) throws IOException { + this.baseName = base; + this.contextName = context; + + factory = new ModuleBasedContextFactory(); + loader 
= new ClasspathModuleDefinitionLocator(); + init(); + } + + public CloudStackSpringContext() throws IOException { + this(CLOUDSTACK_CONTEXT, CLOUDSTACK_BASE); + } + + public void init() throws IOException { + Collection defs = loader.locateModules(contextName); + + if ( defs.size() == 0 ) + throw new RuntimeException("No modules found to load for Spring"); + + moduleDefinitionSet = factory.loadModules(defs, baseName); + } + + public void registerShutdownHook() { + ApplicationContext base = moduleDefinitionSet.getApplicationContext(baseName); + + if ( base instanceof ConfigurableApplicationContext ) { + ((ConfigurableApplicationContext)base).registerShutdownHook(); + } + } + + public ModuleDefinition getModuleDefinitionForWeb(String name) { + ModuleDefinition def = moduleDefinitionSet.getModuleDefinition(name); + + if ( def != null ) { + return def; + } + + /* Grab farthest descendant that is deterministic */ + def = moduleDefinitionSet.getModuleDefinition(baseName); + + if ( def == null ) { + throw new RuntimeException("Failed to find base spring module to extend for web"); + } + + while ( def.getChildren().size() == 1 ) { + def = def.getChildren().iterator().next(); + } + + return def; + } + + public ApplicationContext getApplicationContextForWeb(String name) { + ModuleDefinition def = getModuleDefinitionForWeb(name); + + return moduleDefinitionSet.getApplicationContext(def.getName()); + } + + public String[] getConfigLocationsForWeb(String name, String[] configured) { + if ( configured == null ) + configured = new String[] {}; + + ModuleDefinition def = getModuleDefinitionForWeb(name); + + List inherited = new ArrayList(); + + while ( def != null ) { + inherited.addAll(def.getInheritableContextLocations()); + def = moduleDefinitionSet.getModuleDefinition(def.getParentName()); + } + + List urlList = new ArrayList(); + + for ( Resource r : inherited ) { + try { + String urlString = r.getURL().toExternalForm(); + urlList.add(urlString); + } catch (IOException e) { + 
log.error("Failed to create URL for {}", r.getDescription(), e); + } + } + + String[] result = new String[urlList.size() + configured.length]; + result = urlList.toArray(result); + + System.arraycopy(configured, 0, result, urlList.size(), configured.length); + + return result; + } +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/ModuleBasedContextFactory.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/ModuleBasedContextFactory.java new file mode 100644 index 00000000000..3f89d3ab935 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/factory/ModuleBasedContextFactory.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.module.factory; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.spring.module.model.ModuleDefinition; +import org.apache.cloudstack.spring.module.model.ModuleDefinitionSet; +import org.apache.cloudstack.spring.module.model.impl.DefaultModuleDefinitionSet; + +public class ModuleBasedContextFactory { + + public ModuleDefinitionSet loadModules(Collection defs, String root) throws IOException { + + Map modules = wireUpModules(root, defs); + + DefaultModuleDefinitionSet moduleSet = new DefaultModuleDefinitionSet(modules, root); + moduleSet.load(); + + return moduleSet; + } + + protected Map wireUpModules(String root, Collection defs) throws IOException { + Map modules = new HashMap(); + + for ( ModuleDefinition def : defs ) { + modules.put(def.getName(), def); + } + + ModuleDefinition rootDef = null; + Map result = new HashMap(); + + for ( ModuleDefinition def : modules.values() ) { + if ( def.getName().equals(root) ) { + rootDef = def; + } + + if ( def.getParentName() != null ) { + ModuleDefinition parentDef = modules.get(def.getParentName()); + + if ( parentDef != null ) + parentDef.addChild(def); + } + } + + return traverse(rootDef, result); + } + + protected Map traverse(ModuleDefinition base, Map result) { + if ( base == null ) + return result; + + if ( result.containsKey(base.getName()) ) { + throw new RuntimeException("Circular dependency to [" + base.getName() + "] from current set " + + result.keySet()); + } + + result.put(base.getName(), base); + + for ( ModuleDefinition childDef : base.getChildren() ) + traverse(childDef, result); + + return result; + } +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/locator/ModuleDefinitionLocator.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/locator/ModuleDefinitionLocator.java new file mode 100644 index 
00000000000..6b14e0abfd7 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/locator/ModuleDefinitionLocator.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.locator; + +import java.io.IOException; +import java.util.Collection; + +import org.apache.cloudstack.spring.module.model.ModuleDefinition; + +/** + * Responsible for locating the ModuleDefinition for a given context. The implementation + * of this class should take extra care to set the ClassLoader of the ModuleDefinition + * properly. 
+ * + */ +public interface ModuleDefinitionLocator { + + Collection locateModules(String context) throws IOException; + +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/locator/impl/ClasspathModuleDefinitionLocator.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/locator/impl/ClasspathModuleDefinitionLocator.java new file mode 100644 index 00000000000..c9deacc23e8 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/locator/impl/ClasspathModuleDefinitionLocator.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.module.locator.impl; + +import java.io.IOException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.spring.module.locator.ModuleDefinitionLocator; +import org.apache.cloudstack.spring.module.model.ModuleDefinition; +import org.apache.cloudstack.spring.module.model.impl.DefaultModuleDefinition; +import org.apache.cloudstack.spring.module.util.ModuleLocationUtils; +import org.springframework.core.io.Resource; +import org.springframework.core.io.support.PathMatchingResourcePatternResolver; +import org.springframework.core.io.support.ResourcePatternResolver; + +public class ClasspathModuleDefinitionLocator implements ModuleDefinitionLocator { + + protected ResourcePatternResolver getResolver() { + return new PathMatchingResourcePatternResolver(); + } + + public Collection locateModules(String context) throws IOException { + ResourcePatternResolver resolver = getResolver(); + + Map allModules = discoverModules(context, resolver); + + return allModules.values(); + } + + protected Map discoverModules(String baseDir, ResourcePatternResolver resolver) throws IOException { + Map result = new HashMap(); + + for ( Resource r : resolver.getResources(ModuleLocationUtils.getModulesLocation(baseDir)) ) { + DefaultModuleDefinition def = new DefaultModuleDefinition(baseDir, r, resolver); + def.init(); + + if ( def.isValid() ) + result.put(def.getName(), def); + } + + return result; + } + +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/ModuleDefinition.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/ModuleDefinition.java new file mode 100644 index 00000000000..b3c46474224 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/ModuleDefinition.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.model; + +import java.util.Collection; +import java.util.List; + +import org.springframework.core.io.Resource; + +public interface ModuleDefinition { + + ClassLoader getClassLoader(); + + String getName(); + + String getParentName(); + + List getConfigLocations(); + + List getContextLocations(); + + List getInheritableContextLocations(); + + List getOverrideContextLocations(); + + boolean isValid(); + + Collection getChildren(); + + void addChild(ModuleDefinition childDef); + +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/ModuleDefinitionSet.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/ModuleDefinitionSet.java new file mode 100644 index 00000000000..635a7a193df --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/ModuleDefinitionSet.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.model; + +import org.springframework.context.ApplicationContext; +import org.springframework.core.io.Resource; + +public interface ModuleDefinitionSet { + + ModuleDefinition getModuleDefinition(String name); + + ApplicationContext getApplicationContext(String name); + + Resource[] getConfigResources(String name); + +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinition.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinition.java new file mode 100644 index 00000000000..6c5180874b8 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinition.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.model.impl; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.TreeMap; + +import org.apache.cloudstack.spring.module.model.ModuleDefinition; +import org.apache.cloudstack.spring.module.util.ModuleLocationUtils; +import org.apache.commons.io.IOUtils; +import org.springframework.core.io.Resource; +import org.springframework.core.io.support.ResourcePatternResolver; +import org.springframework.util.StringUtils; + +public class DefaultModuleDefinition implements ModuleDefinition { + + public static final String NAME = "name"; + public static final String PARENT = "parent"; + + String name; + String baseDir; + String parent; + Resource moduleProperties; + ResourcePatternResolver resolver; + boolean valid; + + List configLocations; + List contextLocations; + List inheritableContextLocations; + List overrideContextLocations; + Map children = new TreeMap(); + + public DefaultModuleDefinition(String baseDir, Resource moduleProperties, ResourcePatternResolver resolver) { + this.baseDir = baseDir; + this.resolver = resolver; + this.moduleProperties = moduleProperties; + } + + public void init() throws IOException { + + if ( ! 
moduleProperties.exists() ) { + return; + } + + resolveNameAndParent(); + + contextLocations = Arrays.asList(resolver.getResources(ModuleLocationUtils.getContextLocation(baseDir, name))); + configLocations = Arrays.asList(resolver.getResources(ModuleLocationUtils.getDefaultsLocation(baseDir, name))); + inheritableContextLocations = Arrays.asList(resolver.getResources(ModuleLocationUtils.getInheritableContextLocation(baseDir, name))); + overrideContextLocations = Arrays.asList(resolver.getResources(ModuleLocationUtils.getOverrideContextLocation(baseDir, name))); + + valid = true; + } + + protected void resolveNameAndParent() throws IOException { + InputStream is = null; + + try { + is = moduleProperties.getInputStream(); + Properties props = new Properties(); + props.load(is); + + name = props.getProperty(NAME); + parent = props.getProperty(PARENT); + + if ( ! StringUtils.hasText(name) ) { + throw new IOException("Missing name property in [" + location() + "]"); + } + + if ( ! StringUtils.hasText(parent) ) { + parent = null; + } + + checkNameMatchesSelf(); + } finally { + IOUtils.closeQuietly(is); + } + } + + protected void checkNameMatchesSelf() throws IOException { + String expectedLocation = ModuleLocationUtils.getModuleLocation(baseDir, name); + Resource self = resolver.getResource(expectedLocation); + + if ( ! self.exists() ) { + throw new IOException("Resource [" + location() + "] is expected to exist at [" + + expectedLocation + "] please ensure the name property is correct"); + } + + String moduleUrl = moduleProperties.getURL().toExternalForm(); + String selfUrl = self.getURL().toExternalForm(); + + if ( ! 
moduleUrl.equals(selfUrl) ) { + throw new IOException("Resource [" + location() + "] and [" + + self.getURL() + "] do not appear to be the same resource, " + + "please ensure the name property is correct"); + } + } + + private String location() throws IOException { + return moduleProperties.getURL().toString(); + } + + public void addChild(ModuleDefinition def) { + children.put(def.getName(), def); + } + + public Collection getChildren() { + return children.values(); + } + + public String getName() { + return name; + } + + public String getParentName() { + return parent; + } + + public List getConfigLocations() { + return configLocations; + } + + public List getContextLocations() { + return contextLocations; + } + + public List getInheritableContextLocations() { + return inheritableContextLocations; + } + + @Override + public List getOverrideContextLocations() { + return overrideContextLocations; + } + + public boolean isValid() { + return valid; + } + + public ClassLoader getClassLoader() { + return resolver.getClassLoader(); + } + + +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java new file mode 100644 index 00000000000..15df839cf1a --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionSet.java @@ -0,0 +1,243 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.model.impl; + +import java.io.IOException; +import java.net.URL; +import java.util.ArrayList; +import java.util.EmptyStackException; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + +import org.apache.cloudstack.spring.module.context.ResourceApplicationContext; +import org.apache.cloudstack.spring.module.model.ModuleDefinition; +import org.apache.cloudstack.spring.module.model.ModuleDefinitionSet; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.BeansException; +import org.springframework.context.ApplicationContext; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.core.io.Resource; +import org.springframework.core.io.UrlResource; + +public class DefaultModuleDefinitionSet implements ModuleDefinitionSet { + + private static final Logger log = LoggerFactory.getLogger(DefaultModuleDefinitionSet.class); + + public static final String DEFAULT_CONFIG_RESOURCES = "DefaultConfigResources"; + public static final String DEFAULT_CONFIG_XML = "defaults-context.xml"; + + String root; + Map modules; + Map contexts = new HashMap(); + ApplicationContext rootContext = null; + + public DefaultModuleDefinitionSet(Map modules, String root) { + super(); + this.root = root; + this.modules = modules; + } + + public void load() throws IOException { + if ( ! 
loadRootContext() ) + return; + + printHierarchy(); + loadContexts(); + startContexts(); + } + + protected boolean loadRootContext() { + ModuleDefinition def = modules.get(root); + + if ( def == null ) + return false; + + ApplicationContext defaultsContext = getDefaultsContext(); + + rootContext = loadContext(def, defaultsContext); + + return true; + } + + protected void startContexts() { + withModule(new WithModule() { + public void with(ModuleDefinition def, Stack parents) { + try { + ApplicationContext context = getApplicationContext(def.getName()); + try { + Runnable runnable = context.getBean("moduleStartup", Runnable.class); + log.info("Starting module [{}]", def.getName()); + runnable.run(); + } catch ( BeansException e ) { + // Ignore + } + } catch ( EmptyStackException e ) { + // The root context is already loaded, so ignore the exception + } + } + }); + } + + protected void loadContexts() { + withModule(new WithModule() { + public void with(ModuleDefinition def, Stack parents) { + try { + ApplicationContext parent = getApplicationContext(parents.peek().getName()); + loadContext(def, parent); + } catch ( EmptyStackException e ) { + // The root context is already loaded, so ignore the exception + } + } + }); + } + protected ApplicationContext loadContext(ModuleDefinition def, ApplicationContext parent) { + ResourceApplicationContext context = new ResourceApplicationContext(); + context.setApplicationName("/" + def.getName()); + + Resource[] resources = getConfigResources(def.getName()); + context.setConfigResources(resources); + context.setParent(parent); + context.setClassLoader(def.getClassLoader()); + + long start = System.currentTimeMillis(); + if ( log.isInfoEnabled() ) { + for ( Resource resource : resources ) { + log.info("Loading module context [{}] from {}", def.getName(), resource); + } + } + context.refresh(); + log.info("Loaded module context [{}] in {} ms", def.getName(), (System.currentTimeMillis() - start)); + + contexts.put(def.getName(), 
context); + + return context; + } + + protected boolean shouldLoad(ModuleDefinition def) { + return true; + } + + protected ApplicationContext getDefaultsContext() { + URL config = DefaultModuleDefinitionSet.class.getResource(DEFAULT_CONFIG_XML); + + ResourceApplicationContext context = new ResourceApplicationContext(new UrlResource(config)); + context.setApplicationName("/defaults"); + context.refresh(); + + @SuppressWarnings("unchecked") + final List resources = (List) context.getBean(DEFAULT_CONFIG_RESOURCES); + + withModule(new WithModule() { + public void with(ModuleDefinition def, Stack parents) { + for ( Resource defaults : def.getConfigLocations() ) { + resources.add(defaults); + } + } + }); + + return context; + } + + protected void printHierarchy() { + withModule(new WithModule() { + public void with(ModuleDefinition def, Stack parents) { + log.info(String.format("Module Hierarchy:%" + ((parents.size() * 2) + 1) + "s%s", "", def.getName())); + } + }); + } + + protected void withModule(WithModule with) { + ModuleDefinition rootDef = modules.get(root); + withModule(rootDef, new Stack(), with); + } + + protected void withModule(ModuleDefinition def, Stack parents, WithModule with) { + if ( def == null ) + return; + + if ( ! 
shouldLoad(def) ) { + return; + } + + with.with(def, parents); + + parents.push(def); + + for ( ModuleDefinition child : def.getChildren() ) { + withModule(child, parents, with); + } + + parents.pop(); + } + + private static interface WithModule { + public void with(ModuleDefinition def, Stack parents); + } + + @Configuration + public static class ConfigContext { + + List resources; + + public ConfigContext(List resources) { + super(); + this.resources = resources; + } + + @Bean(name = DEFAULT_CONFIG_RESOURCES) + public List defaultConfigResources() { + return new ArrayList(); + } + } + + public ApplicationContext getApplicationContext(String name) { + return contexts.get(name); + } + + public Resource[] getConfigResources(String name) { + Set resources = new LinkedHashSet(); + + ModuleDefinition original = null; + ModuleDefinition def = original = modules.get(name); + + if ( def == null ) + return new Resource[] {}; + + resources.addAll(def.getContextLocations()); + + while ( def != null ) { + resources.addAll(def.getInheritableContextLocations()); + def = modules.get(def.getParentName()); + } + + resources.addAll(original.getOverrideContextLocations()); + + return resources.toArray(new Resource[resources.size()]); + } + + public ModuleDefinition getModuleDefinition(String name) { + return modules.get(name); + } +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/util/Main.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/util/Main.java new file mode 100644 index 00000000000..3a9660c4793 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/util/Main.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.util; + +import org.apache.cloudstack.spring.module.factory.CloudStackSpringContext; + +public class Main { + + long start = System.currentTimeMillis(); + + public Main() { + + } + + public void start() throws Exception { + CloudStackSpringContext context = new CloudStackSpringContext(); + context.registerShutdownHook(); + + if ( Boolean.getBoolean("force.exit") ) { + System.exit(0); + } + } + + public long getTime() { + return System.currentTimeMillis() - start; + } + + + public static void main(String... 
args) { + Main main = new Main(); + + try { + main.start(); + System.out.println("STARTUP COMPLETE [" + main.getTime() + "] ms"); + } catch ( Exception e ) { + e.printStackTrace(); + System.out.println("STARTUP FAILED [" + main.getTime() + "] ms"); + System.err.println("STARTUP FAILED [" + main.getTime() + "] ms"); + System.exit(1); + } + } +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/util/ModuleLocationUtils.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/util/ModuleLocationUtils.java new file mode 100644 index 00000000000..eeab154b395 --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/util/ModuleLocationUtils.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.cloudstack.spring.module.util; + +public class ModuleLocationUtils { + + private static final String ALL_MODULE_PROPERTIES = "classpath*:%s/*/module.properties"; + private static final String MODULE_PROPERTIES = "classpath:%s/%s/module.properties"; + private static final String CONTEXT_LOCATION = "classpath*:%s/%s/*context.xml"; + private static final String INHERTIABLE_CONTEXT_LOCATION = "classpath*:%s/%s/*context-inheritable.xml"; + private static final String OVERRIDE_CONTEXT_LOCATION = "classpath*:%s/%s/*context-override.xml"; + private static final String DEFAULTS_LOCATION = "classpath*:%s/%s/*defaults.properties"; + + public static String getModulesLocation(String baseDir) { + return String.format(ALL_MODULE_PROPERTIES, baseDir); + } + + public static String getModuleLocation(String baseDir, String name) { + return String.format(MODULE_PROPERTIES, baseDir, name); + } + + public static String getContextLocation(String baseDir, String name) { + return String.format(CONTEXT_LOCATION, baseDir, name); + } + + public static String getInheritableContextLocation(String baseDir, String name) { + return String.format(INHERTIABLE_CONTEXT_LOCATION, baseDir, name); + } + + public static String getOverrideContextLocation(String baseDir, String name) { + return String.format(OVERRIDE_CONTEXT_LOCATION, baseDir, name); + } + + public static String getDefaultsLocation(String baseDir, String name) { + return String.format(DEFAULTS_LOCATION, baseDir, name); + } +} diff --git a/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/web/CloudStackContextLoaderListener.java b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/web/CloudStackContextLoaderListener.java new file mode 100644 index 00000000000..e704437774b --- /dev/null +++ b/framework/spring/module/src/main/java/org/apache/cloudstack/spring/module/web/CloudStackContextLoaderListener.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation 
(ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.web; + +import java.io.IOException; + +import javax.servlet.ServletContext; +import javax.servlet.ServletContextEvent; + +import org.apache.cloudstack.spring.module.factory.CloudStackSpringContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.ApplicationContext; +import org.springframework.web.context.ConfigurableWebApplicationContext; +import org.springframework.web.context.ContextLoaderListener; + +public class CloudStackContextLoaderListener extends ContextLoaderListener { + + public static final String WEB_PARENT_MODULE = "parentModule"; + public static final String WEB_PARENT_MODULE_DEFAULT = "web"; + + private static final Logger log = LoggerFactory.getLogger(CloudStackContextLoaderListener.class); + + CloudStackSpringContext cloudStackContext; + String configuredParentName; + + @Override + protected ApplicationContext loadParentContext(ServletContext servletContext) { + return cloudStackContext.getApplicationContextForWeb(configuredParentName); + } + + @Override + public void contextInitialized(ServletContextEvent event) { + try { + cloudStackContext = new CloudStackSpringContext(); + } catch (IOException e) { 
+ log.error("Failed to start CloudStack", e); + throw new RuntimeException("Failed to initialize CloudStack Spring modules", e); + } + + configuredParentName = event.getServletContext().getInitParameter(WEB_PARENT_MODULE); + if ( configuredParentName == null ) { + configuredParentName = WEB_PARENT_MODULE_DEFAULT; + } + + super.contextInitialized(event); + } + + @Override + protected void customizeContext(ServletContext servletContext, ConfigurableWebApplicationContext applicationContext) { + super.customizeContext(servletContext, applicationContext); + + String[] newLocations = cloudStackContext.getConfigLocationsForWeb(configuredParentName, + applicationContext.getConfigLocations()); + + applicationContext.setConfigLocations(newLocations); + } + +} diff --git a/framework/spring/module/src/main/resources/org/apache/cloudstack/spring/module/model/impl/defaults-context.xml b/framework/spring/module/src/main/resources/org/apache/cloudstack/spring/module/model/impl/defaults-context.xml new file mode 100644 index 00000000000..b19833a735b --- /dev/null +++ b/framework/spring/module/src/main/resources/org/apache/cloudstack/spring/module/model/impl/defaults-context.xml @@ -0,0 +1,28 @@ + + + + + + diff --git a/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/factory/InitTest.java b/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/factory/InitTest.java new file mode 100644 index 00000000000..db3549b73b1 --- /dev/null +++ b/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/factory/InitTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.factory; + +import javax.annotation.PostConstruct; + +public class InitTest { + + public static boolean initted = false; + + @PostConstruct + public void init() { + setInitted(true); + } + + public boolean isInitted() { + return initted; + } + + public void setInitted(boolean initted) { + InitTest.initted = initted; + } +} diff --git a/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/factory/ModuleBasedContextFactoryTest.java b/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/factory/ModuleBasedContextFactoryTest.java new file mode 100644 index 00000000000..2947615d597 --- /dev/null +++ b/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/factory/ModuleBasedContextFactoryTest.java @@ -0,0 +1,121 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.factory; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.util.Collection; + +import org.apache.cloudstack.spring.module.locator.impl.ClasspathModuleDefinitionLocator; +import org.apache.cloudstack.spring.module.model.ModuleDefinition; +import org.apache.cloudstack.spring.module.model.ModuleDefinitionSet; +import org.junit.Before; +import org.junit.Test; +import org.springframework.beans.factory.NoSuchBeanDefinitionException; +import org.springframework.context.ApplicationContext; + +public class ModuleBasedContextFactoryTest { + + Collection defs; + + @Before + public void setUp() throws IOException { + InstantiationCounter.count = 0; + + ClasspathModuleDefinitionLocator locator = new ClasspathModuleDefinitionLocator(); + defs = locator.locateModules("testhierarchy"); + } + + @Test + public void testLoad() throws IOException { + + ModuleBasedContextFactory factory = new ModuleBasedContextFactory(); + + ModuleDefinitionSet set = factory.loadModules(defs, "base"); + + assertNotNull(set.getApplicationContext("base")); + } + + @Test + public void testOverride() throws IOException { + + InitTest.initted = false; + + ModuleBasedContextFactory factory = new ModuleBasedContextFactory(); + + ModuleDefinitionSet set = factory.loadModules(defs, "base"); + + assertTrue(!InitTest.initted); + assertEquals("a string", set.getApplicationContext("child1").getBean("override", String.class)); + } + + @Test + public void testBeans() throws IOException { + ModuleBasedContextFactory factory = new ModuleBasedContextFactory(); + ModuleDefinitionSet set = factory.loadModules(defs, "base"); + + testBeansInContext(set, "base", 1, new String[] { "base" }, new String[] { "child1", "child2", "child1-1" }); + testBeansInContext(set, "child1", 2, new String[] { "base", "child1" }, new String[] { "child2", 
"child1-1" }); + testBeansInContext(set, "child2", 4, new String[] { "base", "child2" }, new String[] { "child1", "child1-1" }); + testBeansInContext(set, "child1-1", 3, new String[] { "base", "child1", "child1-1" }, new String[] { "child2" }); + } + + protected void testBeansInContext(ModuleDefinitionSet set, String name, int order, String[] parents, String[] notTheres) { + ApplicationContext context = set.getApplicationContext(name); + + String nameBean = context.getBean("name", String.class); + assertEquals(name, nameBean); + + for ( String parent : parents ) { + String parentBean = context.getBean(parent, String.class); + assertEquals(parent, parentBean); + } + + for ( String notThere : notTheres ) { + try { + context.getBean(notThere, String.class); + fail(); + } catch ( NoSuchBeanDefinitionException e ) { + } + } + + int count = context.getBean("count", InstantiationCounter.class).getCount(); + + assertEquals(order, count); + } + + public static class InstantiationCounter { + public static Integer count = 0; + + int myCount; + + public InstantiationCounter() { + synchronized (count) { + myCount = count + 1; + count = myCount; + } + } + + public int getCount() { + return myCount; + } + + } +} diff --git a/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/locator/impl/ClasspathModuleDefinitionSetLocatorTest.java b/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/locator/impl/ClasspathModuleDefinitionSetLocatorTest.java new file mode 100644 index 00000000000..5114187b9a5 --- /dev/null +++ b/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/locator/impl/ClasspathModuleDefinitionSetLocatorTest.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.locator.impl; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.util.Collection; + +import org.apache.cloudstack.spring.module.model.ModuleDefinition; +import org.junit.Test; + +public class ClasspathModuleDefinitionSetLocatorTest { + + @Test + public void testDiscover() throws IOException { + ClasspathModuleDefinitionLocator factory = new ClasspathModuleDefinitionLocator(); + + Collection modules = factory.locateModules("testhierarchy"); + + assertEquals(5, modules.size()); + } + +} diff --git a/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionTest.java b/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionTest.java new file mode 100644 index 00000000000..31a82baab20 --- /dev/null +++ b/framework/spring/module/src/test/java/org/apache/cloudstack/spring/module/model/impl/DefaultModuleDefinitionTest.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.spring.module.model.impl; + +import static org.junit.Assert.*; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +import org.apache.cloudstack.spring.module.util.ModuleLocationUtils; +import org.junit.Test; +import org.springframework.core.io.Resource; +import org.springframework.core.io.support.PathMatchingResourcePatternResolver; +import org.springframework.core.io.support.ResourcePatternResolver; + +public class DefaultModuleDefinitionTest { + + ResourcePatternResolver resolver = new PathMatchingResourcePatternResolver(); + + protected DefaultModuleDefinition createDef(String name) { + Resource resource = + resolver.getResource(ModuleLocationUtils.getModuleLocation("testfiles", name)); + + return new DefaultModuleDefinition("testfiles", resource, resolver); + } + + @Test + public void testBlankName() { + DefaultModuleDefinition def = createDef("blankname"); + + try { + def.init(); + fail(); + } catch ( IOException e ) { + assertTrue(e.getMessage().contains("Missing name property")); + } + + } + + @Test + public void testMissingName() { + DefaultModuleDefinition def = createDef("missingname"); + + try { + def.init(); + fail(); + } catch ( IOException e ) { + assertTrue(e.getMessage().contains("Missing name property")); + } + + } + + @Test + public void testBadName() { + DefaultModuleDefinition def = 
createDef("badname"); + + try { + def.init(); + fail(); + } catch ( IOException e ) { + assertTrue(e.getMessage().contains("is expected to exist at")); + } + } + + @Test + public void testGood() throws IOException { + DefaultModuleDefinition def = createDef("good"); + def.init(); + assertTrue(def.isValid()); + } + + @Test + public void testWrongName() { + DefaultModuleDefinition def = createDef("wrongname"); + + try { + def.init(); + fail(); + } catch ( IOException e ) { + assertTrue(e.getMessage().contains("do not appear to be the same resource")); + } + } + + @Test + public void testAllFiles() throws IOException { + DefaultModuleDefinition def = createDef("all"); + + def.init(); + + assertEquals(2, def.getContextLocations().size()); + has(def.getContextLocations(), "empty-context.xml", "empty2-context.xml"); + + assertEquals(2, def.getConfigLocations().size()); + has(def.getConfigLocations(), "test2-defaults.properties", "defaults.properties"); + + assertEquals(2, def.getInheritableContextLocations().size()); + has(def.getInheritableContextLocations(), "empty-context-inheritable.xml", "empty2-context-inheritable.xml"); + } + + protected void has(List resources, String... files) throws IOException { + int count = 0; + + for ( Resource r : resources ) { + for ( String file : files ) { + if ( r.getURL().toExternalForm().contains(file) ) { + count++; + break; + } + } + } + + assertEquals(resources + " does not contain " + Arrays.toString(files), files.length, count); + } +} diff --git a/agent/scripts/_run.sh b/framework/spring/module/src/test/resources/testfiles/all/defaults.properties similarity index 76% rename from agent/scripts/_run.sh rename to framework/spring/module/src/test/resources/testfiles/all/defaults.properties index 2ba44bf1abf..c08d10b386a 100644 --- a/agent/scripts/_run.sh +++ b/framework/spring/module/src/test/resources/testfiles/all/defaults.properties @@ -15,12 +15,4 @@ # specific language governing permissions and limitations # under the License. 
-#script to start multiple agents on one host -num=$1 -port=8787 -while [ $num -gt 0 ] -do -let "port=$port + $num" -java -Xrunjdwp:transport=dt_socket,address=$port,server=y,suspend=n -cp ./'*' com.cloud.agent.AgentShell & -let "num=$num - 1" -done +blah=1 diff --git a/framework/spring/module/src/test/resources/testfiles/all/empty-context-inheritable.xml b/framework/spring/module/src/test/resources/testfiles/all/empty-context-inheritable.xml new file mode 100644 index 00000000000..7c6b8fd47b1 --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/all/empty-context-inheritable.xml @@ -0,0 +1,26 @@ + + + + diff --git a/framework/spring/module/src/test/resources/testfiles/all/empty-context.xml b/framework/spring/module/src/test/resources/testfiles/all/empty-context.xml new file mode 100644 index 00000000000..7c6b8fd47b1 --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/all/empty-context.xml @@ -0,0 +1,26 @@ + + + + diff --git a/framework/spring/module/src/test/resources/testfiles/all/empty2-context-inheritable.xml b/framework/spring/module/src/test/resources/testfiles/all/empty2-context-inheritable.xml new file mode 100644 index 00000000000..7c6b8fd47b1 --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/all/empty2-context-inheritable.xml @@ -0,0 +1,26 @@ + + + + diff --git a/framework/spring/module/src/test/resources/testfiles/all/empty2-context.xml b/framework/spring/module/src/test/resources/testfiles/all/empty2-context.xml new file mode 100644 index 00000000000..7c6b8fd47b1 --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/all/empty2-context.xml @@ -0,0 +1,26 @@ + + + + diff --git a/framework/spring/module/src/test/resources/testfiles/all/module.properties b/framework/spring/module/src/test/resources/testfiles/all/module.properties new file mode 100644 index 00000000000..3faaf94b5ea --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/all/module.properties @@ 
-0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=all diff --git a/framework/spring/module/src/test/resources/testfiles/all/test2-defaults.properties b/framework/spring/module/src/test/resources/testfiles/all/test2-defaults.properties new file mode 100644 index 00000000000..00ae6c00d2e --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/all/test2-defaults.properties @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ diff --git a/framework/spring/module/src/test/resources/testfiles/badname/module.properties b/framework/spring/module/src/test/resources/testfiles/badname/module.properties new file mode 100644 index 00000000000..354accfa8ab --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/badname/module.properties @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=what diff --git a/framework/spring/module/src/test/resources/testfiles/blankname/module.properties b/framework/spring/module/src/test/resources/testfiles/blankname/module.properties new file mode 100644 index 00000000000..b11279b50c6 --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/blankname/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# A bunch of whitespace is after name +name= diff --git a/framework/spring/module/src/test/resources/testfiles/good/empty-context.xml b/framework/spring/module/src/test/resources/testfiles/good/empty-context.xml new file mode 100644 index 00000000000..7c6b8fd47b1 --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/good/empty-context.xml @@ -0,0 +1,26 @@ + + + + diff --git a/framework/spring/module/src/test/resources/testfiles/good/module.properties b/framework/spring/module/src/test/resources/testfiles/good/module.properties new file mode 100644 index 00000000000..47e60ec2ce1 --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/good/module.properties @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=good diff --git a/framework/spring/module/src/test/resources/testfiles/missingname/module.properties b/framework/spring/module/src/test/resources/testfiles/missingname/module.properties new file mode 100644 index 00000000000..00ae6c00d2e --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/missingname/module.properties @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + diff --git a/framework/spring/module/src/test/resources/testfiles/wrongname/module.properties b/framework/spring/module/src/test/resources/testfiles/wrongname/module.properties new file mode 100644 index 00000000000..47e60ec2ce1 --- /dev/null +++ b/framework/spring/module/src/test/resources/testfiles/wrongname/module.properties @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=good diff --git a/framework/spring/module/src/test/resources/testhierarchy/base/module.properties b/framework/spring/module/src/test/resources/testhierarchy/base/module.properties new file mode 100644 index 00000000000..955a32c2521 --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/base/module.properties @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=base diff --git a/framework/spring/module/src/test/resources/testhierarchy/base/test-context-inheritable.xml b/framework/spring/module/src/test/resources/testhierarchy/base/test-context-inheritable.xml new file mode 100644 index 00000000000..188301e599c --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/base/test-context-inheritable.xml @@ -0,0 +1,28 @@ + + + + + + diff --git a/framework/spring/module/src/test/resources/testhierarchy/base/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/base/test-context.xml new file mode 100644 index 00000000000..a72d8c6128a --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/base/test-context.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + diff --git a/framework/spring/module/src/test/resources/testhierarchy/child1-1/module.properties b/framework/spring/module/src/test/resources/testhierarchy/child1-1/module.properties new file mode 100644 index 00000000000..4abe53f1b94 --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/child1-1/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=child1-1 +parent=child1 diff --git a/framework/spring/module/src/test/resources/testhierarchy/child1-1/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/child1-1/test-context.xml new file mode 100644 index 00000000000..7a2a9ad6d9b --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/child1-1/test-context.xml @@ -0,0 +1,34 @@ + + + + + + + + + + + + diff --git a/framework/spring/module/src/test/resources/testhierarchy/child1/module.properties b/framework/spring/module/src/test/resources/testhierarchy/child1/module.properties new file mode 100644 index 00000000000..9f4df489fe3 --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/child1/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=child1 +parent=base diff --git a/framework/spring/module/src/test/resources/testhierarchy/child1/test-context-override.xml b/framework/spring/module/src/test/resources/testhierarchy/child1/test-context-override.xml new file mode 100644 index 00000000000..ceffeb5bfff --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/child1/test-context-override.xml @@ -0,0 +1,30 @@ + + + + + + + + diff --git a/framework/spring/module/src/test/resources/testhierarchy/child1/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/child1/test-context.xml new file mode 100644 index 00000000000..f906183fcc6 --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/child1/test-context.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + + + + + diff --git a/framework/spring/module/src/test/resources/testhierarchy/child2/module.properties b/framework/spring/module/src/test/resources/testhierarchy/child2/module.properties new file mode 100644 index 00000000000..f03edfcc347 --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/child2/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=child2 +parent=base diff --git a/framework/spring/module/src/test/resources/testhierarchy/child2/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/child2/test-context.xml new file mode 100644 index 00000000000..24bac54818d --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/child2/test-context.xml @@ -0,0 +1,33 @@ + + + + + + + + + + + diff --git a/framework/spring/module/src/test/resources/testhierarchy/orphan1/module.properties b/framework/spring/module/src/test/resources/testhierarchy/orphan1/module.properties new file mode 100644 index 00000000000..d4a0e6cce2d --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/orphan1/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=orphan1 +parent=missing1 diff --git a/framework/spring/module/src/test/resources/testhierarchy/orphan1/test-context.xml b/framework/spring/module/src/test/resources/testhierarchy/orphan1/test-context.xml new file mode 100644 index 00000000000..658beeae5de --- /dev/null +++ b/framework/spring/module/src/test/resources/testhierarchy/orphan1/test-context.xml @@ -0,0 +1,30 @@ + + + + + + + + diff --git a/m2-settings.xml b/m2-settings.xml deleted file mode 100644 index 9f6c934e282..00000000000 --- a/m2-settings.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - admin - central - - - admin - snapshots - - - - - - - - false - - repo1 - repo1 - http://repo1.maven.org/maven2 - - - - false - - central - libs-release - http://cs.ibuildthecloud.com/artifactory/libs-release - - - artifactory - - - - artifactory - - - diff --git a/maven-standard/pom.xml b/maven-standard/pom.xml new file mode 100644 index 00000000000..e4a81d846de --- /dev/null +++ b/maven-standard/pom.xml @@ -0,0 +1,48 @@ + + + 4.0.0 + cloud-maven-standard + Apache CloudStack Maven Conventions Parent + Historically ACS was built with a custom build system mixing ant and wscript. When the conversion to maven was done the existing directory structure in git was kept. So the src, testing, and resources folders in ACS don't follow the standard maven conventions. 
This parent pom forces the folders back to the standard conventions + pom + + org.apache.cloudstack + cloudstack + 4.3.0-SNAPSHOT + ../pom.xml + + + ${basedir}/src/main/java + ${basedir}/src/main/scripts + ${basedir}/src/test/java + ${basedir}/target/classes + ${basedir}/target/test-classes + + + ${basedir}/src/main/resources + + + + + ${basedir}/src/test/resources + + + + diff --git a/packaging/centos63/cloud-agent.rc b/packaging/centos63/cloud-agent.rc index b3784882975..ab49524af4b 100755 --- a/packaging/centos63/cloud-agent.rc +++ b/packaging/centos63/cloud-agent.rc @@ -61,8 +61,7 @@ PCP=`ls /usr/share/cloudstack-agent/plugins/*.jar 2>/dev/null | tr '\n' ':' | se # We need to append the JSVC daemon JAR to the classpath # AgentShell implements the JSVC daemon methods -# We also need JNA in the classpath (from the distribution) for the Libvirt Java bindings -export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/jna.jar:$ACP:$PCP:/etc/cloudstack/agent:/usr/share/cloudstack-common/scripts" +export CLASSPATH="/usr/share/java/commons-daemon.jar:$ACP:$PCP:/etc/cloudstack/agent:/usr/share/cloudstack-common/scripts" start() { echo -n $"Starting $PROGNAME: " diff --git a/packaging/centos63/cloud-management.rc b/packaging/centos63/cloud-management.rc index 35f31b28538..2f04793b79c 100755 --- a/packaging/centos63/cloud-management.rc +++ b/packaging/centos63/cloud-management.rc @@ -57,6 +57,8 @@ stop() { done if [ "$(ps --pid $pid | grep -c $pid)" -eq "0" ]; then log_success_msg "Stopping ${NAME}:" + rm -f /var/run/${NAME}.pid + rm -f /var/lock/subsys/${NAME} else log_failure_msg "Stopping ${NAME}:" fi @@ -77,7 +79,7 @@ set_ulimit() { } handle_pid_file() { - if [ "$1" -ne 0 ] ; then + if [ "$1" -ne 0 ] && [ "$1" -ne 3 ]; then echo "The pid file locates at /var/run/${NAME}.pid and lock file at /var/lock/subsys/${NAME}. Starting ${NAME} will take care of them or you can manually clean up." 
fi diff --git a/packaging/centos63/cloud.spec b/packaging/centos63/cloud.spec index 599463e2352..ebe45113eb1 100644 --- a/packaging/centos63/cloud.spec +++ b/packaging/centos63/cloud.spec @@ -112,7 +112,6 @@ The Apache CloudStack files shared between agent and management server %package agent Summary: CloudStack Agent for KVM hypervisors Requires: java >= 1.6.0 -Requires: jna >= 3.2.4 Requires: %{name}-common = %{_ver} Requires: libvirt Requires: bridge-utils @@ -173,13 +172,15 @@ echo Doing CloudStack build cp packaging/centos63/replace.properties build/replace.properties echo VERSION=%{_maventag} >> build/replace.properties echo PACKAGE=%{name} >> build/replace.properties +touch build/gitrev.txt +echo $(git rev-parse HEAD) > build/gitrev.txt -if [ "%{_ossnoss}" == "NONOSS" -o "%{_ossnoss}" == "nonoss" ] ; then - echo "Executing mvn packaging for NONOSS ..." - mvn -Pawsapi,systemvm -Dnonoss package clean install +if [ "%{_ossnoss}" == "NOREDIST" -o "%{_ossnoss}" == "noredist" ] ; then + echo "Executing mvn packaging with non-redistributable libraries ..." + mvn -Pawsapi,systemvm -Dnoredist clean package else - echo "Executing mvn packaging for OSS ..." - mvn -Pawsapi package -Dsystemvm clean install + echo "Executing mvn packaging ..." 
+ mvn -Pawsapi,systemvm clean package fi %install @@ -200,13 +201,16 @@ mkdir -p ${RPM_BUILD_ROOT}%{_sysconfdir}/sysconfig mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/vms mkdir -p ${RPM_BUILD_ROOT}%{python_sitearch}/ +mkdir -p ${RPM_BUILD_ROOT}%/usr/bin cp -r scripts/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts -install -D services/console-proxy/server/dist/systemvm.iso ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/vms/systemvm.iso -install -D services/console-proxy/server/dist/systemvm.zip ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/vms/systemvm.zip +install -D systemvm/dist/systemvm.iso ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/vms/systemvm.iso +install -D systemvm/dist/systemvm.zip ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/vms/systemvm.zip install python/lib/cloud_utils.py ${RPM_BUILD_ROOT}%{python_sitearch}/cloud_utils.py cp -r python/lib/cloudutils ${RPM_BUILD_ROOT}%{python_sitearch}/ python -m py_compile ${RPM_BUILD_ROOT}%{python_sitearch}/cloud_utils.py python -m compileall ${RPM_BUILD_ROOT}%{python_sitearch}/cloudutils +cp build/gitrev.txt ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts +cp packaging/centos63/cloudstack-sccs ${RPM_BUILD_ROOT}/usr/bin mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts/network/cisco cp -r plugins/network-elements/cisco-vnmc/scripts/network/cisco/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-common/scripts/network/cisco @@ -287,6 +291,8 @@ install -D agent/target/transformed/agent.properties ${RPM_BUILD_ROOT}%{_sysconf install -D agent/target/transformed/environment.properties ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/environment.properties install -D agent/target/transformed/log4j-cloud.xml ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/agent/log4j-cloud.xml install -D agent/target/transformed/cloud-setup-agent ${RPM_BUILD_ROOT}%{_bindir}/%{name}-setup-agent +install -D agent/target/transformed/cloudstack-agent-upgrade 
${RPM_BUILD_ROOT}%{_bindir}/%{name}-agent-upgrade +install -D agent/target/transformed/libvirtqemuhook ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib/libvirtqemuhook install -D agent/target/transformed/cloud-ssh ${RPM_BUILD_ROOT}%{_bindir}/%{name}-ssh install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-%{_maventag}.jar ${RPM_BUILD_ROOT}%{_datadir}/%name-agent/lib/cloud-plugin-hypervisor-kvm-%{_maventag}.jar cp plugins/hypervisors/kvm/target/dependencies/* ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib @@ -316,7 +322,7 @@ cp awsapi/resource/Axis2/axis2.xml ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/w cp awsapi/target/WEB-INF/services/cloud-ec2.aar ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/services -for name in applicationContext.xml cloud-bridge.properties commons-logging.properties ec2-service.properties ; do +for name in cloud-bridge.properties commons-logging.properties ec2-service.properties ; do mv ${RPM_BUILD_ROOT}%{_datadir}/%{name}-bridge/webapps/awsapi/WEB-INF/classes/$name \ ${RPM_BUILD_ROOT}%{_sysconfdir}/%{name}/management/$name done @@ -461,6 +467,13 @@ fi %post agent if [ "$1" == "1" ] ; then + echo "Running %{_bindir}/%{name}-agent-upgrade to update bridge name for upgrade from CloudStack 4.0.x (and before) to CloudStack 4.1 (and later)" + %{_bindir}/%{name}-agent-upgrade + if [ ! 
-d %{_sysconfdir}/libvirt/hooks ] ; then + mkdir %{_sysconfdir}/libvirt/hooks + fi + cp -a ${RPM_BUILD_ROOT}%{_datadir}/%{name}-agent/lib/libvirtqemuhook %{_sysconfdir}/libvirt/hooks/qemu + /sbin/service libvirtd restart /sbin/chkconfig --add cloudstack-agent > /dev/null 2>&1 || true /sbin/chkconfig --level 345 cloudstack-agent on > /dev/null 2>&1 || true fi @@ -473,11 +486,20 @@ if [ -f "%{_sysconfdir}/cloud.rpmsave/agent/agent.properties" ]; then mv %{_sysconfdir}/cloud.rpmsave/agent/agent.properties %{_sysconfdir}/cloud.rpmsave/agent/agent.properties.rpmsave fi +%preun usage +/sbin/service cloudstack-usage stop || true +if [ "$1" == "0" ] ; then + /sbin/chkconfig --del cloudstack-usage > /dev/null 2>&1 || true + /sbin/service cloudstack-usage stop > /dev/null 2>&1 || true +fi + %post usage if [ -f "%{_sysconfdir}/%{name}/management/db.properties" ]; then echo Replacing db.properties with management server db.properties rm -f %{_sysconfdir}/%{name}/usage/db.properties ln -s %{_sysconfdir}/%{name}/management/db.properties %{_sysconfdir}/%{name}/usage/db.properties + /sbin/chkconfig --add cloudstack-usage > /dev/null 2>&1 || true + /sbin/chkconfig --level 345 cloudstack-usage on > /dev/null 2>&1 || true fi #%post awsapi @@ -514,7 +536,6 @@ fi %config(noreplace) %{_sysconfdir}/%{name}/management/tomcat-users.xml %config(noreplace) %{_sysconfdir}/%{name}/management/web.xml %config(noreplace) %{_sysconfdir}/%{name}/management/environment.properties -%config(noreplace) %{_sysconfdir}/%{name}/management/applicationContext.xml %config(noreplace) %{_sysconfdir}/%{name}/management/cloud-bridge.properties %config(noreplace) %{_sysconfdir}/%{name}/management/commons-logging.properties %config(noreplace) %{_sysconfdir}/%{name}/management/ec2-service.properties @@ -548,12 +569,14 @@ fi %files agent %attr(0755,root,root) %{_bindir}/%{name}-setup-agent +%attr(0755,root,root) %{_bindir}/%{name}-agent-upgrade %attr(0755,root,root) %{_bindir}/%{name}-ssh %attr(0755,root,root) 
%{_sysconfdir}/init.d/%{name}-agent %attr(0755,root,root) %{_datadir}/%{name}-common/scripts/network/cisco %config(noreplace) %{_sysconfdir}/%{name}/agent %dir %{_localstatedir}/log/%{name}/agent %attr(0644,root,root) %{_datadir}/%{name}-agent/lib/*.jar +%attr(0755,root,root) %{_datadir}/%{name}-agent/lib/libvirtqemuhook %dir %{_datadir}/%{name}-agent/plugins %{_defaultdocdir}/%{name}-agent-%{version}/LICENSE %{_defaultdocdir}/%{name}-agent-%{version}/NOTICE @@ -562,6 +585,7 @@ fi %dir %attr(0755,root,root) %{python_sitearch}/cloudutils %dir %attr(0755,root,root) %{_datadir}/%{name}-common/vms %attr(0755,root,root) %{_datadir}/%{name}-common/scripts +%attr(0755,root,root) /usr/bin/cloudstack-sccs %attr(0644, root, root) %{_datadir}/%{name}-common/vms/systemvm.iso %attr(0644, root, root) %{_datadir}/%{name}-common/vms/systemvm.zip %attr(0644,root,root) %{python_sitearch}/cloud_utils.py diff --git a/docs/publican-cloudstack/overrides.cfg b/packaging/centos63/cloudstack-sccs similarity index 80% rename from docs/publican-cloudstack/overrides.cfg rename to packaging/centos63/cloudstack-sccs index 94a24ef2962..e05d372c7fe 100644 --- a/docs/publican-cloudstack/overrides.cfg +++ b/packaging/centos63/cloudstack-sccs @@ -1,13 +1,14 @@ -# Config::Simple 4.59 -# Thu Aug 11 14:07:41 2011 +#!/bin/sh + # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information# +# distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an @@ -16,5 +17,4 @@ # specific language governing permissions and limitations # under the License. -strict: 0 - +cat /usr/share/cloudstack-common/scripts/gitrev.txt diff --git a/packaging/centos63/package.sh b/packaging/centos63/package.sh index f30a0e7120a..04b92586514 100755 --- a/packaging/centos63/package.sh +++ b/packaging/centos63/package.sh @@ -21,54 +21,23 @@ function usage() { echo "usage: ./package.sh [-p|--pack] [-h|--help] [ARGS]" echo "" echo "The commonly used Arguments are:" - echo "oss|OSS To package OSS specific" - echo "nonoss|NONOSS To package NONOSS specific" + echo "oss|OSS To package with only redistributable libraries (default)" + echo "noredist|NOREDIST To package with non-redistributable libraries" echo "" echo "Examples: ./package.sh -p|--pack oss|OSS" - echo " ./package.sh -p|--pack nonoss|NONOSS" + echo " ./package.sh -p|--pack noredist|NOREDIST" echo " ./package.sh (Default OSS)" exit 1 } -function defaultPackaging() { -CWD=`pwd` -RPMDIR=$CWD/../../dist/rpmbuild -PACK_PROJECT=cloudstack - -VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep '^[0-9]\.'` -if echo $VERSION | grep SNAPSHOT ; then - REALVER=`echo $VERSION | cut -d '-' -f 1` - DEFVER="-D_ver $REALVER" - DEFPRE="-D_prerelease 1" - DEFREL="-D_rel SNAPSHOT" -else - REALVER=$VERSION - DEFVER="-D_ver $REALVER" - DEFREL="-D_rel 1" -fi - -mkdir -p $RPMDIR/SPECS -mkdir -p $RPMDIR/BUILD -mkdir -p $RPMDIR/SRPMS -mkdir -p $RPMDIR/RPMS -mkdir -p $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION - -(cd ../../; tar -c --exclude .git --exclude dist . 
| tar -C $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION -x ) -(cd $RPMDIR/SOURCES/; tar -czf $PACK_PROJECT-$VERSION.tgz $PACK_PROJECT-$VERSION) - -cp cloud.spec $RPMDIR/SPECS - -(cd $RPMDIR; rpmbuild --define "_topdir $RPMDIR" "${DEFVER}" "${DEFREL}" ${DEFPRE+"${DEFPRE}"} -ba SPECS/cloud.spec) - -exit -} - function packaging() { CWD=`pwd` RPMDIR=$CWD/../../dist/rpmbuild PACK_PROJECT=cloudstack -DEFOSSNOSS="-D_ossnoss $packageval" +if [ -n "$1" ] ; then + DEFOSSNOSS="-D_ossnoss $packageval" +fi VERSION=`(cd ../../; mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:evaluate -Dexpression=project.version) | grep '^[0-9]\.'` @@ -95,7 +64,7 @@ mkdir -p $RPMDIR/SOURCES/$PACK_PROJECT-$VERSION cp cloud.spec $RPMDIR/SPECS -(cd $RPMDIR; rpmbuild --define "_topdir $RPMDIR" "${DEFVER}" "${DEFREL}" ${DEFPRE+\"${DEFPRE}\"} "${DEFOSSNOSS}" -bb SPECS/cloud.spec) +(cd $RPMDIR; rpmbuild --define "_topdir $RPMDIR" "${DEFVER}" "${DEFREL}" ${DEFPRE+"${DEFPRE}"} ${DEFOSSNOSS+"$DEFOSSNOSS"} -bb SPECS/cloud.spec) exit } @@ -103,7 +72,7 @@ exit if [ $# -lt 1 ] ; then - defaultPackaging + packaging elif [ $# -gt 0 ] ; then @@ -123,9 +92,9 @@ elif [ $# -gt 0 ] ; then echo "Doing CloudStack Packaging ....." packageval=$2 if [ "$packageval" == "oss" -o "$packageval" == "OSS" ] ; then - defaultPackaging - elif [ "$packageval" == "nonoss" -o "$packageval" == "NONOSS" ] ; then packaging + elif [ "$packageval" == "noredist" -o "$packageval" == "NOREDIST" ] ; then + packaging noredist else echo "Error: Incorrect value provided in package.sh script, Please see help ./package.sh --help|-h for more details." exit 1 @@ -136,19 +105,8 @@ elif [ $# -gt 0 ] ; then usage exit 1 ;; - --) - echo "Unrecognized option..." - usage - exit 1 - ;; - -*) - echo "Unrecognized option..." 
- usage - exit 1 - ;; *) shift - break ;; esac done diff --git a/packaging/debian/init/cloud-agent b/packaging/debian/init/cloud-agent index 29f64881626..e7338752f3e 100755 --- a/packaging/debian/init/cloud-agent +++ b/packaging/debian/init/cloud-agent @@ -60,8 +60,7 @@ PCP=`ls /usr/share/cloudstack-agent/plugins/*.jar 2>/dev/null | tr '\n' ':' | se # We need to append the JSVC daemon JAR to the classpath # AgentShell implements the JSVC daemon methods -# We also need JNA in the classpath (from the distribution) for the Libvirt Java bindings -export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/jna.jar:$ACP:$PCP:/etc/cloudstack/agent" +export CLASSPATH="/usr/share/java/commons-daemon.jar:$ACP:$PCP:/etc/cloudstack/agent" wait_for_network() { i=1 diff --git a/patches/pom.xml b/patches/pom.xml deleted file mode 100644 index 6457c7a7015..00000000000 --- a/patches/pom.xml +++ /dev/null @@ -1,84 +0,0 @@ - - - 4.0.0 - cloud-patches - Apache CloudStack SystemVM Patches - pom - - org.apache.cloudstack - cloudstack - 4.3.0-SNAPSHOT - - - install - - - maven-antrun-plugin - 1.7 - - - build-cloud-scripts - package - - run - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/patches/systemvm/debian/vpn/opt/cloud/bin/vpc_vpn_l2tp.sh b/patches/systemvm/debian/vpn/opt/cloud/bin/vpc_vpn_l2tp.sh deleted file mode 100755 index 0595869adf8..00000000000 --- a/patches/systemvm/debian/vpn/opt/cloud/bin/vpc_vpn_l2tp.sh +++ /dev/null @@ -1,178 +0,0 @@ -#!/bin/bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - - - -#set -x -usage() { - printf "Usage:\n" - printf "Create VPN : %s -c -r -l -p -s -D -z < zone cidr> \n" $(basename $0) - printf "Delete VPN : %s -d -l -s -D -z < zone cidr> \n" $(basename $0) - printf "Add VPN User : %s -u \n" $(basename $0) - printf "Remote VPN User: %s -U /etc/ipsec.d/ipsec.any.secrets - sed -i -e "s/^ip range = .*$/ip range = $client_range/" /etc/xl2tpd/xl2tpd.conf - sed -i -e "s/^local ip = .*$/local ip = $local_ip/" /etc/xl2tpd/xl2tpd.conf - - sed -i -e "s/^ms-dns.*$/ms-dns $local_ip/" /etc/ppp/options.xl2tpd - - iptables_ "-D" $public_ip - iptables_ "-I" $public_ip - - ipsec_server "restart" - - ipsec auto --rereadsecrets - ipsec auto --replace L2TP-PSK -} - -destroy_l2tp_ipsec_vpn_server() { - local public_ip=$1 - - ipsec auto --down L2TP-PSK - - iptables_ "-D" $public_ip - - ipsec_server "stop" -} - -remove_l2tp_ipsec_user() { - local u=$1 - sed -i -e "/^$u .*$/d" /etc/ppp/chap-secrets - if [ -x /usr/bin/tdbdump ]; then - pid=$(tdbdump /var/run/pppd2.tdb | grep -w $u | awk -F';' '{print $4}' | awk -F= '{print $2}') - [ "$pid" != "" ] && kill -9 $pid - fi - return 0 -} - -add_l2tp_ipsec_user() { - local u=$1 - local passwd=$2 - - remove_l2tp_ipsec_user $u - echo "$u * $passwd *" >> /etc/ppp/chap-secrets -} - -rflag= -pflag= -lflag= -sflag= -create= -destroy= -useradd= -userdel= - -while getopts 'cdl:p:r:s:u:U:D:z' OPTION -do - case $OPTION in - c) create=1 - ;; - d) destroy=1 - ;; - u) useradd=1 - user_pwd="$OPTARG" - ;; - U) userdel=1 - user="$OPTARG" - ;; - r) rflag=1 - client_range="$OPTARG" - ;; - p) pflag=1 - 
ipsec_psk="$OPTARG" - ;; - l) lflag=1 - local_ip="$OPTARG" - ;; - s) sflag=1 - server_ip="$OPTARG" - ;; - D) dev="$OPTARG" - ;; - z) zcidr="$OPTARG" - ;; - ?) usage - exit 2 - ;; - esac -done - -[ "$create$destroy" == "11" ] || [ "$create$destroy$useradd$userdel" == "" ] && usage && exit 2 -[ "$create" == "1" ] && [ "$lflag$pflag$rflag$sflag" != "1111" ] && usage && exit 2 - -if [ "$create" == "1" ]; then - create_l2tp_ipsec_vpn_server $ipsec_psk $server_ip $client_range $local_ip - exit $? -fi - -if [ "$destroy" == "1" ]; then - destroy_l2tp_ipsec_vpn_server $server_ip - exit $? -fi - -if [ "$useradd" == "1" ]; then - u=$(echo $user_pwd | awk -F',' '{print $1}') - pwd=$(echo $user_pwd | awk -F',' '{print $2}') - add_l2tp_ipsec_user $u $pwd - exit $? -fi -if [ "$userdel" == "1" ]; then - remove_l2tp_ipsec_user $user - exit $? -fi diff --git a/plugins/acl/static-role-based/resources/META-INF/cloudstack/acl-static-role-based/module.properties b/plugins/acl/static-role-based/resources/META-INF/cloudstack/acl-static-role-based/module.properties new file mode 100644 index 00000000000..06fc721bc09 --- /dev/null +++ b/plugins/acl/static-role-based/resources/META-INF/cloudstack/acl-static-role-based/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +name=acl-static-role-based +parent=api \ No newline at end of file diff --git a/plugins/acl/static-role-based/resources/META-INF/cloudstack/acl-static-role-based/spring-acl-static-role-based-context.xml b/plugins/acl/static-role-based/resources/META-INF/cloudstack/acl-static-role-based/spring-acl-static-role-based-context.xml new file mode 100644 index 00000000000..f13acc190b6 --- /dev/null +++ b/plugins/acl/static-role-based/resources/META-INF/cloudstack/acl-static-role-based/spring-acl-static-role-based-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java b/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java index d4d73d1f77b..bf3acf5f427 100644 --- a/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java +++ b/plugins/acl/static-role-based/src/org/apache/cloudstack/acl/StaticRoleBasedAPIAccessChecker.java @@ -26,6 +26,7 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.APICommand; import org.apache.log4j.Logger; import com.cloud.exception.PermissionDeniedException; @@ -43,16 +44,21 @@ public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIC protected static final Logger s_logger = Logger.getLogger(StaticRoleBasedAPIAccessChecker.class); - private static Map> s_roleBasedApisMap = + Set commandsPropertiesOverrides = new HashSet(); + Map> commandsPropertiesRoleBasedApisMap = + new HashMap>(); + Map> annotationRoleBasedApisMap = new HashMap>(); - @Inject List _services; + List _services; @Inject AccountService _accountService; protected StaticRoleBasedAPIAccessChecker() { super(); - for (RoleType roleType: RoleType.values()) - s_roleBasedApisMap.put(roleType, new 
HashSet()); + for (RoleType roleType: RoleType.values()) { + commandsPropertiesRoleBasedApisMap.put(roleType, new HashSet()); + annotationRoleBasedApisMap.put(roleType, new HashSet()); + } } @Override @@ -64,7 +70,10 @@ public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIC } RoleType roleType = _accountService.getRoleType(account); - boolean isAllowed = s_roleBasedApisMap.get(roleType).contains(commandName); + boolean isAllowed = commandsPropertiesOverrides.contains(commandName) ? + commandsPropertiesRoleBasedApisMap.get(roleType).contains(commandName) : + annotationRoleBasedApisMap.get(roleType).contains(commandName); + if (!isAllowed) { throw new PermissionDeniedException("The API does not exist or is blacklisted. Role type=" + roleType.toString() + " is not allowed to request the api: " + commandName); } @@ -80,19 +89,46 @@ public class StaticRoleBasedAPIAccessChecker extends AdapterBase implements APIC return true; } + + @Override + public boolean start() { + for ( PluggableService service : _services ) { + for ( Class clz : service.getCommands() ) { + APICommand command = clz.getAnnotation(APICommand.class); + for ( RoleType role : command.authorized() ) { + Set commands = annotationRoleBasedApisMap.get(role); + if (!commands.contains(command.name())) + commands.add(command.name()); + } + } + } + return super.start(); + } + private void processMapping(Map configMap) { for (Map.Entry entry: configMap.entrySet()) { String apiName = entry.getKey(); String roleMask = entry.getValue(); + commandsPropertiesOverrides.add(apiName); try { short cmdPermissions = Short.parseShort(roleMask); for (RoleType roleType: RoleType.values()) { if ((cmdPermissions & roleType.getValue()) != 0) - s_roleBasedApisMap.get(roleType).add(apiName); + commandsPropertiesRoleBasedApisMap.get(roleType).add(apiName); } } catch (NumberFormatException nfe) { s_logger.info("Malformed key=value pair for entry: " + entry.toString()); } } } + + public List getServices() { 
+ return _services; + } + + @Inject + public void setServices(List _services) { + this._services = _services; + } + } diff --git a/plugins/affinity-group-processors/explicit-dedication/resources/META-INF/cloudstack/explicit-dedication/module.properties b/plugins/affinity-group-processors/explicit-dedication/resources/META-INF/cloudstack/explicit-dedication/module.properties new file mode 100644 index 00000000000..e204fe7ce43 --- /dev/null +++ b/plugins/affinity-group-processors/explicit-dedication/resources/META-INF/cloudstack/explicit-dedication/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=explicit-dedication +parent=planner \ No newline at end of file diff --git a/plugins/affinity-group-processors/explicit-dedication/resources/META-INF/cloudstack/explicit-dedication/spring-explicit-dedication-context.xml b/plugins/affinity-group-processors/explicit-dedication/resources/META-INF/cloudstack/explicit-dedication/spring-explicit-dedication-context.xml new file mode 100644 index 00000000000..5864f947765 --- /dev/null +++ b/plugins/affinity-group-processors/explicit-dedication/resources/META-INF/cloudstack/explicit-dedication/spring-explicit-dedication-context.xml @@ -0,0 +1,36 @@ + + + + + + + + + diff --git a/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java b/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java index da231723d32..cc1d3f350e6 100644 --- a/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java +++ b/plugins/affinity-group-processors/explicit-dedication/src/org/apache/cloudstack/affinity/ExplicitDedicationProcessor.java @@ -24,7 +24,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -48,6 +47,8 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @@ -410,7 +411,7 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement @DB @Override - public void handleDeleteGroup(AffinityGroup group) { + public void 
handleDeleteGroup(final AffinityGroup group) { // When a group of the 'ExplicitDedication' type gets deleted, make sure // to remove the dedicated resources in the group as well. if (group != null) { @@ -419,20 +420,21 @@ public class ExplicitDedicationProcessor extends AffinityProcessorBase implement if (s_logger.isDebugEnabled()) { s_logger.debug("Releasing the dedicated resources under group: " + group); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - SearchBuilder listByAffinityGroup = _dedicatedDao.createSearchBuilder(); - listByAffinityGroup.and("affinityGroupId", listByAffinityGroup.entity().getAffinityGroupId(), - SearchCriteria.Op.EQ); - listByAffinityGroup.done(); - SearchCriteria sc = listByAffinityGroup.create(); - sc.setParameters("affinityGroupId", group.getId()); - - _dedicatedDao.lockRows(sc, null, true); - _dedicatedDao.remove(sc); - - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + SearchBuilder listByAffinityGroup = _dedicatedDao.createSearchBuilder(); + listByAffinityGroup.and("affinityGroupId", listByAffinityGroup.entity().getAffinityGroupId(), + SearchCriteria.Op.EQ); + listByAffinityGroup.done(); + SearchCriteria sc = listByAffinityGroup.create(); + sc.setParameters("affinityGroupId", group.getId()); + + _dedicatedDao.lockRows(sc, null, true); + _dedicatedDao.remove(sc); + } + }); } else { if (s_logger.isDebugEnabled()) { s_logger.debug("No dedicated resources to releease under group: " + group); diff --git a/plugins/affinity-group-processors/host-anti-affinity/resources/META-INF/cloudstack/host-anti-affinity/module.properties b/plugins/affinity-group-processors/host-anti-affinity/resources/META-INF/cloudstack/host-anti-affinity/module.properties new file mode 100644 index 00000000000..1ea1e8417fe --- /dev/null +++ 
b/plugins/affinity-group-processors/host-anti-affinity/resources/META-INF/cloudstack/host-anti-affinity/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=host-anti-affinity +parent=planner \ No newline at end of file diff --git a/plugins/affinity-group-processors/host-anti-affinity/resources/META-INF/cloudstack/host-anti-affinity/spring-host-anti-affinity-context.xml b/plugins/affinity-group-processors/host-anti-affinity/resources/META-INF/cloudstack/host-anti-affinity/spring-host-anti-affinity-context.xml new file mode 100644 index 00000000000..bc09cc3c60d --- /dev/null +++ b/plugins/affinity-group-processors/host-anti-affinity/resources/META-INF/cloudstack/host-anti-affinity/spring-host-anti-affinity-context.xml @@ -0,0 +1,37 @@ + + + + + + + + + + diff --git a/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java index 860240faef0..5be109e533b 100755 --- a/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java +++ b/plugins/api/discovery/src/org/apache/cloudstack/discovery/ApiDiscoveryServiceImpl.java @@ -16,14 +16,24 @@ // under the 
License. package org.apache.cloudstack.discovery; -import com.cloud.serializer.Param; -import com.cloud.user.User; -import com.cloud.utils.ReflectUtil; -import com.cloud.utils.StringUtils; -import com.cloud.utils.component.PluggableService; -import com.google.gson.annotations.SerializedName; +import java.lang.reflect.Field; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import javax.ejb.Local; +import javax.inject.Inject; + import org.apache.cloudstack.acl.APIChecker; -import org.apache.cloudstack.api.*; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.BaseAsyncCreateCmd; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.command.user.discovery.ListApisCmd; import org.apache.cloudstack.api.response.ApiDiscoveryResponse; import org.apache.cloudstack.api.response.ApiParameterResponse; @@ -32,27 +42,29 @@ import org.apache.cloudstack.api.response.ListResponse; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import javax.annotation.PostConstruct; -import javax.ejb.Local; -import javax.inject.Inject; -import java.lang.reflect.Field; -import java.util.*; +import com.cloud.serializer.Param; +import com.cloud.user.User; +import com.cloud.utils.ReflectUtil; +import com.cloud.utils.StringUtils; +import com.cloud.utils.component.ComponentLifecycleBase; +import com.cloud.utils.component.PluggableService; +import com.google.gson.annotations.SerializedName; @Component @Local(value = ApiDiscoveryService.class) -public class ApiDiscoveryServiceImpl implements ApiDiscoveryService { +public class ApiDiscoveryServiceImpl extends ComponentLifecycleBase implements ApiDiscoveryService { private static final Logger s_logger = 
Logger.getLogger(ApiDiscoveryServiceImpl.class); - @Inject protected List _apiAccessCheckers = null; - @Inject protected List _services = null; + List _apiAccessCheckers = null; + List _services = null; private static Map s_apiNameDiscoveryResponseMap = null; protected ApiDiscoveryServiceImpl() { super(); } - @PostConstruct - void init() { + @Override + public boolean start() { if (s_apiNameDiscoveryResponseMap == null) { long startTime = System.nanoTime(); s_apiNameDiscoveryResponseMap = new HashMap(); @@ -66,6 +78,8 @@ public class ApiDiscoveryServiceImpl implements ApiDiscoveryService { long endTime = System.nanoTime(); s_logger.info("Api Discovery Service: Annotation, docstrings, api relation graph processed in " + (endTime - startTime) / 1000000.0 + " ms"); } + + return true; } protected Map> cacheResponseMap(Set> cmdClasses) { @@ -249,4 +263,21 @@ public class ApiDiscoveryServiceImpl implements ApiDiscoveryService { cmdList.add(ListApisCmd.class); return cmdList; } + + public List getApiAccessCheckers() { + return _apiAccessCheckers; + } + + public void setApiAccessCheckers(List _apiAccessCheckers) { + this._apiAccessCheckers = _apiAccessCheckers; + } + + public List getServices() { + return _services; + } + + @Inject + public void setServices(List _services) { + this._services = _services; + } } diff --git a/plugins/api/discovery/test/org/apache/cloudstack/discovery/ApiDiscoveryTest.java b/plugins/api/discovery/test/org/apache/cloudstack/discovery/ApiDiscoveryTest.java index afff746c848..a34484becab 100644 --- a/plugins/api/discovery/test/org/apache/cloudstack/discovery/ApiDiscoveryTest.java +++ b/plugins/api/discovery/test/org/apache/cloudstack/discovery/ApiDiscoveryTest.java @@ -65,7 +65,7 @@ public class ApiDiscoveryTest { Set> cmdClasses = new HashSet>(); cmdClasses.add(ListApisCmd.class); - _discoveryService.init(); + _discoveryService.start(); _discoveryService.cacheResponseMap(cmdClasses); } diff --git 
a/plugins/api/rate-limit/resources/META-INF/cloudstack/rate-limit/module.properties b/plugins/api/rate-limit/resources/META-INF/cloudstack/rate-limit/module.properties new file mode 100644 index 00000000000..c998a87d937 --- /dev/null +++ b/plugins/api/rate-limit/resources/META-INF/cloudstack/rate-limit/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=rate-limit +parent=api \ No newline at end of file diff --git a/plugins/api/rate-limit/resources/META-INF/cloudstack/rate-limit/spring-rate-limit-context.xml b/plugins/api/rate-limit/resources/META-INF/cloudstack/rate-limit/spring-rate-limit-context.xml new file mode 100644 index 00000000000..17153cf1c9d --- /dev/null +++ b/plugins/api/rate-limit/resources/META-INF/cloudstack/rate-limit/spring-rate-limit-context.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/plugins/dedicated-resources/resources/META-INF/cloudstack/core/spring-dedicated-resources-core-context.xml b/plugins/dedicated-resources/resources/META-INF/cloudstack/core/spring-dedicated-resources-core-context.xml new file mode 100644 index 00000000000..e2879f71086 --- /dev/null +++ b/plugins/dedicated-resources/resources/META-INF/cloudstack/core/spring-dedicated-resources-core-context.xml @@ -0,0 +1,33 @@ + + + + + + diff --git a/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java b/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java index 6a6b47ccfd7..cda46efe2ee 100755 --- a/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java +++ b/plugins/dedicated-resources/src/org/apache/cloudstack/dedicated/DedicatedResourceManagerImpl.java @@ -48,7 +48,6 @@ import org.apache.cloudstack.api.response.DedicatePodResponse; import org.apache.cloudstack.api.response.DedicateZoneResponse; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -79,6 +78,9 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; 
import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.UserVmVO; import com.cloud.vm.dao.UserVmDao; @@ -113,7 +115,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @Override @DB @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Zone") - public List dedicateZone(Long zoneId, Long domainId, String accountName) { + public List dedicateZone(final Long zoneId, final Long domainId, final String accountName) { Long accountId = null; List hosts = null; if(accountName != null){ @@ -124,7 +126,7 @@ public class DedicatedResourceManagerImpl implements DedicatedService { List childDomainIds = getDomainChildIds(domainId); childDomainIds.add(domainId); checkAccountAndDomain(accountId, domainId); - DataCenterVO dc = _zoneDao.findById(zoneId); + final DataCenterVO dc = _zoneDao.findById(zoneId); if (dc == null) { throw new InvalidParameterValueException("Unable to find zone by id " + zoneId); } else { @@ -220,46 +222,50 @@ public class DedicatedResourceManagerImpl implements DedicatedService { checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // find or create the affinity group by name under this account/domain - AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountId); - if (group == null) { - s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); - throw new CloudRuntimeException("Failed to dedicate zone. 
Please contact Cloud Support."); - } + final Long accountIdFinal = accountId; + return Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + // find or create the affinity group by name under this account/domain + AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); + if (group == null) { + s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); + throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + } + + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(zoneId, null, null, null, null, null, + group.getId()); + try { + dedicatedResource.setDomainId(domainId); + if (accountIdFinal != null) { + dedicatedResource.setAccountId(accountIdFinal); + } + dedicatedResource = _dedicatedDao.persist(dedicatedResource); + + // save the domainId in the zone + dc.setDomainId(domainId); + if (!_zoneDao.update(zoneId, dc)) { + throw new CloudRuntimeException( + "Failed to dedicate zone, could not set domainId. Please contact Cloud Support."); + } + + } catch (Exception e) { + s_logger.error("Unable to dedicate zone due to " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + } + + List result = new ArrayList(); + result.add(dedicatedResource); + return result; - DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(zoneId, null, null, null, null, null, - group.getId()); - try { - dedicatedResource.setDomainId(domainId); - if (accountId != null) { - dedicatedResource.setAccountId(accountId); } - dedicatedResource = _dedicatedDao.persist(dedicatedResource); - - // save the domainId in the zone - dc.setDomainId(domainId); - if (!_zoneDao.update(zoneId, dc)) { - throw new CloudRuntimeException( - "Failed to dedicate zone, could not set domainId. 
Please contact Cloud Support."); - } - - } catch (Exception e) { - s_logger.error("Unable to dedicate zone due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); - } - txn.commit(); - - List result = new ArrayList(); - result.add(dedicatedResource); - return result; + }); } @Override @DB @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Pod") - public List dedicatePod(Long podId, Long domainId, String accountName) { + public List dedicatePod(final Long podId, final Long domainId, final String accountName) { Long accountId = null; if(accountName != null){ Account caller = CallContext.current().getCallingAccount(); @@ -353,37 +359,40 @@ public class DedicatedResourceManagerImpl implements DedicatedService { checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // find or create the affinity group by name under this account/domain - AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountId); - if (group == null) { - s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); - throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); - } - DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, podId, null, null, null, null, - group.getId()); - try { - dedicatedResource.setDomainId(domainId); - if (accountId != null) { - dedicatedResource.setAccountId(accountId); - } - dedicatedResource = _dedicatedDao.persist(dedicatedResource); - } catch (Exception e) { - s_logger.error("Unable to dedicate pod due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to dedicate pod. 
Please contact Cloud Support."); - } - txn.commit(); + final Long accountIdFinal = accountId; + return Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + // find or create the affinity group by name under this account/domain + AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); + if (group == null) { + s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); + throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + } + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, podId, null, null, null, null, + group.getId()); + try { + dedicatedResource.setDomainId(domainId); + if (accountIdFinal != null) { + dedicatedResource.setAccountId(accountIdFinal); + } + dedicatedResource = _dedicatedDao.persist(dedicatedResource); + } catch (Exception e) { + s_logger.error("Unable to dedicate pod due to " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to dedicate pod. 
Please contact Cloud Support."); + } - List result = new ArrayList(); - result.add(dedicatedResource); - return result; + List result = new ArrayList(); + result.add(dedicatedResource); + return result; + } + }); } @Override @DB @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Cluster") - public List dedicateCluster(Long clusterId, Long domainId, String accountName) { + public List dedicateCluster(final Long clusterId, final Long domainId, final String accountName) { Long accountId = null; List hosts = null; if(accountName != null){ @@ -463,37 +472,40 @@ public class DedicatedResourceManagerImpl implements DedicatedService { checkHostsSuitabilityForExplicitDedication(accountId, childDomainIds, hosts); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // find or create the affinity group by name under this account/domain - AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountId); - if (group == null) { - s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); - throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); - } - DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, clusterId, null, null, null, - group.getId()); - try { - dedicatedResource.setDomainId(domainId); - if (accountId != null) { - dedicatedResource.setAccountId(accountId); - } - dedicatedResource = _dedicatedDao.persist(dedicatedResource); - } catch (Exception e) { - s_logger.error("Unable to dedicate host due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to dedicate cluster. 
Please contact Cloud Support."); - } - txn.commit(); + final Long accountIdFinal = accountId; + return Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + // find or create the affinity group by name under this account/domain + AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); + if (group == null) { + s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); + throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + } + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, clusterId, null, null, null, + group.getId()); + try { + dedicatedResource.setDomainId(domainId); + if (accountIdFinal != null) { + dedicatedResource.setAccountId(accountIdFinal); + } + dedicatedResource = _dedicatedDao.persist(dedicatedResource); + } catch (Exception e) { + s_logger.error("Unable to dedicate host due to " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to dedicate cluster. 
Please contact Cloud Support."); + } - List result = new ArrayList(); - result.add(dedicatedResource); - return result; + List result = new ArrayList(); + result.add(dedicatedResource); + return result; + } + }); } @Override @DB @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE, eventDescription = "dedicating a Host") - public List dedicateHost(Long hostId, Long domainId, String accountName) { + public List dedicateHost(final Long hostId, final Long domainId, final String accountName) { Long accountId = null; if(accountName != null){ Account caller = CallContext.current().getCallingAccount(); @@ -558,31 +570,35 @@ public class DedicatedResourceManagerImpl implements DedicatedService { childDomainIds.add(domainId); checkHostSuitabilityForExplicitDedication(accountId, childDomainIds, hostId); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // find or create the affinity group by name under this account/domain - AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountId); - if (group == null) { - s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); - throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); - } - DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, null, hostId, null, null, - group.getId()); - try { - dedicatedResource.setDomainId(domainId); - if (accountId != null) { - dedicatedResource.setAccountId(accountId); - } - dedicatedResource = _dedicatedDao.persist(dedicatedResource); - } catch (Exception e) { - s_logger.error("Unable to dedicate host due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to dedicate host. 
Please contact Cloud Support."); - } - txn.commit(); + final Long accountIdFinal = accountId; + return Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + // find or create the affinity group by name under this account/domain + AffinityGroup group = findOrCreateDedicatedAffinityGroup(domainId, accountIdFinal); + if (group == null) { + s_logger.error("Unable to dedicate zone due to, failed to create dedication affinity group"); + throw new CloudRuntimeException("Failed to dedicate zone. Please contact Cloud Support."); + } + DedicatedResourceVO dedicatedResource = new DedicatedResourceVO(null, null, null, hostId, null, null, + group.getId()); + try { + dedicatedResource.setDomainId(domainId); + if (accountIdFinal != null) { + dedicatedResource.setAccountId(accountIdFinal); + } + dedicatedResource = _dedicatedDao.persist(dedicatedResource); + } catch (Exception e) { + s_logger.error("Unable to dedicate host due to " + e.getMessage(), e); + throw new CloudRuntimeException("Failed to dedicate host. 
Please contact Cloud Support."); + } + + List result = new ArrayList(); + result.add(dedicatedResource); + return result; + } + }); - List result = new ArrayList(); - result.add(dedicatedResource); - return result; } private AffinityGroup findOrCreateDedicatedAffinityGroup(Long domainId, Long accountId) { @@ -889,9 +905,8 @@ public class DedicatedResourceManagerImpl implements DedicatedService { @Override @DB @ActionEvent(eventType = EventTypes.EVENT_DEDICATE_RESOURCE_RELEASE, eventDescription = "Releasing dedicated resource") - public boolean releaseDedicatedResource(Long zoneId, Long podId, Long clusterId, Long hostId) throws InvalidParameterValueException{ + public boolean releaseDedicatedResource(final Long zoneId, Long podId, Long clusterId, Long hostId) throws InvalidParameterValueException{ DedicatedResourceVO resource = null; - Long resourceId = null; if (zoneId != null) { resource = _dedicatedDao.findByZoneId(zoneId); } @@ -907,26 +922,28 @@ public class DedicatedResourceManagerImpl implements DedicatedService { if (resource == null){ throw new InvalidParameterValueException("No Dedicated Resource available to release"); } else { - Transaction txn = Transaction.currentTxn(); - txn.start(); - resourceId = resource.getId(); - if (!_dedicatedDao.remove(resourceId)) { - throw new CloudRuntimeException("Failed to delete Resource " + resourceId); - } - if (zoneId != null) { - // remove the domainId set in zone - DataCenterVO dc = _zoneDao.findById(zoneId); - if (dc != null) { - dc.setDomainId(null); - dc.setDomain(null); - if (!_zoneDao.update(zoneId, dc)) { - throw new CloudRuntimeException( - "Failed to release dedicated zone, could not clear domainId. 
Please contact Cloud Support."); + final DedicatedResourceVO resourceFinal = resource; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + Long resourceId = resourceFinal.getId(); + if (!_dedicatedDao.remove(resourceId)) { + throw new CloudRuntimeException("Failed to delete Resource " + resourceId); + } + if (zoneId != null) { + // remove the domainId set in zone + DataCenterVO dc = _zoneDao.findById(zoneId); + if (dc != null) { + dc.setDomainId(null); + dc.setDomain(null); + if (!_zoneDao.update(zoneId, dc)) { + throw new CloudRuntimeException( + "Failed to release dedicated zone, could not clear domainId. Please contact Cloud Support."); + } + } } } - } - - txn.commit(); + }); // find the group associated and check if there are any more // resources under that group diff --git a/plugins/deployment-planners/implicit-dedication/resources/META-INF/cloudstack/implicit-dedication/module.properties b/plugins/deployment-planners/implicit-dedication/resources/META-INF/cloudstack/implicit-dedication/module.properties new file mode 100644 index 00000000000..6cda90463a3 --- /dev/null +++ b/plugins/deployment-planners/implicit-dedication/resources/META-INF/cloudstack/implicit-dedication/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=implicit-dedication +parent=planner \ No newline at end of file diff --git a/plugins/deployment-planners/implicit-dedication/resources/META-INF/cloudstack/implicit-dedication/spring-implicit-dedication-context.xml b/plugins/deployment-planners/implicit-dedication/resources/META-INF/cloudstack/implicit-dedication/spring-implicit-dedication-context.xml new file mode 100644 index 00000000000..d14b4502ab9 --- /dev/null +++ b/plugins/deployment-planners/implicit-dedication/resources/META-INF/cloudstack/implicit-dedication/spring-implicit-dedication-context.xml @@ -0,0 +1,25 @@ + + + + + + + + + diff --git a/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java b/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java index 6eee28d7a94..e73bc729518 100644 --- a/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java +++ b/plugins/deployment-planners/implicit-dedication/src/com/cloud/deploy/ImplicitDedicationPlanner.java @@ -223,7 +223,7 @@ public class ImplicitDedicationPlanner extends FirstFitPlanner implements Deploy private boolean isServiceOfferingUsingPlannerInPreferredMode(long serviceOfferingId) { boolean preferred = false; - Map details = serviceOfferingDetailsDao.findDetails(serviceOfferingId); + Map details = serviceOfferingDetailsDao.listDetailsKeyPairs(serviceOfferingId); if (details != null && !details.isEmpty()) { String preferredAttribute = details.get("ImplicitDedicationMode"); if (preferredAttribute != null 
&& preferredAttribute.equals("Preferred")) { diff --git a/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java b/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java index c0ce9d06465..f1fa71c3c56 100644 --- a/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java +++ b/plugins/deployment-planners/implicit-dedication/test/org/apache/cloudstack/implicitplanner/ImplicitPlannerTest.java @@ -405,7 +405,7 @@ public class ImplicitPlannerTest { Map details = new HashMap(); details.put("ImplicitDedicationMode", plannerMode); - when(serviceOfferingDetailsDao.findDetails(offeringId)).thenReturn(details); + when(serviceOfferingDetailsDao.listDetailsKeyPairs(offeringId)).thenReturn(details); // Initialize hosts in clusters HostVO host1 = mock(HostVO.class); diff --git a/plugins/deployment-planners/user-concentrated-pod/resources/META-INF/cloudstack/user-concentrated-pod/module.properties b/plugins/deployment-planners/user-concentrated-pod/resources/META-INF/cloudstack/user-concentrated-pod/module.properties new file mode 100644 index 00000000000..7a430b28228 --- /dev/null +++ b/plugins/deployment-planners/user-concentrated-pod/resources/META-INF/cloudstack/user-concentrated-pod/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=user-concentrated-pod +parent=planner \ No newline at end of file diff --git a/plugins/deployment-planners/user-concentrated-pod/resources/META-INF/cloudstack/user-concentrated-pod/spring-user-concentrated-pod-context.xml b/plugins/deployment-planners/user-concentrated-pod/resources/META-INF/cloudstack/user-concentrated-pod/spring-user-concentrated-pod-context.xml new file mode 100644 index 00000000000..e26cb2be57e --- /dev/null +++ b/plugins/deployment-planners/user-concentrated-pod/resources/META-INF/cloudstack/user-concentrated-pod/spring-user-concentrated-pod-context.xml @@ -0,0 +1,35 @@ + + + + + + + + + diff --git a/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java b/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java index 1c0c6bef6f2..478c8d7aaed 100644 --- a/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java +++ b/plugins/event-bus/rabbitmq/src/org/apache/cloudstack/mom/rabbitmq/RabbitMQEventBus.java @@ -20,7 +20,9 @@ package org.apache.cloudstack.mom.rabbitmq; import com.rabbitmq.client.*; + import org.apache.cloudstack.framework.events.*; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import com.cloud.utils.Ternary; @@ -28,6 +30,7 @@ import com.cloud.utils.component.ManagerBase; import javax.ejb.Local; import javax.naming.ConfigurationException; + import java.io.IOException; import java.net.ConnectException; import java.util.Map; @@ -493,12 +496,13 @@ public class 
RabbitMQEventBus extends ManagerBase implements EventBus { } // retry logic to connect back to AMQP server after loss of connection - private class ReconnectionTask implements Runnable { + private class ReconnectionTask extends ManagedContextRunnable { boolean connected = false; Connection connection = null; - public void run() { + @Override + protected void runInContext() { while (!connected) { try { diff --git a/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java b/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java index 8f7b6d8df8d..e0c2f57d62d 100644 --- a/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java +++ b/plugins/file-systems/netapp/src/com/cloud/netapp/NetappManagerImpl.java @@ -50,7 +50,7 @@ import com.cloud.netapp.dao.PoolDao; import com.cloud.netapp.dao.VolumeDao; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -225,7 +225,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager throw new ResourceInUseException("There are luns on the volume"); } - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); PoolVO pool = _poolDao.findById(volume.getPoolId()); if (pool == null) { @@ -388,7 +388,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager } Long volumeId = null; - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); NetappVolumeVO volume = null; volume = _volumeDao.findVolume(ipAddress, aggName, volName); @@ -624,7 +624,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager String[] result = new String[3]; StringBuilder lunName = new StringBuilder("lun-"); LunVO lun = null; - final 
Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); PoolVO pool = _poolDao.findPool(poolName); @@ -802,7 +802,7 @@ public class NetappManagerImpl extends ManagerBase implements NetappManager @DB public void destroyLunOnFiler(String lunName) throws InvalidParameterValueException, ServerException{ - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); LunVO lun = _lunDao.findByName(lunName); diff --git a/plugins/host-allocators/random/resources/META-INF/cloudstack/host-allocator-random/module.properties b/plugins/host-allocators/random/resources/META-INF/cloudstack/host-allocator-random/module.properties new file mode 100644 index 00000000000..9a04174ec7f --- /dev/null +++ b/plugins/host-allocators/random/resources/META-INF/cloudstack/host-allocator-random/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=host-allocator-random +parent=allocator \ No newline at end of file diff --git a/plugins/host-allocators/random/resources/META-INF/cloudstack/host-allocator-random/spring-host-allocator-random-context.xml b/plugins/host-allocators/random/resources/META-INF/cloudstack/host-allocator-random/spring-host-allocator-random-context.xml new file mode 100644 index 00000000000..8df1bdb97cc --- /dev/null +++ b/plugins/host-allocators/random/resources/META-INF/cloudstack/host-allocator-random/spring-host-allocator-random-context.xml @@ -0,0 +1,34 @@ + + + + + + + diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-compute/module.properties b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-compute/module.properties new file mode 100644 index 00000000000..654b0d8b5cb --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=baremetal-compute +parent=compute \ No newline at end of file diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-compute/spring-baremetal-compute-context.xml b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-compute/spring-baremetal-compute-context.xml new file mode 100644 index 00000000000..cce68051239 --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-compute/spring-baremetal-compute-context.xml @@ -0,0 +1,35 @@ + + + + + + + + + diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-discoverer/module.properties b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-discoverer/module.properties new file mode 100644 index 00000000000..3307c8cfbd3 --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-discoverer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=baremetal-discoverer +parent=discoverer \ No newline at end of file diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml new file mode 100644 index 00000000000..8792909ffed --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-discoverer/spring-baremetal-discoverer-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-network/module.properties b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-network/module.properties new file mode 100644 index 00000000000..acfe59415de --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-network/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=baremetal-network +parent=network \ No newline at end of file diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-network/spring-baremetal-network-context.xml b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-network/spring-baremetal-network-context.xml new file mode 100644 index 00000000000..40d9f505775 --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-network/spring-baremetal-network-context.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-planner/module.properties b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-planner/module.properties new file mode 100644 index 00000000000..c6c4e744b75 --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-planner/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=baremetal-planner +parent=planner \ No newline at end of file diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-planner/spring-baremetal-planner-context.xml b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-planner/spring-baremetal-planner-context.xml new file mode 100644 index 00000000000..8c14c3e2903 --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-planner/spring-baremetal-planner-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-storage/module.properties b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-storage/module.properties new file mode 100644 index 00000000000..b4269a853e0 --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-storage/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=baremetal-storage +parent=storage \ No newline at end of file diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-storage/spring-baremetal-storage-context.xml b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-storage/spring-baremetal-storage-context.xml new file mode 100644 index 00000000000..e0a9e36ef7d --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/baremetal-storage/spring-baremetal-storage-context.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml new file mode 100644 index 00000000000..11cc6c89e3f --- /dev/null +++ b/plugins/hypervisors/baremetal/resources/META-INF/cloudstack/core/spring-baremetal-core-context.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalDhcpDaoImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalDhcpDaoImpl.java index 8123ee0f6b7..3a6aef7cb96 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalDhcpDaoImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalDhcpDaoImpl.java @@ -18,25 +18,20 @@ // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.database; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; -import javax.naming.ConfigurationException; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; +import com.cloud.utils.db.GenericQueryBuilder; 
@Component @Local(value=BaremetalDhcpDao.class) -@DB(txn=false) +@DB() public class BaremetalDhcpDaoImpl extends GenericDaoBase implements BaremetalDhcpDao { public BaremetalDhcpDaoImpl() { diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalPxeDaoImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalPxeDaoImpl.java index acd7f136b6d..bede34760fe 100644 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalPxeDaoImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/database/BaremetalPxeDaoImpl.java @@ -18,24 +18,19 @@ // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.database; -import java.util.List; -import java.util.Map; - import javax.ejb.Local; -import javax.naming.ConfigurationException; import org.springframework.stereotype.Component; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; +import com.cloud.utils.db.GenericQueryBuilder; @Component @Local(value = {BaremetalPxeDao.class}) -@DB(txn = false) +@DB() public class BaremetalPxeDaoImpl extends GenericDaoBase implements BaremetalPxeDao { } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java index 5cb5a14b53b..4c07dae70ab 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalPingServiceImpl.java @@ -23,7 +23,6 @@ package com.cloud.baremetal.networkservice; import java.net.URI; -import java.util.ArrayList; import java.util.HashMap; import 
java.util.List; import java.util.Map; @@ -36,7 +35,6 @@ import org.apache.log4j.Logger; import org.apache.cloudstack.api.AddBaremetalPxeCmd; import org.apache.cloudstack.api.AddBaremetalPxePingServerCmd; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; -import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.baremetal.IpmISetBootDevCommand; @@ -62,9 +60,8 @@ import com.cloud.resource.ResourceManager; import com.cloud.resource.ServerResource; import com.cloud.uservm.UserVm; import com.cloud.utils.db.DB; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.NicProfile; @@ -84,9 +81,9 @@ public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements @Override public boolean prepare(VirtualMachineProfile profile, NicProfile pxeNic, DeployDestination dest, ReservationContext context) { - SearchCriteriaService sc = SearchCriteria2.create(BaremetalPxeVO.class); - sc.addAnd(sc.getEntity().getDeviceType(), Op.EQ, BaremetalPxeType.PING.toString()); - sc.addAnd(sc.getEntity().getPodId(), Op.EQ, dest.getPod().getId()); + QueryBuilder sc = QueryBuilder.create(BaremetalPxeVO.class); + sc.and(sc.entity().getDeviceType(), Op.EQ, BaremetalPxeType.PING.toString()); + sc.and(sc.entity().getPodId(), Op.EQ, dest.getPod().getId()); BaremetalPxeVO pxeVo = sc.find(); if (pxeVo == null) { throw new CloudRuntimeException("No PING PXE server found in pod: " + dest.getPod().getId() + ", you need to add it before starting VM"); @@ -258,15 +255,12 @@ public class BareMetalPingServiceImpl extends BareMetalPxeServiceBase implements } BaremetalPxeVO vo = new BaremetalPxeVO(); - Transaction txn = Transaction.currentTxn(); vo.setHostId(pxeServer.getId()); 
vo.setNetworkServiceProviderId(ntwkSvcProvider.getId()); vo.setPodId(pod.getId()); vo.setPhysicalNetworkId(pcmd.getPhysicalNetworkId()); vo.setDeviceType(BaremetalPxeType.PING.toString()); - txn.start(); _pxeDao.persist(vo); - txn.commit(); return vo; } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalResourceBase.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalResourceBase.java index a814530b2f7..fa7abd58331 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalResourceBase.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BareMetalResourceBase.java @@ -517,7 +517,7 @@ public class BareMetalResourceBase extends ManagerBase implements ServerResource count++; } - return success ? new StopAnswer(cmd, "Success", 0, true) : new StopAnswer(cmd, "IPMI power off failed", false); + return success ? new StopAnswer(cmd, "Success", true) : new StopAnswer(cmd, "IPMI power off failed", false); } protected StartAnswer execute(StartCommand cmd) { diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java index 787137778cd..29e180deafc 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetaNetworkGuru.java @@ -22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -53,6 +52,8 @@ import com.cloud.network.guru.DirectPodBasedNetworkGuru; import com.cloud.network.guru.NetworkGuru; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.utils.db.Transaction; +import 
com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachineProfile; @@ -100,18 +101,18 @@ public class BaremetaNetworkGuru extends DirectPodBasedNetworkGuru { } else { // we need to get a new ip address if we try to deploy a vm in a // different pod - IPAddressVO ipVO = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), oldIp); + final IPAddressVO ipVO = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), oldIp); if (ipVO != null) { PodVlanMapVO mapVO = _podVlanDao.listPodVlanMapsByVlan(ipVO.getVlanId()); if (mapVO.getPodId() != dest.getPod().getId()) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - - // release the old ip here - _ipAddrMgr.markIpAsUnavailable(ipVO.getId()); - _ipAddressDao.unassignIpAddress(ipVO.getId()); - - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // release the old ip here + _ipAddrMgr.markIpAsUnavailable(ipVO.getId()); + _ipAddressDao.unassignIpAddress(ipVO.getId()); + } + }); nic.setIp4Address(null); getNewIp = true; diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java index 6ab5f6dfd3a..8057cd42f91 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpElement.java @@ -5,19 +5,28 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -// +// // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.networkservice; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + +import javax.ejb.Local; +import javax.inject.Inject; + +import org.apache.log4j.Logger; + import com.cloud.baremetal.database.BaremetalDhcpVO; import com.cloud.dc.DataCenter.NetworkType; import com.cloud.dc.Pod; @@ -39,20 +48,14 @@ import com.cloud.network.element.NetworkElement; import com.cloud.offering.NetworkOffering; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.db.Transaction; -import com.cloud.vm.*; +import com.cloud.vm.NicProfile; +import com.cloud.vm.NicVO; +import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachine.Type; +import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; -import org.apache.log4j.Logger; - -import javax.ejb.Local; -import javax.inject.Inject; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; @Local(value = NetworkElement.class) public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProvider { @@ -84,8 +87,8 @@ public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProv private boolean canHandle(DeployDestination dest, TrafficType trafficType, GuestType networkType) { Pod pod = dest.getPod(); if (pod != 
null && dest.getDataCenter().getNetworkType() == NetworkType.Basic && trafficType == TrafficType.Guest) { - SearchCriteriaService sc = SearchCriteria2.create(BaremetalDhcpVO.class); - sc.addAnd(sc.getEntity().getPodId(), Op.EQ, pod.getId()); + QueryBuilder sc = QueryBuilder.create(BaremetalDhcpVO.class); + sc.and(sc.entity().getPodId(), Op.EQ,pod.getId()); return sc.find() != null; } @@ -111,14 +114,11 @@ public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProv return false; } - Transaction txn = Transaction.currentTxn(); - txn.start(); nic.setMacAddress(host.getPrivateMacAddress()); NicVO vo = _nicDao.findById(nic.getId()); assert vo != null : "Where ths nic " + nic.getId() + " going???"; vo.setMacAddress(nic.getMacAddress()); _nicDao.update(vo.getId(), vo); - txn.commit(); return true; } @@ -159,6 +159,7 @@ public class BaremetalDhcpElement extends AdapterBase implements DhcpServiceProv return true; } + @Override public boolean addDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { if (vm.getHypervisorType() != HypervisorType.BareMetal || !canHandle(dest, network.getTrafficType(), network.getGuestType())) { diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java index 775673a0320..82397f5b31b 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalDhcpManagerImpl.java @@ -5,20 +5,20 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -// +// // Automatically generated by addcopyright.py at 01/29/2013 // Apache License, Version 2.0 (the "License"); you may not use this // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// +// // Automatically generated by addcopyright.py at 04/03/2012 package com.cloud.baremetal.networkservice; @@ -32,9 +32,10 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.log4j.Logger; + import org.apache.cloudstack.api.AddBaremetalDhcpCmd; import org.apache.cloudstack.api.ListBaremetalDhcpCmd; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -45,7 +46,6 @@ import com.cloud.baremetal.database.BaremetalDhcpDao; import com.cloud.baremetal.database.BaremetalDhcpVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenterVO; -import com.cloud.dc.HostPodVO; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.deploy.DeployDestination; @@ -67,14 +67,11 @@ import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; 
-import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.UserVmDao; @@ -280,10 +277,7 @@ public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDh vo.setHostId(dhcpServer.getId()); vo.setNetworkServiceProviderId(ntwkSvcProvider.getId()); vo.setPhysicalNetworkId(cmd.getPhysicalNetworkId()); - Transaction txn = Transaction.currentTxn(); - txn.start(); _extDhcpDao.persist(vo); - txn.commit(); return vo; } @@ -311,9 +305,9 @@ public class BaremetalDhcpManagerImpl extends ManagerBase implements BaremetalDh return responses; } - SearchCriteriaService sc = SearchCriteria2.create(BaremetalDhcpVO.class); + QueryBuilder sc = QueryBuilder.create(BaremetalDhcpVO.class); if (cmd.getDeviceType() != null) { - sc.addAnd(sc.getEntity().getDeviceType(), Op.EQ, cmd.getDeviceType()); + sc.and(sc.entity().getDeviceType(), Op.EQ, cmd.getDeviceType()); } List vos = sc.list(); diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java index 80a72fbaacf..cb4babf93d7 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalKickStartServiceImpl.java @@ -27,8 +27,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import org.apache.log4j.Logger; - import org.apache.cloudstack.api.AddBaremetalKickStartPxeCmd; import org.apache.cloudstack.api.AddBaremetalPxeCmd; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; @@ -57,10 +55,8 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.uservm.UserVm; import com.cloud.utils.db.DB; +import com.cloud.utils.db.QueryBuilder; import 
com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; @@ -87,9 +83,9 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple @Override public boolean prepare(VirtualMachineProfile profile, NicProfile nic, DeployDestination dest, ReservationContext context) { NetworkVO nwVO = _nwDao.findById(nic.getNetworkId()); - SearchCriteriaService sc = SearchCriteria2.create(BaremetalPxeVO.class); - sc.addAnd(sc.getEntity().getDeviceType(), Op.EQ, BaremetalPxeType.KICK_START.toString()); - sc.addAnd(sc.getEntity().getPhysicalNetworkId(), Op.EQ, nwVO.getPhysicalNetworkId()); + QueryBuilder sc = QueryBuilder.create(BaremetalPxeVO.class); + sc.and(sc.entity().getDeviceType(), Op.EQ, BaremetalPxeType.KICK_START.toString()); + sc.and(sc.entity().getPhysicalNetworkId(), Op.EQ, nwVO.getPhysicalNetworkId()); BaremetalPxeVO pxeVo = sc.find(); if (pxeVo == null) { throw new CloudRuntimeException("No kickstart PXE server found in pod: " + dest.getPod().getId() + ", you need to add it before starting VM"); @@ -227,14 +223,11 @@ public class BaremetalKickStartServiceImpl extends BareMetalPxeServiceBase imple } BaremetalPxeVO vo = new BaremetalPxeVO(); - Transaction txn = Transaction.currentTxn(); vo.setHostId(pxeServer.getId()); vo.setNetworkServiceProviderId(ntwkSvcProvider.getId()); vo.setPhysicalNetworkId(kcmd.getPhysicalNetworkId()); vo.setDeviceType(BaremetalPxeType.KICK_START.toString()); - txn.start(); _pxeDao.persist(vo); - txn.commit(); return vo; } diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java index 82094d8ff46..b9dbc776692 100755 --- 
a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeElement.java @@ -5,16 +5,16 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 -// +// // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. -// +// // Automatically generated by addcopyright.py at 01/29/2013 package com.cloud.baremetal.networkservice; @@ -46,16 +46,13 @@ import com.cloud.network.element.NetworkElement; import com.cloud.offering.NetworkOffering; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; -import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.NicProfile; import com.cloud.vm.NicVO; import com.cloud.vm.ReservationContext; import com.cloud.vm.VMInstanceVO; -import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; @@ -91,8 +88,8 @@ public class BaremetalPxeElement extends AdapterBase implements NetworkElement { private boolean canHandle(DeployDestination dest, TrafficType trafficType, GuestType networkType) { Pod pod = dest.getPod(); if (pod != null && dest.getDataCenter().getNetworkType() == NetworkType.Basic && trafficType == TrafficType.Guest) { - 
SearchCriteriaService sc = SearchCriteria2.create(BaremetalPxeVO.class); - sc.addAnd(sc.getEntity().getPodId(), Op.EQ, pod.getId()); + QueryBuilder sc = QueryBuilder.create(BaremetalPxeVO.class); + sc.and(sc.entity().getPodId(), Op.EQ, pod.getId()); return sc.find() != null; } @@ -119,14 +116,11 @@ public class BaremetalPxeElement extends AdapterBase implements NetworkElement { VMInstanceVO vo = _vmDao.findById(vm.getId()); if (vo.getLastHostId() == null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); nic.setMacAddress(dest.getHost().getPrivateMacAddress()); NicVO nicVo = _nicDao.findById(nic.getId()); assert vo != null : "Where ths nic " + nic.getId() + " going???"; nicVo.setMacAddress(nic.getMacAddress()); _nicDao.update(nicVo.getId(), nicVo); - txn.commit(); /*This vm is just being created */ if (!_pxeMgr.prepare(vm, nic, dest, context)) { diff --git a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java index 4ce4934e117..ef6ec312d13 100755 --- a/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java +++ b/plugins/hypervisors/baremetal/src/com/cloud/baremetal/networkservice/BaremetalPxeManagerImpl.java @@ -36,9 +36,8 @@ import org.apache.log4j.Logger; import org.apache.cloudstack.api.AddBaremetalKickStartPxeCmd; import org.apache.cloudstack.api.AddBaremetalPxeCmd; import org.apache.cloudstack.api.AddBaremetalPxePingServerCmd; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.api.ListBaremetalPxeServersCmd; -import org.apache.log4j.Logger; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -62,9 +61,8 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.uservm.UserVm; import com.cloud.utils.StringUtils; 
import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.NicProfile; import com.cloud.vm.NicVO; @@ -220,10 +218,10 @@ public class BaremetalPxeManagerImpl extends ManagerBase implements BaremetalPxe } PhysicalNetworkVO phy = phys.get(0); - SearchCriteriaService sc = SearchCriteria2.create(BaremetalPxeVO.class); + QueryBuilder sc = QueryBuilder.create(BaremetalPxeVO.class); //TODO: handle both kickstart and PING //sc.addAnd(sc.getEntity().getPodId(), Op.EQ, vm.getPodIdToDeployIn()); - sc.addAnd(sc.getEntity().getPhysicalNetworkId(), Op.EQ, phy.getId()); + sc.and(sc.entity().getPhysicalNetworkId(), Op.EQ, phy.getId()); BaremetalPxeVO pxeVo = sc.find(); if (pxeVo == null) { throw new CloudRuntimeException("No PXE server found in pod: " + vm.getPodIdToDeployIn() + ", you need to add it before starting VM"); diff --git a/plugins/hypervisors/kvm/agent-descriptor.xml b/plugins/hypervisors/kvm/agent-descriptor.xml deleted file mode 100644 index 2923c58d141..00000000000 --- a/plugins/hypervisors/kvm/agent-descriptor.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - kvm-agent - - zip - - true - - - - - - - - - ../../../agent/scripts - - 774 - - run.sh - _run.sh - agent.sh - - - - target - - 555 - - cloud-plugin-hypervisor-kvm-*.jar - - - - ../../../scripts - scripts - 555 - - - ../../../agent/conf - conf - 555 - 774 - - agent.properties - log4j-cloud.xml - - - - diff --git a/plugins/hypervisors/kvm/pom.xml b/plugins/hypervisors/kvm/pom.xml index 4c0ec982bdf..e2796218985 100644 --- a/plugins/hypervisors/kvm/pom.xml +++ b/plugins/hypervisors/kvm/pom.xml @@ -51,7 +51,6 @@ net.java.dev.jna jna - provided ${cs.jna.version} @@ -88,52 +87,6 @@ - - maven-assembly-plugin - 2.3 - - kvm-agent - false - - agent-descriptor.xml - - - - - make-agent - 
package - - single - - - - - - maven-resources-plugin - 2.6 - - - copy-resources - - package - - copy-resources - - - dist - - - target - - kvm-agent.zip - - - - - - - - diff --git a/plugins/hypervisors/kvm/resources/META-INF/cloudstack/kvm-compute/module.properties b/plugins/hypervisors/kvm/resources/META-INF/cloudstack/kvm-compute/module.properties new file mode 100644 index 00000000000..11379727073 --- /dev/null +++ b/plugins/hypervisors/kvm/resources/META-INF/cloudstack/kvm-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=kvm-compute +parent=compute \ No newline at end of file diff --git a/plugins/hypervisors/kvm/resources/META-INF/cloudstack/kvm-compute/spring-kvm-compute-context.xml b/plugins/hypervisors/kvm/resources/META-INF/cloudstack/kvm-compute/spring-kvm-compute-context.xml new file mode 100644 index 00000000000..ce596f22bbf --- /dev/null +++ b/plugins/hypervisors/kvm/resources/META-INF/cloudstack/kvm-compute/spring-kvm-compute-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java b/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java index 4d83d099e78..e173f32bdd5 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java +++ b/plugins/hypervisors/kvm/src/com/cloud/ha/KVMInvestigator.java @@ -58,15 +58,19 @@ public class KVMInvestigator extends AdapterBase implements Investigator { return null; } CheckOnHostCommand cmd = new CheckOnHostCommand(agent); - List neighbors = _resourceMgr.listAllHostsInCluster(agent.getClusterId()); + List neighbors = _resourceMgr.listHostsInClusterByStatus(agent.getClusterId(), Status.Up); for (HostVO neighbor : neighbors) { if (neighbor.getId() == agent.getId() || neighbor.getHypervisorType() != Hypervisor.HypervisorType.KVM) { continue; } - Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); - - return answer.getResult() ? Status.Down : Status.Up; - + try { + Answer answer = _agentMgr.easySend(neighbor.getId(), cmd); + if (answer != null) { + return answer.getResult() ? 
Status.Down : Status.Up; + } + } catch (Exception e) { + s_logger.debug("Failed to send command to host: " + neighbor.getId()); + } } return null; diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java index 2f795768dbd..84743ec124e 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/BridgeVifDriver.java @@ -23,6 +23,7 @@ import java.io.File; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.net.URI; import javax.naming.ConfigurationException; @@ -45,6 +46,9 @@ public class BridgeVifDriver extends VifDriverBase { private static final Object _vnetBridgeMonitor = new Object(); private String _modifyVlanPath; + private String _modifyVxlanPath; + private String bridgeNameSchema; + @Override public void configure(Map params) throws ConfigurationException { @@ -59,14 +63,20 @@ public class BridgeVifDriver extends VifDriverBase { networkScriptsDir = "scripts/vm/network/vnet"; } - String value = (String)params.get("scripts.timeout"); + bridgeNameSchema = (String) params.get("network.bridge.name.schema"); + + String value = (String) params.get("scripts.timeout"); _timeout = NumbersUtil.parseInt(value, 30 * 60) * 1000; _modifyVlanPath = Script.findScript(networkScriptsDir, "modifyvlan.sh"); if (_modifyVlanPath == null) { throw new ConfigurationException("Unable to find modifyvlan.sh"); } - + _modifyVxlanPath = Script.findScript(networkScriptsDir, "modifyvxlan.sh"); + if (_modifyVxlanPath == null) { + throw new ConfigurationException("Unable to find modifyvxlan.sh"); + } + try { createControlNetwork(); } catch (LibvirtException e) { @@ -84,9 +94,11 @@ public class BridgeVifDriver extends VifDriverBase { LibvirtVMDef.InterfaceDef intf = new LibvirtVMDef.InterfaceDef(); - String vlanId = null; - 
if (nic.getBroadcastType() == Networks.BroadcastDomainType.Vlan) { - vlanId = Networks.BroadcastDomainType.getValue(nic.getBroadcastUri()); + String vNetId = null; + String protocol = null; + if (nic.getBroadcastType() == Networks.BroadcastDomainType.Vlan || nic.getBroadcastType() == Networks.BroadcastDomainType.Vxlan) { + vNetId = Networks.BroadcastDomainType.getValue(nic.getBroadcastUri()); + protocol = Networks.BroadcastDomainType.getSchemeValue(nic.getBroadcastUri()).scheme(); } else if (nic.getBroadcastType() == Networks.BroadcastDomainType.Lswitch) { throw new InternalErrorException("Nicira NVP Logicalswitches are not supported by the BridgeVifDriver"); @@ -94,14 +106,14 @@ public class BridgeVifDriver extends VifDriverBase { String trafficLabel = nic.getName(); if (nic.getType() == Networks.TrafficType.Guest) { Integer networkRateKBps = (nic.getNetworkRateMbps() != null && nic.getNetworkRateMbps().intValue() != -1) ? nic.getNetworkRateMbps().intValue() * 128 : 0; - if (nic.getBroadcastType() == Networks.BroadcastDomainType.Vlan - && !vlanId.equalsIgnoreCase("untagged")) { - if (trafficLabel != null && !trafficLabel.isEmpty()) { - s_logger.debug("creating a vlan dev and bridge for guest traffic per traffic label " + trafficLabel); - String brName = createVlanBr(vlanId, _pifs.get(trafficLabel)); + if (nic.getBroadcastType() == Networks.BroadcastDomainType.Vlan && !vNetId.equalsIgnoreCase("untagged") + || nic.getBroadcastType() == Networks.BroadcastDomainType.Vxlan) { + if(trafficLabel != null && !trafficLabel.isEmpty()) { + s_logger.debug("creating a vNet dev and bridge for guest traffic per traffic label " + trafficLabel); + String brName = createVnetBr(vNetId, trafficLabel, protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType), networkRateKBps); } else { - String brName = createVlanBr(vlanId, _pifs.get("private")); + String brName = createVnetBr(vNetId, "private", protocol); intf.defBridgeNet(brName, null, nic.getMac(), 
getGuestNicModel(guestOsType), networkRateKBps); } } else { @@ -114,13 +126,13 @@ public class BridgeVifDriver extends VifDriverBase { } else if (nic.getType() == Networks.TrafficType.Public) { Integer networkRateKBps = (nic.getNetworkRateMbps() != null && nic.getNetworkRateMbps().intValue() != -1) ? nic.getNetworkRateMbps().intValue() * 128 : 0; if (nic.getBroadcastType() == Networks.BroadcastDomainType.Vlan - && !vlanId.equalsIgnoreCase("untagged")) { - if (trafficLabel != null && !trafficLabel.isEmpty()) { - s_logger.debug("creating a vlan dev and bridge for public traffic per traffic label " + trafficLabel); - String brName = createVlanBr(vlanId, _pifs.get(trafficLabel)); + && !vNetId.equalsIgnoreCase("untagged")) { + if(trafficLabel != null && !trafficLabel.isEmpty()){ + s_logger.debug("creating a vNet dev and bridge for public traffic per traffic label " + trafficLabel); + String brName = createVnetBr(vNetId, trafficLabel, protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType), networkRateKBps); } else { - String brName = createVlanBr(vlanId, _pifs.get("public")); + String brName = createVnetBr(vNetId, "public", protocol); intf.defBridgeNet(brName, null, nic.getMac(), getGuestNicModel(guestOsType), networkRateKBps); } } else { @@ -142,29 +154,42 @@ public class BridgeVifDriver extends VifDriverBase { } private String setVnetBrName(String pifName, String vnetId) { - String brName = "br" + pifName + "-" + vnetId; - String oldStyleBrName = "cloudVirBr" + vnetId; - - String cmdout = Script.runSimpleBashScript("brctl show | grep " + oldStyleBrName); - if (cmdout != null && cmdout.contains(oldStyleBrName)) { - s_logger.info("Using old style bridge name for vlan " + vnetId + " because existing bridge " + oldStyleBrName + " was found"); - brName = oldStyleBrName; - } - - return brName; + return "br" + pifName + "-"+ vnetId; } - private String createVlanBr(String vlanId, String nic) + private String setVxnetBrName(String pifName, String 
vnetId) { + return "brvx-" + vnetId; + } + + private String createVnetBr(String vNetId, String pifKey, String protocol) throws InternalErrorException { - String brName = setVnetBrName(nic, vlanId); - createVnet(vlanId, nic, brName); + String nic = _pifs.get(pifKey); + if (nic == null) { + // if not found in bridge map, maybe traffic label refers to pif already? + File pif = new File("/sys/class/net/" + pifKey); + if (pif.isDirectory()){ + nic = pifKey; + } + } + String brName = ""; + if (protocol.equals(Networks.BroadcastDomainType.Vxlan.scheme())) { + brName = setVxnetBrName(nic, vNetId); + } else { + brName = setVnetBrName(nic, vNetId); + } + createVnet(vNetId, nic, brName, protocol); return brName; } - private void createVnet(String vnetId, String pif, String brName) + + private void createVnet(String vnetId, String pif, String brName, String protocol) throws InternalErrorException { synchronized (_vnetBridgeMonitor) { - final Script command = new Script(_modifyVlanPath, _timeout, s_logger); + String script = _modifyVlanPath; + if(protocol.equals(Networks.BroadcastDomainType.Vxlan.scheme())) { + script = _modifyVxlanPath; + } + final Script command = new Script(script, _timeout, s_logger); command.add("-v", vnetId); command.add("-p", pif); command.add("-b", brName); @@ -180,7 +205,7 @@ public class BridgeVifDriver extends VifDriverBase { private void deleteVnetBr(String brName) { synchronized (_vnetBridgeMonitor) { - String cmdout = Script.runSimpleBashScript("ls /sys/class/net/" + brName + "/brif | grep vnet"); + String cmdout = Script.runSimpleBashScript("ls /sys/class/net/" + brName + "/brif | tr '\n' ' '"); if (cmdout != null && cmdout.contains("vnet")) { // Active VM remains on that bridge return; @@ -210,8 +235,16 @@ public class BridgeVifDriver extends VifDriverBase { s_logger.debug("unable to get a vNet ID from name " + brName); return; } - - final Script command = new Script(_modifyVlanPath, _timeout, s_logger); + + String scriptPath = null; + if 
(cmdout != null && cmdout.contains("vxlan")) { + scriptPath = _modifyVxlanPath; + } else{ + scriptPath = _modifyVlanPath; + } + + + final Script command = new Script(scriptPath, _timeout, s_logger); command.add("-o", "delete"); command.add("-v", vNetId); command.add("-p", pName); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java index 0e4d9eea8ac..1117110e2b8 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/KVMHAMonitor.java @@ -20,7 +20,10 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; + +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; + import com.cloud.utils.script.Script; import org.libvirt.Connect; @@ -68,10 +71,10 @@ public class KVMHAMonitor extends KVMHABase implements Runnable { } } - private class Monitor implements Runnable { + private class Monitor extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { synchronized (_storagePool) { for (String uuid : _storagePool.keySet()) { NfsStoragePool primaryStoragePool = _storagePool.get(uuid); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java index 74f02c0c639..8d3a0e9cdf1 100755 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java @@ -18,7 +18,6 @@ package com.cloud.hypervisor.kvm.resource; import java.io.BufferedReader; import java.io.File; -import java.io.FileInputStream; import java.io.FileNotFoundException; import 
java.io.FileOutputStream; import java.io.BufferedOutputStream; @@ -436,7 +435,7 @@ ServerResource { s_logger.info("developer.properties found at " + file.getAbsolutePath()); Properties properties = new Properties(); try { - properties.load(new FileInputStream(file)); + PropertiesUtil.loadFromFile(properties, file); String startMac = (String) properties.get("private.macaddr.start"); if (startMac == null) { @@ -987,6 +986,18 @@ ServerResource { } _pifs.put(bridge, pif); } + + // guest(private) creates bridges on a pif, if private bridge not found try pif direct + // This addresses the unnecessary requirement of someone to create an unused bridge just for traffic label + if (_pifs.get("private") == null) { + s_logger.debug("guest(private) traffic label '" + _guestBridgeName+ "' not found as bridge, looking for physical interface"); + File dev = new File("/sys/class/net/" + _guestBridgeName); + if (dev.exists()) { + s_logger.debug("guest(private) traffic label '" + _guestBridgeName + "' found as a physical device"); + _pifs.put("private", _guestBridgeName); + } + } + s_logger.debug("done looking for pifs, no more bridges"); } @@ -1024,30 +1035,35 @@ ServerResource { } private String matchPifFileInDirectory(String bridgeName){ - File f = new File("/sys/devices/virtual/net/" + bridgeName + "/brif"); + File brif = new File("/sys/devices/virtual/net/" + bridgeName + "/brif"); - if (! f.isDirectory()){ + if (! 
brif.isDirectory()){ + File pif = new File("/sys/class/net/" + bridgeName); + if (pif.isDirectory()) { + // if bridgeName already refers to a pif, return it as-is + return bridgeName; + } s_logger.debug("failing to get physical interface from bridge " - + bridgeName + ", does " + f.getAbsolutePath() + + bridgeName + ", does " + brif.getAbsolutePath() + "exist?"); return ""; } - File[] interfaces = f.listFiles(); + File[] interfaces = brif.listFiles(); for (int i = 0; i < interfaces.length; i++) { String fname = interfaces[i].getName(); s_logger.debug("matchPifFileInDirectory: file name '"+fname+"'"); if (fname.startsWith("eth") || fname.startsWith("bond") || fname.startsWith("vlan") || fname.startsWith("em") - || fname.matches("^p\\d+p\\d+")) { + || fname.matches("^p\\d+p\\d+.*")) { return fname; } } s_logger.debug("failing to get physical interface from bridge " + bridgeName + ", did not find an eth*, bond*, vlan*, em*, or p*p* in " - + f.getAbsolutePath()); + + brif.getAbsolutePath()); return ""; } @@ -1204,8 +1220,6 @@ ServerResource { return execute((AttachIsoCommand) cmd); } else if (cmd instanceof AttachVolumeCommand) { return execute((AttachVolumeCommand) cmd); - } else if (cmd instanceof StopCommand) { - return execute((StopCommand) cmd); } else if (cmd instanceof CheckConsoleProxyLoadCommand) { return execute((CheckConsoleProxyLoadCommand) cmd); } else if (cmd instanceof WatchConsoleProxyLoadCommand) { @@ -1365,7 +1379,7 @@ ServerResource { secondaryStorageUrl + volumeDestPath); _storagePoolMgr.copyPhysicalDisk(volume, - destVolumeName,secondaryStoragePool); + destVolumeName,secondaryStoragePool, 0); return new CopyVolumeAnswer(cmd, true, null, null, volumeName); } else { volumePath = "/volumes/" + cmd.getVolumeId() + File.separator; @@ -1375,7 +1389,7 @@ ServerResource { KVMPhysicalDisk volume = secondaryStoragePool .getPhysicalDisk(cmd.getVolumePath() + ".qcow2"); _storagePoolMgr.copyPhysicalDisk(volume, volumeName, - primaryPool); + primaryPool, 0); 
return new CopyVolumeAnswer(cmd, true, null, null, volumeName); } } catch (CloudRuntimeException e) { @@ -1457,19 +1471,18 @@ ServerResource { if (cmd.getTemplateUrl() != null) { if(primaryPool.getType() == StoragePoolType.CLVM) { - vol = templateToPrimaryDownload(cmd.getTemplateUrl(),primaryPool); + vol = templateToPrimaryDownload(cmd.getTemplateUrl(),primaryPool, dskch.getPath()); } else { BaseVol = primaryPool.getPhysicalDisk(cmd.getTemplateUrl()); - vol = _storagePoolMgr.createDiskFromTemplate(BaseVol, UUID - .randomUUID().toString(), primaryPool); + vol = _storagePoolMgr.createDiskFromTemplate(BaseVol, + dskch.getPath(), primaryPool, 0); } if (vol == null) { return new Answer(cmd, false, " Can't create storage volume on storage pool"); } } else { - vol = primaryPool.createPhysicalDisk(UUID.randomUUID() - .toString(), dskch.getSize()); + vol = primaryPool.createPhysicalDisk(dskch.getPath(), dskch.getSize()); } VolumeTO volume = new VolumeTO(cmd.getVolumeId(), dskch.getType(), pool.getType(), pool.getUuid(), pool.getPath(), @@ -1486,7 +1499,7 @@ ServerResource { } // this is much like PrimaryStorageDownloadCommand, but keeping it separate - protected KVMPhysicalDisk templateToPrimaryDownload(String templateUrl, KVMStoragePool primaryPool) { + protected KVMPhysicalDisk templateToPrimaryDownload(String templateUrl, KVMStoragePool primaryPool, String volUuid) { int index = templateUrl.lastIndexOf("/"); String mountpoint = templateUrl.substring(0, index); String templateName = null; @@ -1522,7 +1535,7 @@ ServerResource { /* Copy volume to primary storage */ - KVMPhysicalDisk primaryVol = _storagePoolMgr.copyPhysicalDisk(templateVol, UUID.randomUUID().toString(), primaryPool); + KVMPhysicalDisk primaryVol = _storagePoolMgr.copyPhysicalDisk(templateVol, volUuid, primaryPool, 0); return primaryVol; } catch (CloudRuntimeException e) { s_logger.error("Failed to download template to primary storage",e); @@ -2347,7 +2360,7 @@ ServerResource { primaryUuid); String volUuid = 
UUID.randomUUID().toString(); KVMPhysicalDisk disk = _storagePoolMgr.copyPhysicalDisk(snapshot, - volUuid, primaryPool); + volUuid, primaryPool, 0); return new CreateVolumeFromSnapshotAnswer(cmd, true, "", disk.getName()); } catch (CloudRuntimeException e) { @@ -2498,7 +2511,7 @@ ServerResource { QemuImgFile destFile = new QemuImgFile(tmpltPath + "/" + cmd.getUniqueName() + ".qcow2"); destFile.setFormat(PhysicalDiskFormat.QCOW2); - QemuImg q = new QemuImg(); + QemuImg q = new QemuImg(0); try { q.convert(srcFile, destFile); } catch (QemuImgException e) { @@ -2601,7 +2614,7 @@ ServerResource { cmd.getPoolUuid()); KVMPhysicalDisk primaryVol = _storagePoolMgr.copyPhysicalDisk( - tmplVol, UUID.randomUUID().toString(), primaryPool); + tmplVol, UUID.randomUUID().toString(), primaryPool, 0); return new PrimaryStorageDownloadAnswer(primaryVol.getName(), primaryVol.getSize()); @@ -2907,6 +2920,8 @@ ServerResource { */ destDomain = dm.migrate(dconn, (1 << 0) | (1 << 3), xmlDesc, vmName, "tcp:" + cmd.getDestinationIp(), _migrateSpeed); + + _storagePoolMgr.disconnectPhysicalDisksViaVmSpec(cmd.getVirtualMachine()); } catch (LibvirtException e) { s_logger.debug("Can't migrate domain: " + e.getMessage()); result = e.getMessage(); @@ -2955,6 +2970,9 @@ ServerResource { } NicTO[] nics = vm.getNics(); + + boolean success = false; + try { Connect conn = LibvirtConnection.getConnectionByVmName(vm.getName()); for (NicTO nic : nics) { @@ -2969,10 +2987,14 @@ ServerResource { } } + _storagePoolMgr.connectPhysicalDisksViaVmSpec(vm); + synchronized (_vms) { _vms.put(vm.getName(), State.Migrating); } + success = true; + return new PrepareForMigrationAnswer(cmd); } catch (LibvirtException e) { return new PrepareForMigrationAnswer(cmd, e.toString()); @@ -2980,6 +3002,10 @@ ServerResource { return new PrepareForMigrationAnswer(cmd, e.toString()); } catch (URISyntaxException e) { return new PrepareForMigrationAnswer(cmd, e.toString()); + } finally { + if (!success) { + 
_storagePoolMgr.disconnectPhysicalDisksViaVmSpec(vm); + } } } @@ -3243,10 +3269,7 @@ ServerResource { String result = stopVM(conn, vmName); if (result == null) { for (DiskDef disk : disks) { - if (disk.getDeviceType() == DiskDef.deviceType.CDROM - && disk.getDiskPath() != null) { - cleanupDisk(conn, disk); - } + cleanupDisk(disk); } for (InterfaceDef iface: ifaces) { // We don't know which "traffic type" is associated with @@ -3258,7 +3281,7 @@ ServerResource { } state = State.Stopped; - return new StopAnswer(cmd, result, 0, true); + return new StopAnswer(cmd, result, true); } catch (LibvirtException e) { return new StopAnswer(cmd, e.getMessage(), false); } finally { @@ -3357,10 +3380,28 @@ ServerResource { } } + protected String getUuid(String uuid) { + if (uuid == null) { + uuid = UUID.randomUUID().toString(); + } else { + try { + UUID uuid2 = UUID.fromString(uuid); + String uuid3 = uuid2.toString(); + if (!uuid3.equals(uuid)) { + uuid = UUID.randomUUID().toString(); + } + } catch (IllegalArgumentException e) { + uuid = UUID.randomUUID().toString(); + } + } + return uuid; + } protected LibvirtVMDef createVMFromSpec(VirtualMachineTO vmTO) { LibvirtVMDef vm = new LibvirtVMDef(); vm.setDomainName(vmTO.getName()); - vm.setDomUUID(vmTO.getUuid()); + String uuid = vmTO.getUuid(); + uuid = getUuid(uuid); + vm.setDomUUID(uuid); vm.setDomDescription(vmTO.getOs()); GuestDef guest = new GuestDef(); @@ -3501,6 +3542,8 @@ ServerResource { createVbd(conn, vmSpec, vmName, vm); + _storagePoolMgr.connectPhysicalDisksViaVmSpec(vmSpec); + createVifs(vmSpec, vm); s_logger.debug("starting " + vmName + ": " + vm.toString()); @@ -3532,17 +3575,20 @@ ServerResource { // pass cmdline info to system vms if (vmSpec.getType() != VirtualMachine.Type.User) { - if ((_kernelVersion < 2006034) && (conn.getVersion() < 1001000)) { // CLOUDSTACK-2823: try passCmdLine some times if kernel < 2.6.34 and qemu < 1.1.0 on hypervisor (for instance, CentOS 6.4) + if ((conn.getVersion() < 1001000)) { // 
CLOUDSTACK-2823: try passCmdLine some times if kernel < 2.6.34 and qemu < 1.1.0 on hypervisor (for instance, CentOS 6.4) //wait for 5 minutes at most - for (int count = 0; count < 30; count ++) { - boolean succeed = passCmdLine(vmName, vmSpec.getBootArgs()); - if (succeed) { - break; + String controlIp = null; + for (NicTO nic : nics) { + if (nic.getType() == TrafficType.Control) { + controlIp = nic.getIp(); } - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - s_logger.trace("Ignoring InterruptedException.", e); + } + for (int count = 0; count < 30; count ++) { + passCmdLine(vmName, vmSpec.getBootArgs()); + //check router is up? + boolean result = _virtRouterResource.connect(controlIp, 1, 5000); + if (result) { + break; } } } else { @@ -3578,6 +3624,9 @@ ServerResource { _vms.remove(vmName); } } + if (state != State.Running) { + _storagePoolMgr.disconnectPhysicalDisksViaVmSpec(vmSpec); + } } } @@ -3627,10 +3676,10 @@ ServerResource { physicalDisk = secondaryStorage.getPhysicalDisk(volName); } else if (volume.getType() != Volume.Type.ISO) { PrimaryDataStoreTO store = (PrimaryDataStoreTO)data.getDataStore(); - pool = _storagePoolMgr.getStoragePool( - store.getPoolType(), - store.getUuid()); - physicalDisk = pool.getPhysicalDisk(data.getPath()); + physicalDisk = _storagePoolMgr.getPhysicalDisk( store.getPoolType(), + store.getUuid(), + data.getPath()); + pool = physicalDisk.getPool(); } String volPath = null; @@ -3659,7 +3708,7 @@ ServerResource { disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(), pool.getAuthUserName(), pool.getUuid(), devId, diskBusType, diskProtocol.RBD); - } else if (pool.getType() == StoragePoolType.CLVM) { + } else if (pool.getType() == StoragePoolType.CLVM || physicalDisk.getFormat() == PhysicalDiskFormat.RAW) { disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusType); } else { @@ -3703,10 +3752,9 @@ ServerResource { if (volume.getType() == 
Volume.Type.ROOT) { DataTO data = volume.getData(); PrimaryDataStoreTO store = (PrimaryDataStoreTO)data.getDataStore(); - KVMStoragePool pool = _storagePoolMgr.getStoragePool( - store.getPoolType(), - store.getUuid()); - KVMPhysicalDisk physicalDisk = pool.getPhysicalDisk(data.getPath()); + KVMPhysicalDisk physicalDisk = _storagePoolMgr.getPhysicalDisk( store.getPoolType(), + store.getUuid(), + data.getPath()); FilesystemDef rootFs = new FilesystemDef(physicalDisk.getPath(), "/"); vm.getDevices().addDevice(rootFs); break; @@ -3744,34 +3792,20 @@ ServerResource { return new CheckSshAnswer(cmd); } - public boolean cleanupDisk(Connect conn, DiskDef disk) { - // need to umount secondary storage + public boolean cleanupDisk(DiskDef disk) { String path = disk.getDiskPath(); - String poolUuid = null; - if (path != null) { - String[] token = path.split("/"); - if (token.length > 3) { - poolUuid = token[2]; - } - } - if (poolUuid == null) { - return true; - } - - try { - // we use libvirt as storage adaptor since we passed a libvirt - // connection to cleanupDisk. We pass a storage type that maps - // to libvirt adaptor. 
- KVMStoragePool pool = _storagePoolMgr.getStoragePool( - StoragePoolType.Filesystem, poolUuid); - if (pool != null) { - _storagePoolMgr.deleteStoragePool(pool.getType(),pool.getUuid()); - } - return true; - } catch (CloudRuntimeException e) { + if (path == null) { + s_logger.debug("Unable to clean up disk with null path (perhaps empty cdrom drive):" + disk); return false; } + + if (path.endsWith("systemvm.iso")) { + // don't need to clean up system vm ISO as it's stored in local + return true; + } + + return _storagePoolMgr.disconnectPhysicalDiskByPath(path); } protected synchronized String attachOrDetachISO(Connect conn, @@ -3801,7 +3835,7 @@ ServerResource { if (result == null && !isAttach) { for (DiskDef disk : disks) { if (disk.getDeviceType() == DiskDef.deviceType.CDROM) { - cleanupDisk(conn, disk); + cleanupDisk(disk); } } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java index a1721e1de89..c704a4fa53a 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePool.java @@ -17,19 +17,23 @@ package com.cloud.hypervisor.kvm.storage; import java.util.List; +import java.util.Map; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import com.cloud.storage.Storage.StoragePoolType; public interface KVMStoragePool { - public KVMPhysicalDisk createPhysicalDisk(String name, - PhysicalDiskFormat format, long size); + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, PhysicalDiskFormat format, long size); - public KVMPhysicalDisk createPhysicalDisk(String name, long size); + public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, long size); + + public boolean connectPhysicalDisk(String volumeUuid, Map details); public KVMPhysicalDisk getPhysicalDisk(String volumeUuid); - public boolean 
deletePhysicalDisk(String uuid); + public boolean disconnectPhysicalDisk(String volumeUuid); + + public boolean deletePhysicalDisk(String volumeUuid); public List listPhysicalDisks(); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java index 31d6179e8d9..9ca709c3ca3 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java @@ -18,6 +18,8 @@ package com.cloud.hypervisor.kvm.storage; import java.net.URI; import java.net.URISyntaxException; +import java.util.List; +import java.util.Arrays; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.HashMap; @@ -25,17 +27,52 @@ import java.util.UUID; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import com.cloud.agent.api.to.VirtualMachineTO; +import com.cloud.agent.api.to.DiskTO; + import com.cloud.hypervisor.kvm.resource.KVMHABase; import com.cloud.hypervisor.kvm.resource.KVMHABase.PoolType; import com.cloud.hypervisor.kvm.resource.KVMHAMonitor; import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.StorageLayer; +import com.cloud.storage.Volume; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.log4j.Logger; + +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; public class KVMStoragePoolManager { - private StorageAdaptor _storageAdaptor; + private static final Logger s_logger = Logger + .getLogger(KVMStoragePoolManager.class); + private class StoragePoolInformation { + String name; + String host; + int port; + String path; + String userInfo; + boolean type; + StoragePoolType poolType; + + + public StoragePoolInformation(String name, + String host, + int port, + String path, + String 
userInfo, + StoragePoolType poolType, + boolean type) { + this.name = name; + this.host = host; + this.port = port; + this.path = path; + this.userInfo = userInfo; + this.type = type; + this.poolType = poolType; + } + } private KVMHAMonitor _haMonitor; - private final Map _storagePools = new ConcurrentHashMap(); + private final Map _storagePools = new ConcurrentHashMap(); private final Map _storageMapper = new HashMap(); private StorageAdaptor getStorageAdaptor(StoragePoolType type) { @@ -51,25 +88,126 @@ public class KVMStoragePoolManager { return adaptor; } - private void addStoragePool(String uuid) { + private void addStoragePool(String uuid, StoragePoolInformation pool) { synchronized (_storagePools) { if (!_storagePools.containsKey(uuid)) { - _storagePools.put(uuid, new Object()); + _storagePools.put(uuid, pool); } } } public KVMStoragePoolManager(StorageLayer storagelayer, KVMHAMonitor monitor) { - this._storageAdaptor = new LibvirtStorageAdaptor(storagelayer); this._haMonitor = monitor; this._storageMapper.put("libvirt", new LibvirtStorageAdaptor(storagelayer)); // add other storage adaptors here - // this._storageMapper.put("newadaptor", new NewStorageAdaptor(storagelayer)); + // this._storageMapper.put("newadaptor", new NewStorageAdaptor(storagelayer)); + this._storageMapper.put(StoragePoolType.Iscsi.toString(), new iScsiAdmStorageAdaptor()); + } + + public boolean connectPhysicalDisk(StoragePoolType type, String poolUuid, String volPath, Map details) { + StorageAdaptor adaptor = getStorageAdaptor(type); + KVMStoragePool pool = adaptor.getStoragePool(poolUuid); + + return adaptor.connectPhysicalDisk(volPath, pool, details); + } + + public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { + boolean result = false; + + final String vmName = vmSpec.getName(); + + List disks = Arrays.asList(vmSpec.getDisks()); + + for (DiskTO disk : disks) { + if (disk.getType() != Volume.Type.ISO) { + VolumeObjectTO vol = (VolumeObjectTO) disk.getData(); + 
PrimaryDataStoreTO store = (PrimaryDataStoreTO) vol.getDataStore(); + KVMStoragePool pool = getStoragePool(store.getPoolType(), store.getUuid()); + + StorageAdaptor adaptor = getStorageAdaptor(pool.getType()); + + result = adaptor.connectPhysicalDisk(vol.getPath(), pool, disk.getDetails()); + + if (!result) { + s_logger.error("Failed to connect disks via vm spec for vm: " + vmName + " volume:" + vol.toString()); + + return result; + } + } + } + + return result; + } + + public boolean disconnectPhysicalDiskByPath(String path) { + for (Map.Entry set : _storageMapper.entrySet()) { + StorageAdaptor adaptor = set.getValue(); + + if (adaptor.disconnectPhysicalDiskByPath(path)) { + return true; + } + } + + return false; + } + + public boolean disconnectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec) { + if (vmSpec == null) { + /* CloudStack often tries to stop VMs that shouldn't be running, to ensure a known state, + for example if we lose communication with the agent and the VM is brought up elsewhere. + We may not know about these yet. This might mean that we can't use the vmspec map, because + when we restart the agent we lose all of the info about running VMs. 
*/ + + s_logger.debug("disconnectPhysicalDiskViaVmSpec: Attempted to stop a VM that is not yet in our hash map"); + + return true; + } + + boolean result = true; + + final String vmName = vmSpec.getName(); + + List disks = Arrays.asList(vmSpec.getDisks()); + + for (DiskTO disk : disks) { + if (disk.getType() != Volume.Type.ISO) { + s_logger.debug("Disconnecting disk " + disk.getPath()); + + VolumeObjectTO vol = (VolumeObjectTO) disk.getData(); + PrimaryDataStoreTO store = (PrimaryDataStoreTO) vol.getDataStore(); + + KVMStoragePool pool = getStoragePool(store.getPoolType(), store.getUuid()); + + StorageAdaptor adaptor = getStorageAdaptor(pool.getType()); + + // if a disk fails to disconnect, still try to disconnect remaining + + boolean subResult = adaptor.disconnectPhysicalDisk(vol.getPath(), pool); + + if (!subResult) { + s_logger.error("Failed to disconnect disks via vm spec for vm: " + vmName + " volume:" + vol.toString()); + + result = false; + } + } + } + + return result; } public KVMStoragePool getStoragePool(StoragePoolType type, String uuid) { + StorageAdaptor adaptor = getStorageAdaptor(type); - return adaptor.getStoragePool(uuid); + KVMStoragePool pool = null; + try { + pool = adaptor.getStoragePool(uuid); + } catch(Exception e) { + StoragePoolInformation info = _storagePools.get(uuid); + if (info != null) { + pool = createStoragePool(info.name, info.host, info.port, info.path, info.userInfo, info.poolType, info.type); + } + } + return pool; } public KVMStoragePool getStoragePoolByURI(String uri) { @@ -98,6 +236,39 @@ public class KVMStoragePoolManager { return createStoragePool(uuid, sourceHost, 0, sourcePath, "", protocol, false); } + public KVMPhysicalDisk getPhysicalDisk(StoragePoolType type, String poolUuid, String volName) { + int cnt = 0; + int retries = 10; + KVMPhysicalDisk vol = null; + //harden get volume, try cnt times to get volume, in case volume is created on other host + String errMsg = ""; + while (cnt < retries) { + try { + 
KVMStoragePool pool = getStoragePool(type, poolUuid); + vol = pool.getPhysicalDisk(volName); + if (vol != null) { + break; + } + } catch (Exception e) { + s_logger.debug("Failed to find volume:" + volName + " due to" + e.toString() + ", retry:" + cnt); + errMsg = e.toString(); + } + + try { + Thread.sleep(30000); + } catch (InterruptedException e) { + } + cnt++; + } + + if (vol == null) { + throw new CloudRuntimeException(errMsg); + } else { + return vol; + } + + } + public KVMStoragePool createStoragePool( String name, String host, int port, String path, String userInfo, StoragePoolType type) { @@ -105,7 +276,8 @@ public class KVMStoragePoolManager { return createStoragePool(name, host, port, path, userInfo, type, true); } - private KVMStoragePool createStoragePool( String name, String host, int port, + //Note: due to bug CLOUDSTACK-4459, createStoragepool can be called in parallel, so need to be synced. + private synchronized KVMStoragePool createStoragePool( String name, String host, int port, String path, String userInfo, StoragePoolType type, boolean primaryStorage) { StorageAdaptor adaptor = getStorageAdaptor(type); @@ -119,10 +291,18 @@ public class KVMStoragePoolManager { PoolType.PrimaryStorage); _haMonitor.addStoragePool(nfspool); } - addStoragePool(pool.getUuid()); + StoragePoolInformation info = new StoragePoolInformation(name, host, port, path, userInfo, type, primaryStorage); + addStoragePool(pool.getUuid(), info); return pool; } + public boolean disconnectPhysicalDisk(StoragePoolType type, String poolUuid, String volPath) { + StorageAdaptor adaptor = getStorageAdaptor(type); + KVMStoragePool pool = adaptor.getStoragePool(poolUuid); + + return adaptor.disconnectPhysicalDisk(volPath, pool); + } + public boolean deleteStoragePool(StoragePoolType type, String uuid) { StorageAdaptor adaptor = getStorageAdaptor(type); _haMonitor.removeStoragePool(uuid); @@ -132,25 +312,25 @@ public class KVMStoragePoolManager { } public KVMPhysicalDisk 
createDiskFromTemplate(KVMPhysicalDisk template, String name, - KVMStoragePool destPool) { + KVMStoragePool destPool, int timeout) { StorageAdaptor adaptor = getStorageAdaptor(destPool.getType()); // LibvirtStorageAdaptor-specific statement if (destPool.getType() == StoragePoolType.RBD) { return adaptor.createDiskFromTemplate(template, name, - PhysicalDiskFormat.RAW, template.getSize(), destPool); + PhysicalDiskFormat.RAW, template.getSize(), destPool, timeout); } else if (destPool.getType() == StoragePoolType.CLVM) { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.RAW, template.getSize(), - destPool); + destPool, timeout); } else if (template.getFormat() == PhysicalDiskFormat.DIR) { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.DIR, - template.getSize(), destPool); + template.getSize(), destPool, timeout); } else { return adaptor.createDiskFromTemplate(template, name, PhysicalDiskFormat.QCOW2, - template.getSize(), destPool); + template.getSize(), destPool, timeout); } } @@ -163,9 +343,9 @@ public class KVMStoragePoolManager { } public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, - KVMStoragePool destPool) { + KVMStoragePool destPool, int timeout) { StorageAdaptor adaptor = getStorageAdaptor(destPool.getType()); - return adaptor.copyPhysicalDisk(disk, name, destPool); + return adaptor.copyPhysicalDisk(disk, name, destPool, timeout); } public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java index 3cca4fd087b..8b8cd9ed93d 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java @@ -18,10 +18,10 @@ */ package com.cloud.hypervisor.kvm.storage; -import 
java.io.File; -import java.io.FileOutputStream; -import java.io.FileNotFoundException; import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; import java.io.IOException; import java.net.URISyntaxException; import java.text.DateFormat; @@ -35,11 +35,6 @@ import java.util.UUID; import javax.naming.ConfigurationException; -import com.cloud.agent.api.storage.CopyVolumeAnswer; -import com.cloud.agent.api.to.DataObjectType; -import com.cloud.agent.api.to.S3TO; -import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.utils.S3Utils; import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; @@ -49,6 +44,8 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachAnswer; import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; @@ -57,20 +54,28 @@ import org.apache.cloudstack.utils.qemu.QemuImg; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import org.apache.cloudstack.utils.qemu.QemuImgException; import org.apache.cloudstack.utils.qemu.QemuImgFile; -import org.apache.log4j.Logger; import org.apache.commons.io.FileUtils; +import org.apache.log4j.Logger; import org.libvirt.Connect; import org.libvirt.Domain; import org.libvirt.DomainInfo; import org.libvirt.DomainSnapshot; import org.libvirt.LibvirtException; +import com.ceph.rados.IoCTX; +import com.ceph.rados.Rados; +import com.ceph.rados.RadosException; +import 
com.ceph.rbd.Rbd; +import com.ceph.rbd.RbdException; +import com.ceph.rbd.RbdImage; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; +import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.NfsTO; +import com.cloud.agent.api.to.S3TO; import com.cloud.exception.InternalErrorException; import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource; import com.cloud.hypervisor.kvm.resource.LibvirtConnection; @@ -87,16 +92,10 @@ import com.cloud.storage.template.Processor.FormatInfo; import com.cloud.storage.template.QCOW2Processor; import com.cloud.storage.template.TemplateLocation; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.S3Utils; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; -import com.ceph.rados.Rados; -import com.ceph.rados.RadosException; -import com.ceph.rados.IoCTX; -import com.ceph.rbd.Rbd; -import com.ceph.rbd.RbdImage; -import com.ceph.rbd.RbdException; - import static com.cloud.utils.S3Utils.putFile; public class KVMStorageProcessor implements StorageProcessor { @@ -147,8 +146,7 @@ public class KVMStorageProcessor implements StorageProcessor { DataTO destData = cmd.getDestTO(); TemplateObjectTO template = (TemplateObjectTO) srcData; DataStoreTO imageStore = template.getDataStore(); - TemplateObjectTO volume = (TemplateObjectTO) destData; - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) volume.getDataStore(); + PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO) destData.getDataStore(); if (!(imageStore instanceof NfsTO)) { return new CopyCmdAnswer("unsupported protocol"); @@ -194,22 +192,46 @@ public class KVMStorageProcessor implements StorageProcessor { KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); - KVMPhysicalDisk 
primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), - primaryPool); + KVMPhysicalDisk primaryVol = null; + if (destData instanceof VolumeObjectTO) { + VolumeObjectTO volume = (VolumeObjectTO) destData; + primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, volume.getUuid(), + primaryPool, cmd.getWaitInMillSeconds()); + } else if (destData instanceof TemplateObjectTO) { + TemplateObjectTO destTempl = (TemplateObjectTO) destData; + primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, destTempl.getUuid(), + primaryPool, cmd.getWaitInMillSeconds()); + } else { + primaryVol = storagePoolMgr.copyPhysicalDisk(tmplVol, UUID.randomUUID().toString(), + primaryPool, cmd.getWaitInMillSeconds()); + } - TemplateObjectTO newTemplate = new TemplateObjectTO(); - newTemplate.setPath(primaryVol.getName()); + DataTO data = null; /** * Force the ImageFormat for RBD templates to RAW * */ - if (primaryPool.getType() == StoragePoolType.RBD) { - newTemplate.setFormat(ImageFormat.RAW); - } else { - newTemplate.setFormat(ImageFormat.QCOW2); + if (destData.getObjectType() == DataObjectType.TEMPLATE) { + TemplateObjectTO newTemplate = new TemplateObjectTO(); + newTemplate.setPath(primaryVol.getName()); + if (primaryPool.getType() == StoragePoolType.RBD) { + newTemplate.setFormat(ImageFormat.RAW); + } else { + newTemplate.setFormat(ImageFormat.QCOW2); + } + data = newTemplate; + } else if (destData.getObjectType() == DataObjectType.VOLUME) { + VolumeObjectTO volumeObjectTO = new VolumeObjectTO(); + volumeObjectTO.setPath(primaryVol.getName()); + if (primaryVol.getFormat() == PhysicalDiskFormat.RAW) + volumeObjectTO.setFormat(ImageFormat.RAW); + else if (primaryVol.getFormat() == PhysicalDiskFormat.QCOW2) { + volumeObjectTO.setFormat(ImageFormat.QCOW2); + } + data = volumeObjectTO; } - return new CopyCmdAnswer(newTemplate); + return new CopyCmdAnswer(data); } catch (CloudRuntimeException e) { return new CopyCmdAnswer(e.toString()); } finally { @@ -219,8 +241,8 
@@ public class KVMStorageProcessor implements StorageProcessor { } } - // this is much like PrimaryStorageDownloadCommand, but keeping it separate - private KVMPhysicalDisk templateToPrimaryDownload(String templateUrl, KVMStoragePool primaryPool) { + // this is much like PrimaryStorageDownloadCommand, but keeping it separate. copies template direct to root disk + private KVMPhysicalDisk templateToPrimaryDownload(String templateUrl, KVMStoragePool primaryPool, String volUuid, int timeout) { int index = templateUrl.lastIndexOf("/"); String mountpoint = templateUrl.substring(0, index); String templateName = null; @@ -256,8 +278,8 @@ public class KVMStorageProcessor implements StorageProcessor { /* Copy volume to primary storage */ - KVMPhysicalDisk primaryVol = storagePoolMgr.copyPhysicalDisk(templateVol, UUID.randomUUID().toString(), - primaryPool); + KVMPhysicalDisk primaryVol = storagePoolMgr.copyPhysicalDisk(templateVol, volUuid, + primaryPool, timeout); return primaryVol; } catch (CloudRuntimeException e) { s_logger.error("Failed to download template to primary storage", e); @@ -287,10 +309,15 @@ public class KVMStorageProcessor implements StorageProcessor { String templatePath = template.getPath(); if (primaryPool.getType() == StoragePoolType.CLVM) { - vol = templateToPrimaryDownload(templatePath, primaryPool); + templatePath = ((NfsTO)imageStore).getUrl() + File.separator + templatePath; + vol = templateToPrimaryDownload(templatePath, primaryPool, volume.getUuid(), cmd.getWaitInMillSeconds()); } else { - BaseVol = primaryPool.getPhysicalDisk(templatePath); - vol = storagePoolMgr.createDiskFromTemplate(BaseVol, UUID.randomUUID().toString(), primaryPool); + if (templatePath.contains("/mnt")) { + //upgrade issue, if the path contains path, need to extract the volume uuid from path + templatePath = templatePath.substring(templatePath.lastIndexOf(File.separator) + 1); + } + BaseVol = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), 
templatePath); + vol = storagePoolMgr.createDiskFromTemplate(BaseVol, volume.getUuid(), BaseVol.getPool(), cmd.getWaitInMillSeconds()); } if (vol == null) { return new CopyCmdAnswer(" Can't create storage volume on storage pool"); @@ -353,7 +380,7 @@ public class KVMStorageProcessor implements StorageProcessor { String srcVolumeName = srcVolumePath.substring(index + 1); secondaryStoragePool = storagePoolMgr.getStoragePoolByURI( secondaryStorageUrl + File.separator + volumeDir - ); + ); if (!srcVolumeName.endsWith(".qcow2") && srcFormat == ImageFormat.QCOW2) { srcVolumeName = srcVolumeName + ".qcow2"; } @@ -361,7 +388,7 @@ public class KVMStorageProcessor implements StorageProcessor { .getPhysicalDisk(srcVolumeName); volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString())); KVMPhysicalDisk newDisk = storagePoolMgr.copyPhysicalDisk(volume, volumeName, - primaryPool); + primaryPool, cmd.getWaitInMillSeconds()); VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setFormat(ImageFormat.valueOf(newDisk.getFormat().toString().toUpperCase())); newVol.setPath(volumeName); @@ -394,28 +421,14 @@ public class KVMStorageProcessor implements StorageProcessor { String destVolumePath = destData.getPath(); String secondaryStorageUrl = nfsStore.getUrl(); KVMStoragePool secondaryStoragePool = null; - KVMStoragePool primaryPool = null; - try { - try { - primaryPool = storagePoolMgr.getStoragePool( - primaryStore.getPoolType(), - primaryStore.getUuid()); - } catch (CloudRuntimeException e) { - if (e.getMessage().contains("not found")) { - primaryPool = storagePoolMgr.createStoragePool(primaryStore.getUuid(), - primaryStore.getHost(), primaryStore.getPort(), - primaryStore.getPath(), null, - primaryStore.getPoolType()); - } else { - return new CopyCmdAnswer(e.getMessage()); - } - } + try { String volumeName = UUID.randomUUID().toString(); String destVolumeName = volumeName + "." 
+ destFormat.getFileExtension(); - KVMPhysicalDisk volume = primaryPool.getPhysicalDisk(srcVolumePath); + KVMPhysicalDisk volume = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), srcVolumePath); volume.setFormat(PhysicalDiskFormat.valueOf(srcFormat.toString())); + secondaryStoragePool = storagePoolMgr.getStoragePoolByURI( secondaryStorageUrl); secondaryStoragePool.createFolder(destVolumePath); @@ -423,7 +436,7 @@ public class KVMStorageProcessor implements StorageProcessor { secondaryStoragePool = storagePoolMgr.getStoragePoolByURI( secondaryStorageUrl + File.separator + destVolumePath); storagePoolMgr.copyPhysicalDisk(volume, - destVolumeName,secondaryStoragePool); + destVolumeName,secondaryStoragePool, cmd.getWaitInMillSeconds()); VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setPath(destVolumePath + File.separator + destVolumeName); newVol.setFormat(destFormat); @@ -441,7 +454,7 @@ public class KVMStorageProcessor implements StorageProcessor { public Answer createTemplateFromVolume(CopyCommand cmd) { DataTO srcData = cmd.getSrcTO(); DataTO destData = cmd.getDestTO(); - int wait = cmd.getWait(); + int wait = cmd.getWaitInMillSeconds(); TemplateObjectTO template = (TemplateObjectTO) destData; DataStoreTO imageStore = template.getDataStore(); VolumeObjectTO volume = (VolumeObjectTO) srcData; @@ -459,24 +472,15 @@ public class KVMStorageProcessor implements StorageProcessor { secondaryStorage = storagePoolMgr.getStoragePoolByURI(nfsImageStore.getUrl()); - try { - primary = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); - } catch (CloudRuntimeException e) { - if (e.getMessage().contains("not found")) { - primary = storagePoolMgr.createStoragePool(primaryStore.getUuid(), primaryStore.getHost(), - primaryStore.getPort(), primaryStore.getPath(), null, primaryStore.getPoolType()); - } else { - return new CopyCmdAnswer(e.getMessage()); - } - } + primary = 
storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); - KVMPhysicalDisk disk = primary.getPhysicalDisk(volume.getPath()); + KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), volume.getPath()); String tmpltPath = secondaryStorage.getLocalPath() + File.separator + templateFolder; this.storageLayer.mkdirs(tmpltPath); String templateName = UUID.randomUUID().toString(); if (primary.getType() != StoragePoolType.RBD) { - Script command = new Script(_createTmplPath, wait * 1000, s_logger); + Script command = new Script(_createTmplPath, wait, s_logger); command.add("-f", disk.getPath()); command.add("-t", tmpltPath); command.add("-n", templateName + ".qcow2"); @@ -497,7 +501,7 @@ public class KVMStorageProcessor implements StorageProcessor { QemuImgFile destFile = new QemuImgFile(tmpltPath + "/" + templateName + ".qcow2"); destFile.setFormat(PhysicalDiskFormat.QCOW2); - QemuImg q = new QemuImg(); + QemuImg q = new QemuImg(cmd.getWaitInMillSeconds()); try { q.convert(srcFile, destFile); } catch (QemuImgException e) { @@ -625,7 +629,7 @@ public class KVMStorageProcessor implements StorageProcessor { SnapshotObjectTO snapshotOnCacheStore = (SnapshotObjectTO)answer.getNewData(); snapshotOnCacheStore.setDataStore(cacheStore); ((SnapshotObjectTO) destData).setDataStore(imageStore); - CopyCommand newCpyCmd = new CopyCommand(snapshotOnCacheStore, destData, cmd.getWait(), cmd.executeInSequence()); + CopyCommand newCpyCmd = new CopyCommand(snapshotOnCacheStore, destData, cmd.getWaitInMillSeconds(), cmd.executeInSequence()); return copyToObjectStore(newCpyCmd); } @Override @@ -661,9 +665,9 @@ public class KVMStorageProcessor implements StorageProcessor { snapshotRelPath = destSnapshot.getPath(); snapshotDestPath = ssPmountPath + File.separator + snapshotRelPath; - KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), - primaryStore.getUuid()); - KVMPhysicalDisk 
snapshotDisk = primaryPool.getPhysicalDisk(volumePath); + KVMPhysicalDisk snapshotDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), + primaryStore.getUuid(), volumePath); + KVMStoragePool primaryPool = snapshotDisk.getPool(); /** * RBD snapshots can't be copied using qemu-img, so we have to use @@ -729,7 +733,7 @@ public class KVMStorageProcessor implements StorageProcessor { return new CopyCmdAnswer(e.toString()); } } else { - Script command = new Script(_manageSnapshotPath, cmd.getWait() * 1000, s_logger); + Script command = new Script(_manageSnapshotPath, cmd.getWaitInMillSeconds(), s_logger); command.add("-b", snapshotDisk.getPath()); command.add("-n", snapshotName); command.add("-p", snapshotDestPath); @@ -798,6 +802,7 @@ public class KVMStorageProcessor implements StorageProcessor { } } + protected synchronized String attachOrDetachISO(Connect conn, String vmName, String isoPath, boolean isAttach) throws LibvirtException, URISyntaxException, InternalErrorException { String isoXml = null; @@ -823,7 +828,7 @@ public class KVMStorageProcessor implements StorageProcessor { if (result == null && !isAttach) { for (DiskDef disk : disks) { if (disk.getDeviceType() == DiskDef.deviceType.CDROM) { - this.resource.cleanupDisk(conn, disk); + this.resource.cleanupDisk(disk); } } @@ -967,9 +972,11 @@ public class KVMStorageProcessor implements StorageProcessor { String vmName = cmd.getVmName(); try { Connect conn = LibvirtConnection.getConnectionByVmName(vmName); - KVMStoragePool primary = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); - KVMPhysicalDisk phyDisk = primary.getPhysicalDisk(vol.getPath()); + storagePoolMgr.connectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath(), disk.getDetails()); + + KVMPhysicalDisk phyDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); + attachOrDetachDisk(conn, true, vmName, phyDisk, 
disk.getDiskSeq().intValue()); return new AttachAnswer(disk); @@ -990,11 +997,13 @@ public class KVMStorageProcessor implements StorageProcessor { String vmName = cmd.getVmName(); try { Connect conn = LibvirtConnection.getConnectionByVmName(vmName); - KVMStoragePool primary = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); - KVMPhysicalDisk phyDisk = primary.getPhysicalDisk(vol.getPath()); + KVMPhysicalDisk phyDisk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); + attachOrDetachDisk(conn, false, vmName, phyDisk, disk.getDiskSeq().intValue()); + storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath()); + return new DettachAnswer(disk); } catch (LibvirtException e) { s_logger.debug("Failed to attach volume: " + vol.getPath() + ", due to " + e.toString()); @@ -1017,7 +1026,7 @@ public class KVMStorageProcessor implements StorageProcessor { primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); disksize = volume.getSize(); - vol = primaryPool.createPhysicalDisk(UUID.randomUUID().toString(), disksize); + vol = primaryPool.createPhysicalDisk(volume.getUuid(), disksize); VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setPath(vol.getName()); @@ -1064,7 +1073,8 @@ public class KVMStorageProcessor implements StorageProcessor { KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(primaryStore.getPoolType(), primaryStore.getUuid()); - KVMPhysicalDisk disk = primaryPool.getPhysicalDisk(volume.getPath()); + KVMPhysicalDisk disk = storagePoolMgr.getPhysicalDisk(primaryStore.getPoolType(), + primaryStore.getUuid(), volume.getPath()); if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING && !primaryPool.isExternalSnapshot()) { String vmUuid = vm.getUUIDString(); Object[] args = new Object[] { snapshotName, vmUuid }; @@ -1191,7 +1201,7 @@ public class KVMStorageProcessor implements 
StorageProcessor { String primaryUuid = pool.getUuid(); KVMStoragePool primaryPool = storagePoolMgr.getStoragePool(pool.getPoolType(), primaryUuid); String volUuid = UUID.randomUUID().toString(); - KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, volUuid, primaryPool); + KVMPhysicalDisk disk = storagePoolMgr.copyPhysicalDisk(snapshotDisk, volUuid, primaryPool, cmd.getWaitInMillSeconds()); VolumeObjectTO newVol = new VolumeObjectTO(); newVol.setPath(disk.getName()); newVol.setSize(disk.getVirtualSize()); @@ -1213,4 +1223,14 @@ public Answer deleteSnapshot(DeleteCommand cmd) { return new Answer(cmd); } + + @Override + public Answer introduceObject(IntroduceObjectCmd cmd) { + return new Answer(cmd, false, "not implemented yet"); + } + + @Override + public Answer forgetObject(ForgetObjectCmd cmd) { + return new Answer(cmd, false, "not implemented yet"); + } } diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java index 5760725cbe8..8ef855e3df6 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStorageAdaptor.java @@ -101,10 +101,15 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { try { vol = pool.storageVolLookupByName(volName); } catch (LibvirtException e) { - + s_logger.debug("Can't find volume: " + e.toString()); } if (vol == null) { - storagePoolRefresh(pool); + try { + refreshPool(pool); + } catch (LibvirtException e) { + s_logger.debug("failed to refresh pool: " + e.toString()); + } + try { vol = pool.storageVolLookupByName(volName); } catch (LibvirtException e) { @@ -119,6 +124,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { LibvirtStorageVolumeDef volDef = new
LibvirtStorageVolumeDef(UUID .randomUUID().toString(), size, format, null, null); s_logger.debug(volDef.toString()); + return pool.storageVolCreateXML(volDef.toString(), 0); } @@ -128,7 +134,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { refreshPool(pool); } } catch (LibvirtException e) { - + s_logger.debug("refresh storage pool failed: " + e.toString()); } } @@ -397,7 +403,8 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { return pool; } catch (LibvirtException e) { - throw new CloudRuntimeException(e.toString()); + s_logger.debug("can't get storage pool",e); + throw new CloudRuntimeException(e.toString(), e); } } @@ -437,6 +444,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { } return disk; } catch (LibvirtException e) { + s_logger.debug("Failed to get physical disk:", e); throw new CloudRuntimeException(e.toString()); } @@ -698,6 +706,53 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { return disk; } + @Override + public boolean connectPhysicalDisk(String name, KVMStoragePool pool, Map details) { + // this is for managed storage that needs to prep disks prior to use + return true; + } + + @Override + public boolean disconnectPhysicalDisk(String uuid, KVMStoragePool pool) { + // this is for managed storage that needs to cleanup disks after use + return true; + } + + @Override + public boolean disconnectPhysicalDiskByPath(String localPath) { + // we've only ever cleaned up ISOs that are NFS mounted + + String poolUuid = null; + + if (localPath != null && localPath.startsWith(_mountPoint)) { + String[] token = localPath.split("/"); + + if (token.length > 3) { + poolUuid = token[2]; + } + } else { + return false; + } + + if (poolUuid == null) { + return false; + } + + try { + Connect conn = LibvirtConnection.getConnection(); + + StoragePool pool = conn.storagePoolLookupByUUIDString(poolUuid); + + deleteStoragePool(poolUuid); + + return true; + } catch (LibvirtException ex) { + return false; + 
} catch (CloudRuntimeException ex) { + return false; + } + } + @Override public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool) { @@ -758,9 +813,9 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { */ @Override public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, - String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool) { + String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool, int timeout) { - String newUuid = UUID.randomUUID().toString(); + String newUuid = name; KVMStoragePool srcPool = template.getPool(); KVMPhysicalDisk disk = null; @@ -775,20 +830,20 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { if (destPool.getType() != StoragePoolType.RBD) { disk = destPool.createPhysicalDisk(newUuid, format, template.getVirtualSize()); if (template.getFormat() == PhysicalDiskFormat.TAR) { - Script.runSimpleBashScript("tar -x -f " + template.getPath() + " -C " + disk.getPath()); + Script.runSimpleBashScript("tar -x -f " + template.getPath() + " -C " + disk.getPath(), timeout); } else if (template.getFormat() == PhysicalDiskFormat.DIR) { Script.runSimpleBashScript("mkdir -p " + disk.getPath()); Script.runSimpleBashScript("chmod 755 " + disk.getPath()); - Script.runSimpleBashScript("cp -p -r " + template.getPath() + "/* " + disk.getPath()); + Script.runSimpleBashScript("cp -p -r " + template.getPath() + "/* " + disk.getPath(), timeout); } else if (format == PhysicalDiskFormat.QCOW2) { QemuImgFile backingFile = new QemuImgFile(template.getPath(), template.getFormat()); QemuImgFile destFile = new QemuImgFile(disk.getPath()); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(timeout); qemu.create(destFile, backingFile); } else if (format == PhysicalDiskFormat.RAW) { QemuImgFile sourceFile = new QemuImgFile(template.getPath(), template.getFormat()); QemuImgFile destFile = new QemuImgFile(disk.getPath(), PhysicalDiskFormat.RAW); - QemuImg qemu = new QemuImg(); + 
QemuImg qemu = new QemuImg(timeout); qemu.convert(sourceFile, destFile); } } else { @@ -798,7 +853,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { disk.setSize(template.getVirtualSize()); disk.setVirtualSize(disk.getSize()); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(timeout); QemuImgFile srcFile; QemuImgFile destFile = new QemuImgFile(KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(), destPool.getSourcePort(), @@ -952,7 +1007,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { */ @Override public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, - KVMStoragePool destPool) { + KVMStoragePool destPool, int timeout) { /** With RBD you can't run qemu-img convert with an existing RBD image as destination @@ -991,24 +1046,27 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { String destPath = newDisk.getPath(); PhysicalDiskFormat destFormat = newDisk.getFormat(); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(timeout); QemuImgFile srcFile = null; QemuImgFile destFile = null; if ((srcPool.getType() != StoragePoolType.RBD) && (destPool.getType() != StoragePoolType.RBD)) { if (sourceFormat == PhysicalDiskFormat.TAR) { - Script.runSimpleBashScript("tar -x -f " + sourcePath + " -C " + destPath); + Script.runSimpleBashScript("tar -x -f " + sourcePath + " -C " + destPath, timeout); } else if (sourceFormat == PhysicalDiskFormat.DIR) { Script.runSimpleBashScript("mkdir -p " + destPath); Script.runSimpleBashScript("chmod 755 " + destPath); - Script.runSimpleBashScript("cp -p -r " + sourcePath + "/* " + destPath); + Script.runSimpleBashScript("cp -p -r " + sourcePath + "/* " + destPath, timeout); } else { srcFile = new QemuImgFile(sourcePath, sourceFormat); try { Map info = qemu.info(srcFile); String backingFile = info.get(new String("backing_file")); if (sourceFormat.equals(destFormat) && backingFile == null) { - Script.runSimpleBashScript("cp -f " + sourcePath + " " + 
destPath); + String result = Script.runSimpleBashScript("cp -f " + sourcePath + " " + destPath, timeout); + if (result != null) { + throw new CloudRuntimeException("Failed to create disk: " + result); + } } else { destFile = new QemuImgFile(destPath, destFormat); try { @@ -1169,45 +1227,12 @@ public class LibvirtStorageAdaptor implements StorageAdaptor { // However, we also need to fix the issues in CloudStack source code. // A file lock is used to prevent deleting a volume from a KVM storage pool when refresh it. private void refreshPool(StoragePool pool) throws LibvirtException { - Connect conn = LibvirtConnection.getConnection(); - LibvirtStoragePoolDef spd = getStoragePoolDef(conn, pool); - if ((! spd.getPoolType().equals(LibvirtStoragePoolDef.poolType.NETFS)) - && (! spd.getPoolType().equals(LibvirtStoragePoolDef.poolType.DIR))) { - pool.refresh(0); - return; - } - String lockFile = spd.getTargetPath() + File.separator + _lockfile; - s_logger.debug("Attempting to lock pool " + pool.getName() + " with file " + lockFile); - if (lock(lockFile, ACQUIRE_GLOBAL_FILELOCK_TIMEOUT_FOR_KVM)) { - try { - pool.refresh(0); - } finally { - s_logger.debug("Releasing the lock on pool " + pool.getName() + " with file " + lockFile); - unlock(lockFile); - } - } else { - throw new CloudRuntimeException("Can not get file lock to refresh the pool " + pool.getName()); - } + pool.refresh(0); + return; } private void deleteVol(LibvirtStoragePool pool, StorageVol vol) throws LibvirtException { - if ((! pool.getType().equals(StoragePoolType.NetworkFilesystem)) - && (! 
pool.getType().equals(StoragePoolType.Filesystem))) { - vol.delete(0); - return; - } - String lockFile = pool.getLocalPath() + File.separator + _lockfile; - s_logger.debug("Attempting to lock pool " + pool.getName() + " with file " + lockFile); - if (lock(lockFile, ACQUIRE_GLOBAL_FILELOCK_TIMEOUT_FOR_KVM)) { - try { - vol.delete(0); - } finally { - s_logger.debug("Releasing the lock on pool " + pool.getName() + " with file " + lockFile); - unlock(lockFile); - } - } else { - throw new CloudRuntimeException("Can not get file lock to delete the volume " + vol.getName()); - } + vol.delete(0); } private boolean lock(String path, int wait) { diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java index 2ce517504d6..df0af5f0530 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/LibvirtStoragePool.java @@ -16,14 +16,20 @@ // under the License. 
package com.cloud.hypervisor.kvm.storage; +import java.io.File; import java.util.List; +import java.util.Map; +import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; +import org.apache.log4j.Logger; import org.libvirt.StoragePool; import com.cloud.storage.Storage.StoragePoolType; public class LibvirtStoragePool implements KVMStoragePool { + private static final Logger s_logger = Logger + .getLogger(LibvirtStoragePool.class); protected String uuid; protected String uri; protected long capacity; @@ -120,7 +126,42 @@ public class LibvirtStoragePool implements KVMStoragePool { @Override public KVMPhysicalDisk getPhysicalDisk(String volumeUuid) { - return this._storageAdaptor.getPhysicalDisk(volumeUuid, this); + KVMPhysicalDisk disk = null; + try { + disk = this._storageAdaptor.getPhysicalDisk(volumeUuid, this); + } catch (CloudRuntimeException e) { + if ((this.getStoragePoolType() != StoragePoolType.NetworkFilesystem) && + (this.getStoragePoolType() != StoragePoolType.Filesystem)) { + throw e; + } + } + + if (disk != null) { + return disk; + } + s_logger.debug("find volume bypass libvirt"); + //For network file system or file system, try to use java file to find the volume, instead of through libvirt. 
BUG:CLOUDSTACK-4459 + String localPoolPath = this.getLocalPath(); + File f = new File(localPoolPath + File.separator + volumeUuid); + if (!f.exists()) { + s_logger.debug("volume: " + volumeUuid + " not exist on storage pool"); + throw new CloudRuntimeException("Can't find volume:" + volumeUuid); + } + disk = new KVMPhysicalDisk(f.getPath(), volumeUuid, this); + disk.setFormat(PhysicalDiskFormat.QCOW2); + disk.setSize(f.length()); + disk.setVirtualSize(f.length()); + return disk; + } + + @Override + public boolean connectPhysicalDisk(String name, Map details) { + return true; + } + + @Override + public boolean disconnectPhysicalDisk(String uuid) { + return true; } @Override diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java index 4956d8d4717..c5ff4218cfc 100644 --- a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/StorageAdaptor.java @@ -17,6 +17,7 @@ package com.cloud.hypervisor.kvm.storage; import java.util.List; +import java.util.Map; import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; import com.cloud.storage.Storage.StoragePoolType; @@ -25,6 +26,8 @@ public interface StorageAdaptor { public KVMStoragePool getStoragePool(String uuid); + // given disk path (per database) and pool, create new KVMPhysicalDisk, populate + // it with info from local disk, and return it public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool); @@ -36,11 +39,21 @@ public interface StorageAdaptor { public KVMPhysicalDisk createPhysicalDisk(String name, KVMStoragePool pool, PhysicalDiskFormat format, long size); + // given disk path (per database) and pool, prepare disk on host + public boolean connectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details); + + // given disk path (per database) and pool, clean up disk 
on host + public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool); + + // given local path to file/device (per Libvirt XML), 1) check that device is + // handled by your adaptor, return false if not. 2) clean up device, return true + public boolean disconnectPhysicalDiskByPath(String localPath); + public boolean deletePhysicalDisk(String uuid, KVMStoragePool pool); public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, - KVMStoragePool destPool); + KVMStoragePool destPool, int timeout); public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, PhysicalDiskFormat format, long size, @@ -50,7 +63,7 @@ public interface StorageAdaptor { KVMStoragePool pool); public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, - KVMStoragePool destPools); + KVMStoragePool destPools, int timeout); public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool); diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/iScsiAdmStorageAdaptor.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/iScsiAdmStorageAdaptor.java new file mode 100644 index 00000000000..476a727bc70 --- /dev/null +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/iScsiAdmStorageAdaptor.java @@ -0,0 +1,373 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.hypervisor.kvm.storage; + +import java.util.List; +import java.util.Map; +import java.util.HashMap; + +import org.apache.log4j.Logger; +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; + +import com.cloud.agent.api.to.DiskTO; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.utils.StringUtils; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.script.OutputInterpreter; +import com.cloud.utils.script.Script; + +public class iScsiAdmStorageAdaptor implements StorageAdaptor { + private static final Logger s_logger = Logger.getLogger(iScsiAdmStorageAdaptor.class); + + private static final Map _mapStorageUuidToStoragePool = new HashMap(); + + @Override + public KVMStoragePool createStoragePool(String uuid, String host, int port, String path, String userInfo, StoragePoolType storagePoolType) { + iScsiAdmStoragePool storagePool = new iScsiAdmStoragePool(uuid, host, port, storagePoolType, this); + + _mapStorageUuidToStoragePool.put(uuid, storagePool); + + return storagePool; + } + + @Override + public KVMStoragePool getStoragePool(String uuid) { + return _mapStorageUuidToStoragePool.get(uuid); + } + + @Override + public boolean deleteStoragePool(String uuid) { + return _mapStorageUuidToStoragePool.remove(uuid) != null; + } + + @Override + public boolean deleteStoragePool(KVMStoragePool pool) { + return deleteStoragePool(pool.getUuid()); + } + + // called from LibvirtComputingResource.execute(CreateCommand) + // does not apply for iScsiAdmStorageAdaptor + @Override + 
public KVMPhysicalDisk createPhysicalDisk(String volumeUuid, KVMStoragePool pool, PhysicalDiskFormat format, long size) { + throw new UnsupportedOperationException("Creating a physical disk is not supported."); + } + + @Override + public boolean connectPhysicalDisk(String volumeUuid, KVMStoragePool pool, Map details) { + // ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 -o new + Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger); + + iScsiAdmCmd.add("-m", "node"); + iScsiAdmCmd.add("-T", getIqn(volumeUuid)); + iScsiAdmCmd.add("-p", pool.getSourceHost() + ":" + pool.getSourcePort()); + iScsiAdmCmd.add("-o", "new"); + + String result = iScsiAdmCmd.execute(); + + if (result != null) { + s_logger.debug("Failed to add iSCSI target " + volumeUuid); + System.out.println("Failed to add iSCSI target " + volumeUuid); + + return false; + } + else { + s_logger.debug("Successfully added iSCSI target " + volumeUuid); + System.out.println("Successfully added to iSCSI target " + volumeUuid); + } + + String chapInitiatorUsername = details.get(DiskTO.CHAP_INITIATOR_USERNAME); + String chapInitiatorSecret = details.get(DiskTO.CHAP_INITIATOR_SECRET); + + if (StringUtils.isNotBlank(chapInitiatorUsername) && StringUtils.isNotBlank(chapInitiatorSecret)) { + try { + // ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --op update -n node.session.auth.authmethod -v CHAP + executeChapCommand(volumeUuid, pool, "node.session.auth.authmethod", "CHAP", null); + + // ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --op update -n node.session.auth.username -v username + executeChapCommand(volumeUuid, pool, "node.session.auth.username", chapInitiatorUsername, "username"); + + // ex. 
sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 --op update -n node.session.auth.password -v password + executeChapCommand(volumeUuid, pool, "node.session.auth.password", chapInitiatorSecret, "password"); + } + catch (Exception ex) { + return false; + } + } + + // ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10 --login + iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger); + + iScsiAdmCmd.add("-m", "node"); + iScsiAdmCmd.add("-T", getIqn(volumeUuid)); + iScsiAdmCmd.add("-p", pool.getSourceHost() + ":" + pool.getSourcePort()); + iScsiAdmCmd.add("--login"); + + result = iScsiAdmCmd.execute(); + + if (result != null) { + s_logger.debug("Failed to log in to iSCSI target " + volumeUuid); + System.out.println("Failed to log in to iSCSI target " + volumeUuid); + + return false; + } + else { + s_logger.debug("Successfully logged in to iSCSI target " + volumeUuid); + System.out.println("Successfully logged in to iSCSI target " + volumeUuid); + } + + // There appears to be a race condition where logging in to the iSCSI volume via iscsiadm + // returns success before the device has been added to the OS. + // What happens is you get logged in and the device shows up, but the device may not + // show up before we invoke Libvirt to attach the device to a VM. + // waitForDiskToBecomeAvailable(String, KVMStoragePool) invokes blockdev + // via getPhysicalDisk(String, KVMStoragePool) and checks if the size came back greater + // than 0. + // After a certain number of tries and a certain waiting period in between tries, + // this method could still return (it should not block indefinitely) (the race condition + // isn't solved here, but made highly unlikely to be a problem). 
+ waitForDiskToBecomeAvailable(volumeUuid, pool); + + return true; + } + + private void waitForDiskToBecomeAvailable(String volumeUuid, KVMStoragePool pool) { + int numberOfTries = 10; + int timeBetweenTries = 1000; + + while (getPhysicalDisk(volumeUuid, pool).getSize() == 0 && numberOfTries > 0) { + numberOfTries--; + + try { + Thread.sleep(timeBetweenTries); + } + catch (Exception ex) { + // don't do anything + } + } + } + + private void executeChapCommand(String path, KVMStoragePool pool, String nParameter, String vParameter, String detail) throws Exception { + Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger); + + iScsiAdmCmd.add("-m", "node"); + iScsiAdmCmd.add("-T", getIqn(path)); + iScsiAdmCmd.add("-p", pool.getSourceHost() + ":" + pool.getSourcePort()); + iScsiAdmCmd.add("--op", "update"); + iScsiAdmCmd.add("-n", nParameter); + iScsiAdmCmd.add("-v", vParameter); + + String result = iScsiAdmCmd.execute(); + + boolean useDetail = detail != null && detail.trim().length() > 0; + + detail = useDetail ? detail.trim() + " " : detail; + + if (result != null) { + s_logger.debug("Failed to execute CHAP " + (useDetail ? detail : "") + "command for iSCSI target " + path + " : message = " + result); + System.out.println("Failed to execute CHAP " + (useDetail ? detail : "") + "command for iSCSI target " + path + " : message = " + result); + + throw new Exception("Failed to execute CHAP " + (useDetail ? detail : "") + "command for iSCSI target " + path + " : message = " + result); + } else { + s_logger.debug("CHAP " + (useDetail ? detail : "") + "command executed successfully for iSCSI target " + path); + System.out.println("CHAP " + (useDetail ? 
detail : "") + "command executed successfully for iSCSI target " + path); + } + } + + // example by-path: /dev/disk/by-path/ip-192.168.233.10:3260-iscsi-iqn.2012-03.com.solidfire:storagepool2-lun-0 + private String getByPath(String host, String path) { + return "/dev/disk/by-path/ip-" + host + "-iscsi-" + getIqn(path) + "-lun-" + getLun(path); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumeUuid, KVMStoragePool pool) { + String deviceByPath = getByPath(pool.getSourceHost() + ":" + pool.getSourcePort(), volumeUuid); + KVMPhysicalDisk physicalDisk = new KVMPhysicalDisk(deviceByPath, volumeUuid, pool); + + physicalDisk.setFormat(PhysicalDiskFormat.RAW); + + long deviceSize = getDeviceSize(deviceByPath); + + physicalDisk.setSize(deviceSize); + physicalDisk.setVirtualSize(deviceSize); + + return physicalDisk; + } + + private long getDeviceSize(String deviceByPath) { + Script iScsiAdmCmd = new Script(true, "blockdev", 0, s_logger); + + iScsiAdmCmd.add("--getsize64", deviceByPath); + + OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser(); + + String result = iScsiAdmCmd.execute(parser); + + if (result != null) { + s_logger.warn("Unable to retrieve the size of device " + deviceByPath); + + return 0; + } + + return Long.parseLong(parser.getLine()); + } + + private static String getIqn(String path) { + return getComponent(path, 1); + } + + private static String getLun(String path) { + return getComponent(path, 2); + } + + private static String getComponent(String path, int index) { + String[] tmp = path.split("/"); + + if (tmp.length != 3) { + String msg = "Wrong format for iScsi path: " + path + ". It should be formatted as '/targetIQN/LUN'."; + + s_logger.warn(msg); + + throw new CloudRuntimeException(msg); + } + + return tmp[index].trim(); + } + + public boolean disconnectPhysicalDisk(String host, int port, String iqn, String lun) { + // use iscsiadm to log out of the iSCSI target and un-discover it + + // ex. 
sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10 --logout + Script iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger); + + iScsiAdmCmd.add("-m", "node"); + iScsiAdmCmd.add("-T", iqn); + iScsiAdmCmd.add("-p", host + ":" + port); + iScsiAdmCmd.add("--logout"); + + String result = iScsiAdmCmd.execute(); + + if (result != null) { + s_logger.debug("Failed to log out of iSCSI target /" + iqn + "/" + lun + " : message = " + result); + System.out.println("Failed to log out of iSCSI target /" + iqn + "/" + lun + " : message = " + result); + + return false; + } + else { + s_logger.debug("Successfully logged out of iSCSI target /" + iqn + "/" + lun); + System.out.println("Successfully logged out of iSCSI target /" + iqn + "/" + lun); + } + + // ex. sudo iscsiadm -m node -T iqn.2012-03.com.test:volume1 -p 192.168.233.10:3260 -o delete + iScsiAdmCmd = new Script(true, "iscsiadm", 0, s_logger); + + iScsiAdmCmd.add("-m", "node"); + iScsiAdmCmd.add("-T", iqn); + iScsiAdmCmd.add("-p", host + ":" + port); + iScsiAdmCmd.add("-o", "delete"); + + result = iScsiAdmCmd.execute(); + + if (result != null) { + s_logger.debug("Failed to remove iSCSI target /" + iqn + "/" + lun + " : message = " + result); + System.out.println("Failed to remove iSCSI target /" + iqn + "/" + lun + " : message = " + result); + + return false; + } else { + s_logger.debug("Removed iSCSI target /" + iqn + "/" + lun); + System.out.println("Removed iSCSI target /" + iqn + "/" + lun); + } + + return true; + } + + @Override + public boolean disconnectPhysicalDisk(String volumeUuid, KVMStoragePool pool) { + return disconnectPhysicalDisk(pool.getSourceHost(), pool.getSourcePort(), getIqn(volumeUuid), getLun(volumeUuid)); + } + + @Override + public boolean disconnectPhysicalDiskByPath(String localPath) { + String search1 = "/dev/disk/by-path/ip-"; + String search2 = ":"; + String search3 = "-iscsi-"; + String search4 = "-lun-"; + + if (localPath.indexOf(search3) == -1) { + // this volume 
doesn't belong to this adaptor, so just return true + return true; + } + + int index = localPath.indexOf(search2); + + String host = localPath.substring(search1.length(), index); + + int index2 = localPath.indexOf(search3); + + String port = localPath.substring(index + search2.length(), index2); + + index = localPath.indexOf(search4); + + String iqn = localPath.substring(index2 + search3.length(), index); + + String lun = localPath.substring(index + search4.length()); + + return disconnectPhysicalDisk(host, Integer.parseInt(port), iqn, lun); + } + + @Override + public boolean deletePhysicalDisk(String volumeUuid, KVMStoragePool pool) { + throw new UnsupportedOperationException("Deleting a physical disk is not supported."); + } + + // does not apply for iScsiAdmStorageAdaptor + @Override + public List listPhysicalDisks(String storagePoolUuid, KVMStoragePool pool) { + throw new UnsupportedOperationException("Listing disks is not supported for this configuration."); + } + + @Override + public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name, PhysicalDiskFormat format, long size, + KVMStoragePool destPool, int timeout) { + throw new UnsupportedOperationException("Creating a disk from a template is not yet supported for this configuration."); + } + + @Override + public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk, String name, PhysicalDiskFormat format, + long size, KVMStoragePool destPool) { + throw new UnsupportedOperationException("Creating a template from a disk is not yet supported for this configuration."); + } + + @Override + public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name, KVMStoragePool destPool, int timeout) { + throw new UnsupportedOperationException("Copying a disk is not supported in this configuration."); + } + + @Override + public KVMPhysicalDisk createDiskFromSnapshot(KVMPhysicalDisk snapshot, String snapshotName, String name, KVMStoragePool destPool) { + throw new 
UnsupportedOperationException("Creating a disk from a snapshot is not supported in this configuration."); + } + + @Override + public boolean refresh(KVMStoragePool pool) { + return true; + } + + @Override + public boolean createFolder(String uuid, String path) { + throw new UnsupportedOperationException("A folder cannot be created in this configuration."); + } +} diff --git a/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/iScsiAdmStoragePool.java b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/iScsiAdmStoragePool.java new file mode 100644 index 00000000000..380881503ea --- /dev/null +++ b/plugins/hypervisors/kvm/src/com/cloud/hypervisor/kvm/storage/iScsiAdmStoragePool.java @@ -0,0 +1,167 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.hypervisor.kvm.storage; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.utils.qemu.QemuImg.PhysicalDiskFormat; + +import com.cloud.storage.Storage.StoragePoolType; + +public class iScsiAdmStoragePool implements KVMStoragePool { + private String _uuid; + private String _sourceHost; + private int _sourcePort; + private StoragePoolType _storagePoolType; + private StorageAdaptor _storageAdaptor; + private String _authUsername; + private String _authSecret; + private String _sourceDir; + private String _localPath; + + public iScsiAdmStoragePool(String uuid, String host, int port, StoragePoolType storagePoolType, StorageAdaptor storageAdaptor) { + _uuid = uuid; + _sourceHost = host; + _sourcePort = port; + _storagePoolType = storagePoolType; + _storageAdaptor = storageAdaptor; + } + + @Override + public String getUuid() { + return _uuid; + } + + @Override + public String getSourceHost() { + return _sourceHost; + } + + @Override + public int getSourcePort() { + return _sourcePort; + } + + @Override + public long getCapacity() { + return 0; + } + + @Override + public long getUsed() { + return 0; + } + + @Override + public long getAvailable() { + return 0; + } + + @Override + public StoragePoolType getType() { + return _storagePoolType; + } + + @Override + public PhysicalDiskFormat getDefaultFormat() { + return PhysicalDiskFormat.RAW; + } + + // called from LibvirtComputingResource.copyPhysicalDisk(KVMPhysicalDisk, String, KVMStoragePool) and + // from LibvirtComputingResource.createDiskFromTemplate(KVMPhysicalDisk, String, PhysicalDiskFormat, long, KVMStoragePool) + // does not apply for iScsiAdmStoragePool + @Override + public KVMPhysicalDisk createPhysicalDisk(String name, PhysicalDiskFormat format, long size) { + throw new UnsupportedOperationException("Creating a physical disk is not supported."); + } + + // called from LibvirtComputingResource.execute(CreateCommand) and + // from 
KVMStorageProcessor.createVolume(CreateObjectCommand) + // does not apply for iScsiAdmStoragePool + @Override + public KVMPhysicalDisk createPhysicalDisk(String name, long size) { + throw new UnsupportedOperationException("Creating a physical disk is not supported."); + } + + @Override + public boolean connectPhysicalDisk(String name, Map details) { + return this._storageAdaptor.connectPhysicalDisk(name, this, details); + } + + @Override + public KVMPhysicalDisk getPhysicalDisk(String volumeUuid) { + return this._storageAdaptor.getPhysicalDisk(volumeUuid, this); + } + + @Override + public boolean disconnectPhysicalDisk(String volumeUuid) { + return this._storageAdaptor.disconnectPhysicalDisk(volumeUuid, this); + } + + @Override + public boolean deletePhysicalDisk(String volumeUuid) { + return this._storageAdaptor.deletePhysicalDisk(volumeUuid, this); + } + + // does not apply for iScsiAdmStoragePool + @Override + public List listPhysicalDisks() { + return this._storageAdaptor.listPhysicalDisks(_uuid, this); + } + + // does not apply for iScsiAdmStoragePool + @Override + public boolean refresh() { + return this._storageAdaptor.refresh(this); + } + + @Override + public boolean delete() { + return this._storageAdaptor.deleteStoragePool(this); + } + + @Override + public boolean createFolder(String path) { + return this._storageAdaptor.createFolder(_uuid, path); + } + + @Override + public boolean isExternalSnapshot() { + return false; + } + + @Override + public String getAuthUserName() { + return _authUsername; + } + + @Override + public String getAuthSecret() { + return _authSecret; + } + + @Override + public String getSourceDir() { + return _sourceDir; + } + + @Override + public String getLocalPath() { + return _localPath; + } +} diff --git a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java index 4ed85930e1f..0e83bc943e1 100644 --- 
a/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java +++ b/plugins/hypervisors/kvm/src/org/apache/cloudstack/utils/qemu/QemuImg.java @@ -31,6 +31,7 @@ public class QemuImg { /* The qemu-img binary. We expect this to be in $PATH */ public String _qemuImgPath = "qemu-img"; + private int timeout; /* Shouldn't we have KVMPhysicalDisk and LibvirtVMDef read this? */ public static enum PhysicalDiskFormat { @@ -46,8 +47,12 @@ public class QemuImg { } } - public QemuImg() { + public QemuImg(int timeout) { + this.timeout = timeout; + } + public void setTimeout(int timeout) { + this.timeout = timeout; } /** @@ -84,7 +89,7 @@ public class QemuImg { * @return void */ public void create(QemuImgFile file, QemuImgFile backingFile, Map options) throws QemuImgException { - Script s = new Script(_qemuImgPath); + Script s = new Script(_qemuImgPath, timeout); s.add("create"); if (options != null && !options.isEmpty()) { @@ -181,7 +186,7 @@ public class QemuImg { * @return void */ public void convert(QemuImgFile srcFile, QemuImgFile destFile, Map options) throws QemuImgException { - Script s = new Script(_qemuImgPath); + Script s = new Script(_qemuImgPath, timeout); s.add("convert"); s.add("-f"); s.add(srcFile.getFormat().toString()); diff --git a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java index 3640030ad8c..d6e8dc2fcc2 100644 --- a/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java +++ b/plugins/hypervisors/kvm/test/com/cloud/hypervisor/kvm/resource/LibvirtComputingResourceTest.java @@ -24,11 +24,13 @@ import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.utils.Pair; import com.cloud.vm.VirtualMachine; +import junit.framework.Assert; import org.apache.commons.lang.SystemUtils; import org.junit.Assume; import org.junit.Test; import 
java.util.Random; +import java.util.UUID; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -195,4 +197,17 @@ public class LibvirtComputingResourceTest { Pair stats = LibvirtComputingResource.getNicStats("lo"); assertNotNull(stats); } + + @Test + public void testUUID() { + String uuid = "1"; + LibvirtComputingResource lcr = new LibvirtComputingResource(); + uuid =lcr.getUuid(uuid); + Assert.assertTrue(!uuid.equals("1")); + + String oldUuid = UUID.randomUUID().toString(); + uuid = oldUuid; + uuid = lcr.getUuid(uuid); + Assert.assertTrue(uuid.equals(oldUuid)); + } } diff --git a/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java b/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java index 9c6ac8b1e6a..5244dda9243 100644 --- a/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java +++ b/plugins/hypervisors/kvm/test/org/apache/cloudstack/utils/qemu/QemuImgTest.java @@ -38,7 +38,7 @@ public class QemuImgTest { long size = 10995116277760l; QemuImgFile file = new QemuImgFile(filename, size, PhysicalDiskFormat.QCOW2); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(file); Map info = qemu.info(file); @@ -69,7 +69,7 @@ public class QemuImgTest { options.put("cluster_size", clusterSize); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(file, options); Map info = qemu.info(file); @@ -96,7 +96,7 @@ public class QemuImgTest { QemuImgFile file = new QemuImgFile(filename, startSize, PhysicalDiskFormat.QCOW2); try { - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(file); qemu.resize(file, endSize); Map info = qemu.info(file); @@ -125,7 +125,7 @@ public class QemuImgTest { QemuImgFile file = new QemuImgFile(filename, startSize, PhysicalDiskFormat.RAW); try { - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(file); qemu.resize(file, increment, true); Map 
info = qemu.info(file); @@ -153,7 +153,7 @@ public class QemuImgTest { QemuImgFile file = new QemuImgFile(filename, startSize, PhysicalDiskFormat.RAW); try { - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(file); qemu.resize(file, increment, true); Map info = qemu.info(file); @@ -182,7 +182,7 @@ public class QemuImgTest { long endSize = -1; QemuImgFile file = new QemuImgFile(filename, startSize, PhysicalDiskFormat.QCOW2); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); try { qemu.create(file); qemu.resize(file, endSize); @@ -199,7 +199,7 @@ public class QemuImgTest { long startSize = 20480; QemuImgFile file = new QemuImgFile(filename, 20480, PhysicalDiskFormat.QCOW2); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(file); qemu.resize(file, 0); @@ -216,7 +216,7 @@ public class QemuImgTest { QemuImgFile firstFile = new QemuImgFile(firstFileName, 20480, PhysicalDiskFormat.QCOW2); QemuImgFile secondFile = new QemuImgFile(secondFileName, PhysicalDiskFormat.QCOW2); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(firstFile); qemu.create(secondFile, firstFile); @@ -240,7 +240,7 @@ public class QemuImgTest { QemuImgFile srcFile = new QemuImgFile(srcFileName, srcSize); QemuImgFile destFile = new QemuImgFile(destFileName); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(srcFile); qemu.convert(srcFile, destFile); Map info = qemu.info(destFile); @@ -267,7 +267,7 @@ public class QemuImgTest { QemuImgFile srcFile = new QemuImgFile(srcFileName, srcSize, srcFormat); QemuImgFile destFile = new QemuImgFile(destFileName, destFormat); - QemuImg qemu = new QemuImg(); + QemuImg qemu = new QemuImg(0); qemu.create(srcFile); qemu.convert(srcFile, destFile); diff --git a/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-compute/module.properties b/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-compute/module.properties new file mode 100644 
index 00000000000..1d93fa1efe7 --- /dev/null +++ b/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=ovm-compute +parent=compute \ No newline at end of file diff --git a/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-compute/spring-ovm-compute-context.xml b/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-compute/spring-ovm-compute-context.xml new file mode 100644 index 00000000000..b12672a6a45 --- /dev/null +++ b/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-compute/spring-ovm-compute-context.xml @@ -0,0 +1,39 @@ + + + + + + + + + + + + + diff --git a/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-discoverer/module.properties b/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-discoverer/module.properties new file mode 100644 index 00000000000..3a4b1f8740f --- /dev/null +++ b/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-discoverer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=ovm-discoverer +parent=discoverer \ No newline at end of file diff --git a/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-discoverer/spring-ovm-discoverer-context.xml b/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-discoverer/spring-ovm-discoverer-context.xml new file mode 100644 index 00000000000..b0f2a15519f --- /dev/null +++ b/plugins/hypervisors/ovm/resources/META-INF/cloudstack/ovm-discoverer/spring-ovm-discoverer-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java index 1d8f4f06c56..eba7ae56ef6 100755 --- a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java +++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmDiscoverer.java @@ -31,9 +31,9 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.apache.xmlrpc.XmlRpcException; -import com.cloud.configuration.Config; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupRoutingCommand; +import com.cloud.configuration.Config; import com.cloud.dc.ClusterVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.exception.DiscoveryException; @@ -48,244 +48,214 @@ import 
com.cloud.resource.ResourceManager; import com.cloud.resource.ResourceStateAdapter; import com.cloud.resource.ServerResource; import com.cloud.resource.UnableDeleteHostException; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.ssh.SSHCmdHelper; @Local(value = Discoverer.class) -public class OvmDiscoverer extends DiscovererBase implements Discoverer, - ResourceStateAdapter { - private static final Logger s_logger = Logger - .getLogger(OvmDiscoverer.class); - protected String _publicNetworkDevice; - protected String _privateNetworkDevice; - protected String _guestNetworkDevice; +public class OvmDiscoverer extends DiscovererBase implements Discoverer, ResourceStateAdapter { + private static final Logger s_logger = Logger.getLogger(OvmDiscoverer.class); + protected String _publicNetworkDevice; + protected String _privateNetworkDevice; + protected String _guestNetworkDevice; - @Inject - ClusterDao _clusterDao; - @Inject - ResourceManager _resourceMgr; + @Inject + ClusterDao _clusterDao; + @Inject + ResourceManager _resourceMgr; - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - super.configure(name, params); - _publicNetworkDevice = _params.get(Config.OvmPublicNetwork.key()); - _privateNetworkDevice = _params.get(Config.OvmPrivateNetwork.key()); - _guestNetworkDevice = _params.get(Config.OvmGuestNetwork.key()); - _resourceMgr.registerResourceStateAdapter(this.getClass() - .getSimpleName(), this); - return true; - } + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + super.configure(name, params); + _publicNetworkDevice = _params.get(Config.OvmPublicNetwork.key()); + _privateNetworkDevice = _params.get(Config.OvmPrivateNetwork.key()); + _guestNetworkDevice = _params.get(Config.OvmGuestNetwork.key()); + 
_resourceMgr.registerResourceStateAdapter(this.getClass().getSimpleName(), this); + return true; + } - protected OvmDiscoverer() { - } + protected OvmDiscoverer() { + } - @Override - public boolean stop() { - _resourceMgr.unregisterResourceStateAdapter(this.getClass() - .getSimpleName()); - return super.stop(); - } + @Override + public boolean stop() { + _resourceMgr.unregisterResourceStateAdapter(this.getClass().getSimpleName()); + return super.stop(); + } - private boolean checkIfExisted(String guid) { - SearchCriteria2 sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getGuid(), SearchCriteria.Op.EQ, guid); - sc.addAnd(sc.getEntity().getHypervisorType(), SearchCriteria.Op.EQ, - HypervisorType.Ovm); - List hosts = sc.list(); - return !hosts.isEmpty(); - } + private boolean checkIfExisted(String guid) { + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getGuid(), SearchCriteria.Op.EQ, guid); + sc.and(sc.entity().getHypervisorType(), SearchCriteria.Op.EQ, HypervisorType.Ovm); + List hosts = sc.list(); + return !hosts.isEmpty(); + } - @Override - public Map> find(long dcId, - Long podId, Long clusterId, URI url, String username, - String password, List hostTags) throws DiscoveryException { - Connection conn = null; + @Override + public Map> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List hostTags) + throws DiscoveryException { + Connection conn = null; - if (!url.getScheme().equals("http")) { - String msg = "urlString is not http so we're not taking care of the discovery for this: " - + url; - s_logger.debug(msg); - return null; - } - if (clusterId == null) { - String msg = "must specify cluster Id when add host"; - s_logger.debug(msg); - throw new CloudRuntimeException(msg); - } + if (!url.getScheme().equals("http")) { + String msg = "urlString is not http so we're not taking care of the discovery for this: " + url; + s_logger.debug(msg); + return null; + } + if (clusterId == 
null) { + String msg = "must specify cluster Id when add host"; + s_logger.debug(msg); + throw new CloudRuntimeException(msg); + } - if (podId == null) { - String msg = "must specify pod Id when add host"; - s_logger.debug(msg); - throw new CloudRuntimeException(msg); - } + if (podId == null) { + String msg = "must specify pod Id when add host"; + s_logger.debug(msg); + throw new CloudRuntimeException(msg); + } - ClusterVO cluster = _clusterDao.findById(clusterId); - if (cluster == null - || (cluster.getHypervisorType() != HypervisorType.Ovm)) { - if (s_logger.isInfoEnabled()) - s_logger.info("invalid cluster id or cluster is not for Ovm hypervisors"); - return null; - } + ClusterVO cluster = _clusterDao.findById(clusterId); + if (cluster == null || (cluster.getHypervisorType() != HypervisorType.Ovm)) { + if (s_logger.isInfoEnabled()) + s_logger.info("invalid cluster id or cluster is not for Ovm hypervisors"); + return null; + } - String agentUsername = _params.get("agentusername"); - if (agentUsername == null) { - throw new CloudRuntimeException("Agent user name must be specified"); - } + String agentUsername = _params.get("agentusername"); + if (agentUsername == null) { + throw new CloudRuntimeException("Agent user name must be specified"); + } - String agentPassword = _params.get("agentpassword"); - if (agentPassword == null) { - throw new CloudRuntimeException("Agent password must be specified"); - } + String agentPassword = _params.get("agentpassword"); + if (agentPassword == null) { + throw new CloudRuntimeException("Agent password must be specified"); + } - try { - String hostname = url.getHost(); - InetAddress ia = InetAddress.getByName(hostname); - String hostIp = ia.getHostAddress(); - String guid = UUID.nameUUIDFromBytes(hostIp.getBytes()).toString(); + try { + String hostname = url.getHost(); + InetAddress ia = InetAddress.getByName(hostname); + String hostIp = ia.getHostAddress(); + String guid = UUID.nameUUIDFromBytes(hostIp.getBytes()).toString(); - 
if (checkIfExisted(guid)) { - throw new CloudRuntimeException("The host " + hostIp - + " has been added before"); - } + if (checkIfExisted(guid)) { + throw new CloudRuntimeException("The host " + hostIp + " has been added before"); + } - s_logger.debug("Ovm discover is going to disover host having guid " - + guid); + s_logger.debug("Ovm discover is going to disover host having guid " + guid); - ClusterVO clu = _clusterDao.findById(clusterId); - if (clu.getGuid() == null) { - clu.setGuid(UUID.randomUUID().toString()); - _clusterDao.update(clusterId, clu); - } + ClusterVO clu = _clusterDao.findById(clusterId); + if (clu.getGuid() == null) { + clu.setGuid(UUID.randomUUID().toString()); + _clusterDao.update(clusterId, clu); + } - com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection( - hostIp, 22); - sshConnection.connect(null, 60000, 60000); - sshConnection = SSHCmdHelper.acquireAuthorizedConnection(hostIp, - username, password); - if (sshConnection == null) { - throw new DiscoveryException( - String.format( - "Cannot connect to ovm host(IP=%1$s, username=%2$s, password=%3$s, discover failed", - hostIp, username, password)); - } + com.trilead.ssh2.Connection sshConnection = new com.trilead.ssh2.Connection(hostIp, 22); + sshConnection.connect(null, 60000, 60000); + sshConnection = SSHCmdHelper.acquireAuthorizedConnection(hostIp, username, password); + if (sshConnection == null) { + throw new DiscoveryException(String.format("Cannot connect to ovm host(IP=%1$s, username=%2$s, password=%3$s, discover failed", hostIp, username, password)); + } - if (!SSHCmdHelper.sshExecuteCmd(sshConnection, - "[ -f '/etc/ovs-agent/agent.ini' ]")) { - throw new DiscoveryException( - "Can not find /etc/ovs-agent/agent.ini " + hostIp); - } + if (!SSHCmdHelper.sshExecuteCmd(sshConnection, "[ -f '/etc/ovs-agent/agent.ini' ]")) { + throw new DiscoveryException("Can not find /etc/ovs-agent/agent.ini " + hostIp); + } - Map details = new HashMap(); - OvmResourceBase 
ovmResource = new OvmResourceBase(); - details.put("ip", hostIp); - details.put("username", username); - details.put("password", password); - details.put("zone", Long.toString(dcId)); - details.put("guid", guid); - details.put("pod", Long.toString(podId)); - details.put("cluster", Long.toString(clusterId)); - details.put("agentusername", agentUsername); - details.put("agentpassword", agentPassword); - if (_publicNetworkDevice != null) { - details.put("public.network.device", _publicNetworkDevice); - } - if (_privateNetworkDevice != null) { - details.put("private.network.device", _privateNetworkDevice); - } - if (_guestNetworkDevice != null) { - details.put("guest.network.device", _guestNetworkDevice); - } + Map details = new HashMap(); + OvmResourceBase ovmResource = new OvmResourceBase(); + details.put("ip", hostIp); + details.put("username", username); + details.put("password", password); + details.put("zone", Long.toString(dcId)); + details.put("guid", guid); + details.put("pod", Long.toString(podId)); + details.put("cluster", Long.toString(clusterId)); + details.put("agentusername", agentUsername); + details.put("agentpassword", agentPassword); + if (_publicNetworkDevice != null) { + details.put("public.network.device", _publicNetworkDevice); + } + if (_privateNetworkDevice != null) { + details.put("private.network.device", _privateNetworkDevice); + } + if (_guestNetworkDevice != null) { + details.put("guest.network.device", _guestNetworkDevice); + } - Map params = new HashMap(); - params.putAll(details); - ovmResource.configure("Ovm Server", params); - ovmResource.start(); + Map params = new HashMap(); + params.putAll(details); + ovmResource.configure("Ovm Server", params); + ovmResource.start(); - conn = new Connection(hostIp, "oracle", agentPassword); - /* After resource start, we are able to execute our agent api */ - OvmHost.Details d = OvmHost.getDetails(conn); - details.put("agentVersion", d.agentVersion); - details.put(HostInfo.HOST_OS_KERNEL_VERSION, 
d.dom0KernelVersion); - details.put(HostInfo.HYPERVISOR_VERSION, d.hypervisorVersion); + conn = new Connection(hostIp, "oracle", agentPassword); + /* After resource start, we are able to execute our agent api */ + OvmHost.Details d = OvmHost.getDetails(conn); + details.put("agentVersion", d.agentVersion); + details.put(HostInfo.HOST_OS_KERNEL_VERSION, d.dom0KernelVersion); + details.put(HostInfo.HYPERVISOR_VERSION, d.hypervisorVersion); - Map> resources = new HashMap>(); - resources.put(ovmResource, details); - return resources; - } catch (XmlRpcException e) { - s_logger.debug("XmlRpc exception, Unable to discover OVM: " + url, - e); - return null; - } catch (UnknownHostException e) { - s_logger.debug( - "Host name resolve failed exception, Unable to discover OVM: " - + url, e); - return null; - } catch (ConfigurationException e) { - s_logger.debug( - "Configure resource failed, Unable to discover OVM: " + url, - e); - return null; - } catch (Exception e) { - s_logger.debug("Unable to discover OVM: " + url, e); - return null; - } - } + Map> resources = new HashMap>(); + resources.put(ovmResource, details); + return resources; + } catch (XmlRpcException e) { + s_logger.debug("XmlRpc exception, Unable to discover OVM: " + url, e); + return null; + } catch (UnknownHostException e) { + s_logger.debug("Host name resolve failed exception, Unable to discover OVM: " + url, e); + return null; + } catch (ConfigurationException e) { + s_logger.debug("Configure resource failed, Unable to discover OVM: " + url, e); + return null; + } catch (Exception e) { + s_logger.debug("Unable to discover OVM: " + url, e); + return null; + } + } - @Override - public void postDiscovery(List hosts, long msId) - throws DiscoveryException { - // TODO Auto-generated method stub + @Override + public void postDiscovery(List hosts, long msId) throws DiscoveryException { + // TODO Auto-generated method stub - } + } - @Override - public boolean matchHypervisor(String hypervisor) { - return 
HypervisorType.Ovm.toString().equalsIgnoreCase(hypervisor); - } + @Override + public boolean matchHypervisor(String hypervisor) { + return HypervisorType.Ovm.toString().equalsIgnoreCase(hypervisor); + } - @Override - public HypervisorType getHypervisorType() { - return HypervisorType.Ovm; - } + @Override + public HypervisorType getHypervisorType() { + return HypervisorType.Ovm; + } - @Override - public HostVO createHostVOForConnectedAgent(HostVO host, - StartupCommand[] cmd) { - // TODO Auto-generated method stub - return null; - } + @Override + public HostVO createHostVOForConnectedAgent(HostVO host, StartupCommand[] cmd) { + // TODO Auto-generated method stub + return null; + } - @Override - public HostVO createHostVOForDirectConnectAgent(HostVO host, - StartupCommand[] startup, ServerResource resource, - Map details, List hostTags) { - StartupCommand firstCmd = startup[0]; - if (!(firstCmd instanceof StartupRoutingCommand)) { - return null; - } + @Override + public HostVO createHostVOForDirectConnectAgent(HostVO host, StartupCommand[] startup, ServerResource resource, Map details, List hostTags) { + StartupCommand firstCmd = startup[0]; + if (!(firstCmd instanceof StartupRoutingCommand)) { + return null; + } - StartupRoutingCommand ssCmd = ((StartupRoutingCommand) firstCmd); - if (ssCmd.getHypervisorType() != HypervisorType.Ovm) { - return null; - } + StartupRoutingCommand ssCmd = ((StartupRoutingCommand)firstCmd); + if (ssCmd.getHypervisorType() != HypervisorType.Ovm) { + return null; + } - return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.Ovm, - details, hostTags); - } + return _resourceMgr.fillRoutingHostVO(host, ssCmd, HypervisorType.Ovm, details, hostTags); + } - @Override - public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, - boolean isForceDeleteStorage) throws UnableDeleteHostException { - if (host.getType() != com.cloud.host.Host.Type.Routing - || host.getHypervisorType() != HypervisorType.Ovm) { - return null; - } + 
@Override + public DeleteHostAnswer deleteHost(HostVO host, boolean isForced, boolean isForceDeleteStorage) throws UnableDeleteHostException { + if (host.getType() != com.cloud.host.Host.Type.Routing || host.getHypervisorType() != HypervisorType.Ovm) { + return null; + } - _resourceMgr.deleteRoutingHost(host, isForced, isForceDeleteStorage); - return new DeleteHostAnswer(true); - } + _resourceMgr.deleteRoutingHost(host, isForced, isForceDeleteStorage); + return new DeleteHostAnswer(true); + } } diff --git a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmResourceBase.java b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmResourceBase.java index 1eddedc6f19..2d0d67b2f41 100755 --- a/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmResourceBase.java +++ b/plugins/hypervisors/ovm/src/com/cloud/ovm/hypervisor/OvmResourceBase.java @@ -732,7 +732,7 @@ public class OvmResourceBase implements ServerResource, HypervisorResource { vm = OvmVm.getDetails(_conn, vmName); } catch (XmlRpcException e) { s_logger.debug("Unable to get details of vm: " + vmName + ", treating it as stopped", e); - return new StopAnswer(cmd, "success", 0, true); + return new StopAnswer(cmd, "success", true); } deleteAllNetworkRulesForVm(vmName); @@ -740,7 +740,7 @@ public class OvmResourceBase implements ServerResource, HypervisorResource { cleanup(vm); state = State.Stopped; - return new StopAnswer(cmd, "success", 0, true); + return new StopAnswer(cmd, "success", true); } catch (Exception e) { s_logger.debug("Stop " + vmName + "failed", e); return new StopAnswer(cmd, e.getMessage(), false); diff --git a/plugins/hypervisors/simulator/pom.xml b/plugins/hypervisors/simulator/pom.xml index 81aeb6d7bd7..a44997bf252 100644 --- a/plugins/hypervisors/simulator/pom.xml +++ b/plugins/hypervisors/simulator/pom.xml @@ -25,7 +25,6 @@ 4.3.0-SNAPSHOT ../../pom.xml - org.apache.cloudstack cloud-plugin-hypervisor-simulator Apache CloudStack Plugin - Hypervisor Simulator Simulator Hypervisor for 
Cloudstack diff --git a/plugins/hypervisors/simulator/resources/META-INF/cloudstack/core/spring-simulator-core-context.xml b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/core/spring-simulator-core-context.xml new file mode 100644 index 00000000000..4905ae80b8f --- /dev/null +++ b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/core/spring-simulator-core-context.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + + + diff --git a/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-compute/module.properties b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-compute/module.properties new file mode 100644 index 00000000000..7b12a088161 --- /dev/null +++ b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=simulator-compute +parent=compute \ No newline at end of file diff --git a/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-compute/spring-simulator-compute-context.xml b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-compute/spring-simulator-compute-context.xml new file mode 100644 index 00000000000..11462333d0e --- /dev/null +++ b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-compute/spring-simulator-compute-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-discoverer/module.properties b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-discoverer/module.properties new file mode 100644 index 00000000000..536cf1562fd --- /dev/null +++ b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-discoverer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=simulator-discoverer +parent=discoverer \ No newline at end of file diff --git a/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-discoverer/spring-simulator-discover-context.xml b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-discoverer/spring-simulator-discover-context.xml new file mode 100644 index 00000000000..323f64fdac7 --- /dev/null +++ b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-discoverer/spring-simulator-discover-context.xml @@ -0,0 +1,36 @@ + + + + + + + + + + diff --git a/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-storage/module.properties b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-storage/module.properties new file mode 100644 index 00000000000..97a1784c82a --- /dev/null +++ b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-storage/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=simulator-storage +parent=storage \ No newline at end of file diff --git a/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-storage/spring-simulator-storage-context.xml b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-storage/spring-simulator-storage-context.xml new file mode 100644 index 00000000000..4cb73e60bae --- /dev/null +++ b/plugins/hypervisors/simulator/resources/META-INF/cloudstack/simulator-storage/spring-simulator-storage-context.xml @@ -0,0 +1,33 @@ + + + + + + diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java index fcc1b4f2209..4071aeb4c46 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockAgentManagerImpl.java @@ -16,6 +16,25 @@ // under the License. package com.cloud.agent.manager; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.regex.PatternSyntaxException; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckHealthCommand; @@ -31,7 +50,12 @@ import com.cloud.dc.dao.HostPodDao; import com.cloud.exception.DiscoveryException; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; -import com.cloud.resource.*; +import com.cloud.resource.AgentResourceBase; +import 
com.cloud.resource.AgentRoutingResource; +import com.cloud.resource.AgentStorageResource; +import com.cloud.resource.Discoverer; +import com.cloud.resource.ResourceManager; +import com.cloud.resource.SimulatorSecondaryDiscoverer; import com.cloud.simulator.MockHost; import com.cloud.simulator.MockHostVO; import com.cloud.simulator.MockVMVO; @@ -41,27 +65,9 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; -import org.apache.cloudstack.api.command.admin.host.AddSecondaryStorageCmd; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; -import java.security.NoSuchAlgorithmException; -import java.security.SecureRandom; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.regex.PatternSyntaxException; @Component @Local(value = { MockAgentManager.class }) @@ -81,10 +87,13 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage MockStorageManager _storageMgr = null; @Inject ResourceManager _resourceMgr; - @Inject + SimulatorSecondaryDiscoverer discoverer; @Inject HostDao hostDao; + + List discoverers; + private SecureRandom random; private final Map _resources = new ConcurrentHashMap(); private ThreadPoolExecutor _executor; @@ -159,7 +168,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage 
mockHost.setVersion(this.getClass().getPackage().getImplementationVersion()); mockHost.setResource("com.cloud.agent.AgentRoutingResource"); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); mockHost = _mockHostDao.persist(mockHost); @@ -170,7 +179,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage throw new CloudRuntimeException("Error configuring agent", ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -256,7 +265,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage private void handleSystemVMStop() { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { if (this.mode.equalsIgnoreCase("Stop")) { txn.start(); @@ -279,7 +288,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage throw new CloudRuntimeException("Unable to get host " + guid + " due to " + ex.getMessage(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -323,7 +332,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage mockHost.setVersion(this.getClass().getPackage().getImplementationVersion()); mockHost.setResource(resource); mockHost.setVmId(vmId); - Transaction simtxn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy simtxn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { simtxn.start(); mockHost = _mockHostDao.persist(mockHost); @@ -334,7 +343,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage + ex.getMessage(), ex); } finally { simtxn.close(); - simtxn = 
Transaction.open(Transaction.CLOUD_DB); + simtxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); simtxn.close(); } @@ -366,7 +375,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage @Override public MockHost getHost(String guid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); MockHost _host = _mockHostDao.findByGuid(guid); @@ -382,7 +391,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage throw new CloudRuntimeException("Unable to get host " + guid + " due to " + ex.getMessage(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -391,7 +400,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage public GetHostStatsAnswer getHostStatistic(GetHostStatsCommand cmd) { String hostGuid = cmd.getHostGuid(); MockHost host = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); host = _mockHostDao.findByGuid(hostGuid); @@ -404,11 +413,11 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage throw new CloudRuntimeException("Unable to get host " + hostGuid + " due to " + ex.getMessage(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } - Transaction vmtxn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy vmtxn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { vmtxn.start(); List vms = _mockVmDao.findByHostId(host.getId()); @@ -435,7 +444,7 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage + ex.getMessage(), ex); } finally { vmtxn.close(); - 
vmtxn = Transaction.open(Transaction.CLOUD_DB); + vmtxn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); vmtxn.close(); } } @@ -452,6 +461,17 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage @Override public boolean start() { + for ( Discoverer discoverer : discoverers ) { + if ( discoverer instanceof SimulatorSecondaryDiscoverer ) { + this.discoverer = (SimulatorSecondaryDiscoverer)discoverer; + break; + } + } + + if ( this.discoverer == null ) { + throw new IllegalStateException("Failed to find SimulatorSecondaryDiscoverer"); + } + return true; } @@ -477,4 +497,13 @@ public class MockAgentManagerImpl extends ManagerBase implements MockAgentManage } return new CheckNetworkAnswer(cmd, true, "Network Setup check by names is done"); } + + public List getDiscoverers() { + return discoverers; + } + + @Inject + public void setDiscoverers(List discoverers) { + this.discoverers = discoverers; + } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java index d23575bfee3..64463d486ae 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockStorageManagerImpl.java @@ -76,7 +76,7 @@ import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.template.TemplateProp; import com.cloud.utils.NumbersUtil; import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DiskProfile; import com.cloud.vm.VirtualMachine.State; @@ -120,7 +120,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa VMTemplateDao templateDao; private MockVolumeVO findVolumeFromSecondary(String path, String ssUrl, MockVolumeType type) { - Transaction 
txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); String volumePath = path.replaceAll(ssUrl, ""); @@ -141,7 +141,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Unable to find volume " + path + " on secondary " + ssUrl, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -154,7 +154,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa return new PrimaryStorageDownloadAnswer("Can't find primary storage"); } - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockStoragePoolVO primaryStorage = null; try { txn.start(); @@ -168,7 +168,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when finding primary storagee " + cmd.getPoolUuid(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -179,7 +179,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa newVolume.setPoolId(primaryStorage.getId()); newVolume.setSize(template.getSize()); newVolume.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); _mockVolumeDao.persist(newVolume); @@ -189,7 +189,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when saving volume " + newVolume, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } 
return new PrimaryStorageDownloadAnswer(newVolume.getPath(), newVolume.getSize()); @@ -200,7 +200,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa StorageFilerTO sf = cmd.getPool(); DiskProfile dskch = cmd.getDiskCharacteristics(); MockStoragePoolVO storagePool = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); storagePool = _mockStoragePoolDao.findByUuid(sf.getUuid()); @@ -213,7 +213,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when finding storage " + sf.getUuid(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -224,7 +224,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa volume.setPath(storagePool.getMountPoint() + volumeName); volume.setSize(dskch.getSize()); volume.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); volume = _mockVolumeDao.persist(volume); @@ -234,7 +234,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when saving volume " + volume, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -246,7 +246,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public AttachVolumeAnswer AttachVolume(AttachVolumeCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); String poolid = cmd.getPoolUuid(); @@ -270,7 +270,7 @@ public 
class MockStorageManagerImpl extends ManagerBase implements MockStorageMa + cmd.getVmName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -284,7 +284,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa } String vmName = cmd.getVmName(); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockVMVO vm = null; try { txn.start(); @@ -298,7 +298,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when attaching iso to vm " + vm.getName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new Answer(cmd); @@ -306,7 +306,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public Answer DeleteStoragePool(DeleteStoragePoolCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); MockStoragePoolVO storage = _mockStoragePoolDao.findByUuid(cmd.getPool().getUuid()); @@ -321,7 +321,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when deleting storage pool " + cmd.getPool().getPath(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -329,7 +329,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public ModifyStoragePoolAnswer ModifyStoragePool(ModifyStoragePoolCommand cmd) { StorageFilerTO sf = cmd.getPool(); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + 
TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockStoragePoolVO storagePool = null; try { txn.start(); @@ -361,7 +361,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when modifying storage pool " + cmd.getPool().getPath(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new ModifyStoragePoolAnswer(cmd, storagePool.getCapacity(), 0, new HashMap()); @@ -370,7 +370,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public Answer CreateStoragePool(CreateStoragePoolCommand cmd) { StorageFilerTO sf = cmd.getPool(); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockStoragePoolVO storagePool = null; try { txn.start(); @@ -402,7 +402,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when creating storage pool " + cmd.getPool().getPath(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new ModifyStoragePoolAnswer(cmd, storagePool.getCapacity(), 0, new HashMap()); @@ -410,7 +410,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public Answer SecStorageSetup(SecStorageSetupCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockSecStorageVO storage = null; try { txn.start(); @@ -424,7 +424,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when setting up sec storage" + cmd.getSecUrl(), ex); } finally { 
txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new SecStorageSetupAnswer(storage.getMountPoint()); @@ -432,7 +432,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public Answer ListVolumes(ListVolumeCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockSecStorageVO storage = null; try { txn.start(); @@ -446,11 +446,11 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when finding sec storage " + cmd.getSecUrl(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); List volumes = _mockVolumeDao.findByStorageIdAndType(storage.getId(), @@ -468,7 +468,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when finding template on sec storage " + storage.getId(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -483,7 +483,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa MockSecStorageVO storage = null; String nfsUrl = ((NfsTO) store).getUrl(); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); storage = _mockSecStorageDao.findByUrl(nfsUrl); try { txn.start(); @@ -501,14 +501,14 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when finding 
template on sec storage " + storage.getId(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @Override public Answer Destroy(DestroyCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); MockVolumeVO volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolume().getPath()); @@ -530,7 +530,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when destroying volume " + cmd.getVolume().getPath(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new Answer(cmd); @@ -539,7 +539,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public DownloadAnswer Download(DownloadCommand cmd) { MockSecStorageVO ssvo = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); ssvo = _mockSecStorageDao.findByUrl(cmd.getSecUrl()); @@ -553,7 +553,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error accessing secondary storage " + cmd.getSecUrl(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -564,7 +564,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa volume.setSize(0); volume.setType(MockVolumeType.TEMPLATE); volume.setStatus(Status.DOWNLOAD_IN_PROGRESS); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); volume = 
_mockVolumeDao.persist(volume); @@ -574,7 +574,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when saving volume " + volume, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new DownloadAnswer(String.valueOf(volume.getId()), 0, "Downloading", Status.DOWNLOAD_IN_PROGRESS, @@ -583,7 +583,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public DownloadAnswer DownloadProcess(DownloadProgressCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); String volumeId = cmd.getJobId(); @@ -616,7 +616,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error during download job " + cmd.getJobId(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -624,7 +624,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public GetStorageStatsAnswer GetStorageStats(GetStorageStatsCommand cmd) { String uuid = cmd.getStorageId(); - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); if (uuid == null) { @@ -653,7 +653,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("DBException during storage stats collection for pool " + uuid, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -663,7 +663,7 @@ public class MockStorageManagerImpl extends 
ManagerBase implements MockStorageMa String volPath = cmd.getVolumePath(); MockVolumeVO volume = null; MockStoragePoolVO storagePool = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); volume = _mockVolumeDao.findByStoragePathAndType(volPath); @@ -680,7 +680,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Unable to perform snapshot", ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -693,7 +693,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa snapshot.setPoolId(storagePool.getId()); snapshot.setType(MockVolumeType.SNAPSHOT); snapshot.setStatus(Status.DOWNLOADED); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); snapshot = _mockVolumeDao.persist(snapshot); @@ -703,7 +703,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when saving snapshot " + snapshot, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -717,7 +717,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa MockVolumeVO volume = null; MockVolumeVO snapshot = null; MockSecStorageVO secStorage = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath()); @@ -742,7 +742,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when backing up 
snapshot"); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -754,7 +754,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa newsnapshot.setSize(snapshot.getSize()); newsnapshot.setStatus(Status.DOWNLOADED); newsnapshot.setType(MockVolumeType.SNAPSHOT); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); snapshot = _mockVolumeDao.persist(snapshot); @@ -764,7 +764,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when backing up snapshot " + newsnapshot, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -773,7 +773,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public CreateVolumeFromSnapshotAnswer CreateVolumeFromSnapshot(CreateVolumeFromSnapshotCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockVolumeVO backSnapshot = null; MockStoragePoolVO primary = null; try { @@ -795,7 +795,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when creating volume from snapshot", ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -808,7 +808,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa volume.setSize(backSnapshot.getSize()); volume.setStatus(Status.DOWNLOADED); volume.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = 
TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); _mockVolumeDao.persist(volume); @@ -818,7 +818,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when creating volume from snapshot " + volume, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -828,7 +828,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public Answer Delete(DeleteCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); MockVolumeVO template = _mockVolumeDao.findByStoragePathAndType(cmd.getData().getPath()); @@ -842,7 +842,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when deleting object"); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new Answer(cmd); @@ -879,7 +879,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public void preinstallTemplates(String url, long zoneId) { MockSecStorageVO storage = null; - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); storage = _mockSecStorageDao.findByUrl(url); @@ -933,14 +933,14 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa } catch (Exception ex) { throw new CloudRuntimeException("Unable to find sec storage at " + url, ex); } finally { - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @Override public StoragePoolInfo 
getLocalStorage(String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockHost host = null; MockStoragePoolVO storagePool = null; try { @@ -953,7 +953,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Unable to find host " + hostGuid, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -965,7 +965,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa storagePool.setCapacity(DEFAULT_HOST_STORAGE_SIZE); storagePool.setHostGuid(hostGuid); storagePool.setStorageType(StoragePoolType.Filesystem); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); storagePool = _mockStoragePoolDao.persist(storagePool); @@ -975,7 +975,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when saving storagePool " + storagePool, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -985,7 +985,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public StoragePoolInfo getLocalStorage(String hostGuid, Long storageSize) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockHost host = null; try { txn.start(); @@ -996,13 +996,13 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Unable to find host " + hostGuid, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = 
TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } if (storageSize == null) { storageSize = DEFAULT_HOST_STORAGE_SIZE; } - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockStoragePoolVO storagePool = null; try { txn.start(); @@ -1013,7 +1013,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when finding storagePool " + storagePool, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } if (storagePool == null) { @@ -1024,7 +1024,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa storagePool.setCapacity(storageSize); storagePool.setHostGuid(hostGuid); storagePool.setStorageType(StoragePoolType.Filesystem); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); storagePool = _mockStoragePoolDao.persist(storagePool); @@ -1034,7 +1034,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when saving storagePool " + storagePool, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -1044,7 +1044,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public CreatePrivateTemplateAnswer CreatePrivateTemplateFromSnapshot(CreatePrivateTemplateFromSnapshotCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockVolumeVO snapshot = null; MockSecStorageVO sec = null; try { @@ -1066,7 +1066,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa txn.commit(); } 
finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -1078,7 +1078,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa template.setSize(snapshot.getSize()); template.setStatus(Status.DOWNLOADED); template.setType(MockVolumeType.TEMPLATE); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); template = _mockVolumeDao.persist(template); @@ -1088,7 +1088,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa throw new CloudRuntimeException("Error when saving template " + template, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -1098,7 +1098,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public Answer ComputeChecksum(ComputeChecksumCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); MockVolumeVO volume = _mockVolumeDao.findByName(cmd.getTemplatePath()); @@ -1116,14 +1116,14 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa return new Answer(cmd, true, md5); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @Override public CreatePrivateTemplateAnswer CreatePrivateTemplateFromVolume(CreatePrivateTemplateFromVolumeCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockVolumeVO volume = null; MockSecStorageVO sec = null; try { @@ -1143,7 +1143,7 @@ public class MockStorageManagerImpl extends ManagerBase implements 
MockStorageMa throw new CloudRuntimeException("Error when creating private template from volume"); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -1155,7 +1155,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa template.setSize(volume.getSize()); template.setStatus(Status.DOWNLOADED); template.setType(MockVolumeType.TEMPLATE); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); template = _mockVolumeDao.persist(template); @@ -1166,7 +1166,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa + template.getName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -1176,7 +1176,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa @Override public CopyVolumeAnswer CopyVolume(CopyVolumeCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); boolean toSecondaryStorage = cmd.toSecondaryStorage(); MockSecStorageVO sec = null; MockStoragePoolVO primaryStorage = null; @@ -1193,11 +1193,11 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa + cmd.getSecondaryStorageURL(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); primaryStorage = _mockStoragePoolDao.findByUuid(cmd.getPool().getUuid()); @@ -1211,12 +1211,12 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa + cmd.getPool(), ex); } 
finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } MockVolumeVO volume = null; - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); volume = _mockVolumeDao.findByStoragePathAndType(cmd.getVolumePath()); @@ -1230,7 +1230,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa + cmd.getVolumePath(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -1243,7 +1243,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa vol.setSize(volume.getSize()); vol.setStatus(Status.DOWNLOADED); vol.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); vol = _mockVolumeDao.persist(vol); @@ -1254,7 +1254,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa + vol.getName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new CopyVolumeAnswer(cmd, true, null, sec.getMountPoint(), vol.getPath()); @@ -1266,7 +1266,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa vol.setSize(volume.getSize()); vol.setStatus(Status.DOWNLOADED); vol.setType(MockVolumeType.VOLUME); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); vol = _mockVolumeDao.persist(vol); @@ -1277,7 +1277,7 @@ public class MockStorageManagerImpl extends ManagerBase implements MockStorageMa + vol.getName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = 
TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return new CopyVolumeAnswer(cmd, true, null, primaryStorage.getMountPoint(), vol.getPath()); diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java index 64df113ea91..28e235e556e 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/MockVmManagerImpl.java @@ -72,7 +72,7 @@ import com.cloud.simulator.dao.MockVMDao; import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; import org.apache.log4j.Logger; @@ -111,7 +111,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { int cpuHz, long ramSize, String bootArgs, String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); MockHost host = null; MockVm vm = null; try { @@ -128,7 +128,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new CloudRuntimeException("Unable to start VM " + vmName, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } @@ -153,7 +153,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { } else if (vmName.startsWith("i-")) { vm.setType("User"); } - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); vm = _mockVmDao.persist((MockVMVO) vm); @@ -163,13 +163,13 @@ public class MockVmManagerImpl extends 
ManagerBase implements MockVmManager { throw new CloudRuntimeException("unable to save vm to db " + vm.getName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } else { if(vm.getState() == State.Stopped) { vm.setState(State.Running); - txn = Transaction.open(Transaction.SIMULATOR_DB); + txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); _mockVmDao.update(vm.getId(), (MockVMVO)vm); @@ -179,7 +179,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new CloudRuntimeException("unable to update vm " + vm.getName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -230,7 +230,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { @Override public Map getVms(String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); List vms = _mockVmDao.findByHostGuid(hostGuid); @@ -245,7 +245,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new CloudRuntimeException("unable to fetch vms from host " + hostGuid, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -282,7 +282,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { @Override public Map getVmStates(String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); Map states = new HashMap(); @@ -301,7 +301,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new 
CloudRuntimeException("unable to fetch vms from host " + hostGuid, ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -338,7 +338,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { @Override public CheckVirtualMachineAnswer checkVmState(CheckVirtualMachineCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); MockVMVO vm = _mockVmDao.findByVmName(cmd.getVmName()); @@ -353,7 +353,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new CloudRuntimeException("unable to fetch vm state " + cmd.getVmName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -378,7 +378,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { @Override public MigrateAnswer Migrate(MigrateCommand cmd, SimulatorInfo info) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); String vmName = cmd.getVmName(); @@ -405,14 +405,14 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new CloudRuntimeException("unable to migrate vm " + cmd.getVmName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @Override public PrepareForMigrationAnswer prepareForMigrate(PrepareForMigrationCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); VirtualMachineTO vmTo = cmd.getVirtualMachine(); try { txn.start(); @@ -425,7 +425,7 @@ 
public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new CloudRuntimeException("unable to find vm " + vmTo.getName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); return new PrepareForMigrationAnswer(cmd); } @@ -438,7 +438,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { @Override public Answer CleanupNetworkRules(CleanupNetworkRulesCmd cmd, SimulatorInfo info) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); List rules = _mockSecurityDao.findByHost(info.getHostUuid()); @@ -455,7 +455,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new CloudRuntimeException("unable to clean up rules", ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -503,7 +503,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { @Override public StopAnswer stopVM(StopCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); String vmName = cmd.getVmName(); @@ -517,20 +517,20 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { _mockAgentMgr.handleSystemVMStop(vm.getId()); } txn.commit(); - return new StopAnswer(cmd, null, new Integer(0), true); + return new StopAnswer(cmd, null, true); } catch (Exception ex) { txn.rollback(); throw new CloudRuntimeException("unable to stop vm " + cmd.getVmName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @Override public RebootAnswer 
rebootVM(RebootCommand cmd) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); MockVm vm = _mockVmDao.findByVmName(cmd.getVmName()); @@ -545,7 +545,7 @@ public class MockVmManagerImpl extends ManagerBase implements MockVmManager { throw new CloudRuntimeException("unable to stop vm " + cmd.getVmName(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java index f6bd2b651ba..f30bd3661d0 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/agent/manager/SimulatorManagerImpl.java @@ -99,7 +99,7 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.component.PluggableService; import com.cloud.utils.db.DB; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VirtualMachine.State; import org.apache.cloudstack.storage.command.DeleteCommand; @@ -182,7 +182,7 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage @DB @Override public Answer simulate(Command cmd, String hostGuid) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { MockHost host = _mockHost.findByGuid(hostGuid); String cmdName = cmd.toString(); @@ -373,7 +373,7 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage return new Answer(cmd, false, e.toString()); } finally { txn.close(); - txn = 
Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } } @@ -403,7 +403,7 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage @Override public boolean configureSimulator(Long zoneId, Long podId, Long clusterId, Long hostId, String command, String values) { - Transaction txn = Transaction.open(Transaction.SIMULATOR_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.SIMULATOR_DB); try { txn.start(); MockConfigurationVO config = _mockConfigDao.findByCommand(zoneId, podId, clusterId, hostId, command); @@ -427,7 +427,7 @@ public class SimulatorManagerImpl extends ManagerBase implements SimulatorManage throw new CloudRuntimeException("Unable to configure simulator because of " + ex.getMessage(), ex); } finally { txn.close(); - txn = Transaction.open(Transaction.CLOUD_DB); + txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); } return true; diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java b/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java index 8a03c4b9598..9a27d748a29 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java +++ b/plugins/hypervisors/simulator/src/com/cloud/resource/AgentRoutingResource.java @@ -231,7 +231,7 @@ public class AgentRoutingResource extends AgentStorageResource { return new StopAnswer(cmd, result.getDetails(), false); } - answer = new StopAnswer(cmd, null, 0, true); + answer = new StopAnswer(cmd, null, true); Pair data = _runningVms.get(vmName); if (data != null) { this.usedCpu -= data.first(); diff --git a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java index c7768aa5b69..1c992722058 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java +++ 
b/plugins/hypervisors/simulator/src/com/cloud/resource/SimulatorStorageProcessor.java @@ -19,14 +19,9 @@ package com.cloud.resource; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.to.DataStoreTO; -import com.cloud.agent.api.to.DataTO; -import com.cloud.agent.api.to.DiskTO; -import com.cloud.agent.api.to.NfsTO; -import com.cloud.agent.manager.SimulatorManager; -import com.cloud.storage.Storage; -import com.cloud.storage.resource.StorageProcessor; +import java.io.File; +import java.util.UUID; + import org.apache.cloudstack.storage.command.AttachAnswer; import org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; @@ -36,13 +31,21 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachAnswer; import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.log4j.Logger; -import java.io.File; -import java.util.UUID; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.DiskTO; +import com.cloud.agent.api.to.NfsTO; +import com.cloud.agent.manager.SimulatorManager; +import com.cloud.storage.Storage; +import com.cloud.storage.resource.StorageProcessor; public class SimulatorStorageProcessor implements StorageProcessor { @@ -214,4 +217,16 @@ public class SimulatorStorageProcessor implements StorageProcessor { public Answer deleteSnapshot(DeleteCommand cmd) { return new Answer(cmd); } + + @Override + public Answer introduceObject(IntroduceObjectCmd cmd) { + // TODO 
Auto-generated method stub + return null; + } + + @Override + public Answer forgetObject(ForgetObjectCmd cmd) { + // TODO Auto-generated method stub + return null; + } } diff --git a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java index fd825b751ed..a681c061a28 100644 --- a/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java +++ b/plugins/hypervisors/simulator/src/com/cloud/simulator/dao/MockConfigurationDaoImpl.java @@ -20,7 +20,7 @@ import com.cloud.simulator.MockConfigurationVO; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import org.springframework.stereotype.Component; import javax.ejb.Local; @@ -117,7 +117,7 @@ public class MockConfigurationDaoImpl extends GenericDaoBase volumeMap, Host srcHost, Host destHost) { - return true; + public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { + return StrategyPriority.HYPERVISOR; } @Override diff --git a/plugins/hypervisors/ucs/pom.xml b/plugins/hypervisors/ucs/pom.xml index 0309a422499..ae9708cba98 100755 --- a/plugins/hypervisors/ucs/pom.xml +++ b/plugins/hypervisors/ucs/pom.xml @@ -18,9 +18,7 @@ 4.3.0-SNAPSHOT ../../pom.xml - org.apache.cloudstack cloud-plugin-hypervisor-ucs - 4.3.0-SNAPSHOT Apache CloudStack Plugin - Hypervisor UCS http://maven.apache.org diff --git a/plugins/hypervisors/ucs/resources/META-INF/cloudstack/core/spring-ucs-core-context.xml b/plugins/hypervisors/ucs/resources/META-INF/cloudstack/core/spring-ucs-core-context.xml new file mode 100644 index 00000000000..47732f46092 --- /dev/null +++ b/plugins/hypervisors/ucs/resources/META-INF/cloudstack/core/spring-ucs-core-context.xml @@ -0,0 +1,35 @@ + + + + + + + + + diff --git 
a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java index 5cb9b022bf4..5dc6f79bf7d 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsBladeDaoImpl.java @@ -19,12 +19,10 @@ package com.cloud.ucs.database; import javax.ejb.Local; -import org.springframework.stereotype.Component; - import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = { UcsBladeDao.class }) -@DB(txn = false) +@DB() public class UcsBladeDaoImpl extends GenericDaoBase implements UcsBladeDao { } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDao.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDao.java index f1664a1870a..b341a1b2279 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDao.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDao.java @@ -27,7 +27,7 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria2; +import com.cloud.utils.db.GenericQueryBuilder; public interface UcsManagerDao extends GenericDao { } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java index 93c088566a1..9500886875f 100644 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/database/UcsManagerDaoImpl.java @@ -19,12 +19,10 @@ package com.cloud.ucs.database; import javax.ejb.Local; -import org.springframework.stereotype.Component; - import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = { UcsManagerDao.class }) -@DB(txn = false) +@DB() public 
class UcsManagerDaoImpl extends GenericDaoBase implements UcsManagerDao { } diff --git a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerImpl.java b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerImpl.java index 9c8bc4e0bc9..4239482d1cb 100755 --- a/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerImpl.java +++ b/plugins/hypervisors/ucs/src/com/cloud/ucs/manager/UcsManagerImpl.java @@ -30,8 +30,10 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.log4j.Logger; import org.apache.cloudstack.api.AddUcsManagerCmd; import org.apache.cloudstack.api.AssociateUcsProfileToBladeCmd; +import org.apache.cloudstack.api.DeleteUcsManagerCmd; import org.apache.cloudstack.api.ListUcsBladeCmd; import org.apache.cloudstack.api.ListUcsManagerCmd; import org.apache.cloudstack.api.ListUcsProfileCmd; @@ -40,8 +42,7 @@ import org.apache.cloudstack.api.response.UcsBladeResponse; import org.apache.cloudstack.api.response.UcsManagerResponse; import org.apache.cloudstack.api.response.UcsProfileResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; -import org.apache.cloudstack.api.DeleteUcsManagerCmd; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.configuration.Config; import com.cloud.dc.ClusterDetailsDao; @@ -60,9 +61,8 @@ import com.cloud.ucs.structure.UcsCookie; import com.cloud.ucs.structure.UcsProfile; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.xmlobject.XmlObject; @@ -98,7 +98,7 @@ public class UcsManagerImpl implements UcsManager { private 
ScheduledExecutorService syncBladesExecutor; private int syncBladeInterval; - private class SyncBladesThread implements Runnable { + private class SyncBladesThread extends ManagedContextRunnable { private void discoverNewBlades(Map previous, Map now, UcsManagerVO mgr) { @@ -132,8 +132,8 @@ public class UcsManagerImpl implements UcsManager { } private void syncBlades(UcsManagerVO mgr) { - SearchCriteriaService q = SearchCriteria2.create(UcsBladeVO.class); - q.addAnd(q.getEntity().getUcsManagerId(), Op.EQ, mgr.getId()); + QueryBuilder q = QueryBuilder.create(UcsBladeVO.class); + q.and(q.entity().getUcsManagerId(), Op.EQ, mgr.getId()); List pblades = q.list(); if (pblades.isEmpty()) { return; @@ -156,7 +156,7 @@ public class UcsManagerImpl implements UcsManager { } @Override - public void run() { + protected void runInContext() { try { List mgrs = ucsDao.listAll(); for (UcsManagerVO mgr : mgrs) { @@ -210,8 +210,8 @@ public class UcsManagerImpl implements UcsManager { @Override @DB public UcsManagerResponse addUcsManager(AddUcsManagerCmd cmd) { - SearchCriteriaService q = SearchCriteria2.create(UcsManagerVO.class); - q.addAnd(q.getEntity().getUrl(), Op.EQ, cmd.getUrl()); + QueryBuilder q = QueryBuilder.create(UcsManagerVO.class); + q.and(q.entity().getUrl(), Op.EQ, cmd.getUrl()); UcsManagerVO mgrvo = q.find(); if (mgrvo != null) { throw new IllegalArgumentException(String.format("duplicate UCS manager. 
url[%s] is used by another UCS manager already", cmd.getUrl())); @@ -226,10 +226,7 @@ public class UcsManagerImpl implements UcsManager { vo.setZoneId(cmd.getZoneId()); vo.setName(cmd.getName()); - Transaction txn = Transaction.currentTxn(); - txn.start(); mgrvo = ucsDao.persist(vo); - txn.commit(); UcsManagerResponse rsp = new UcsManagerResponse(); rsp.setId(String.valueOf(vo.getId())); rsp.setName(vo.getName()); @@ -342,9 +339,9 @@ public class UcsManagerImpl implements UcsManager { @Override public UcsBladeResponse associateProfileToBlade(AssociateUcsProfileToBladeCmd cmd) { - SearchCriteriaService q = SearchCriteria2.create(UcsBladeVO.class); - q.addAnd(q.getEntity().getUcsManagerId(), Op.EQ, cmd.getUcsManagerId()); - q.addAnd(q.getEntity().getId(), Op.EQ, cmd.getBladeId()); + QueryBuilder q = QueryBuilder.create(UcsBladeVO.class); + q.and(q.entity().getUcsManagerId(), Op.EQ, cmd.getUcsManagerId()); + q.and(q.entity().getId(), Op.EQ, cmd.getBladeId()); UcsBladeVO bvo = q.find(); if (bvo == null) { throw new IllegalArgumentException(String.format("cannot find UCS blade[id:%s, ucs manager id:%s]", cmd.getBladeId(), cmd.getUcsManagerId())); @@ -424,8 +421,8 @@ public class UcsManagerImpl implements UcsManager { return response; } - SearchCriteriaService serv = SearchCriteria2.create(UcsManagerVO.class); - serv.addAnd(serv.getEntity().getZoneId(), Op.EQ, cmd.getZoneId()); + QueryBuilder serv = QueryBuilder.create(UcsManagerVO.class); + serv.and(serv.entity().getZoneId(), Op.EQ, cmd.getZoneId()); List vos = serv.list(); for (UcsManagerVO vo : vos) { @@ -454,8 +451,8 @@ public class UcsManagerImpl implements UcsManager { @Override public ListResponse listUcsBlades(ListUcsBladeCmd cmd) { - SearchCriteriaService serv = SearchCriteria2.create(UcsBladeVO.class); - serv.addAnd(serv.getEntity().getUcsManagerId(), Op.EQ, cmd.getUcsManagerId()); + QueryBuilder serv = QueryBuilder.create(UcsBladeVO.class); + serv.and(serv.entity().getUcsManagerId(), Op.EQ, 
cmd.getUcsManagerId()); List vos = serv.list(); List rsps = new ArrayList(vos.size()); @@ -509,8 +506,8 @@ public class UcsManagerImpl implements UcsManager { @Override public void deleteUcsManager(Long id) { - SearchCriteriaService serv = SearchCriteria2.create(UcsBladeVO.class); - serv.addAnd(serv.getEntity().getUcsManagerId(), Op.EQ, id); + QueryBuilder serv = QueryBuilder.create(UcsBladeVO.class); + serv.and(serv.entity().getUcsManagerId(), Op.EQ, id); List vos = serv.list(); for (UcsBladeVO vo : vos) { bladeDao.remove(vo.getId()); diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/core/spring-vmware-core-context.xml b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/core/spring-vmware-core-context.xml new file mode 100644 index 00000000000..406ffe16b4e --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/core/spring-vmware-core-context.xml @@ -0,0 +1,43 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/module.properties b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/module.properties new file mode 100644 index 00000000000..b605835b94f --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=vmware-compute +parent=compute \ No newline at end of file diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/spring-vmware-compute-context.xml b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/spring-vmware-compute-context.xml new file mode 100644 index 00000000000..402640db8fe --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/spring-vmware-compute-context.xml @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/vmware-defaults.properties b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/vmware-defaults.properties new file mode 100644 index 00000000000..334e9fffaa9 --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-compute/vmware-defaults.properties @@ -0,0 +1,17 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +premium=true diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-discoverer/module.properties b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-discoverer/module.properties new file mode 100644 index 00000000000..0d726f85b48 --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-discoverer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=vmware-discoverer +parent=discoverer \ No newline at end of file diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-discoverer/spring-vmware-discoverer-context.xml b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-discoverer/spring-vmware-discoverer-context.xml new file mode 100644 index 00000000000..933f0e8a723 --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-discoverer/spring-vmware-discoverer-context.xml @@ -0,0 +1,35 @@ + + + + + + + + + diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-network/module.properties b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-network/module.properties new file mode 100644 index 00000000000..91ea24cc557 --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-network/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=vmware-network +parent=network \ No newline at end of file diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-network/spring-vmware-network-context.xml b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-network/spring-vmware-network-context.xml new file mode 100644 index 00000000000..440356b3f53 --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-network/spring-vmware-network-context.xml @@ -0,0 +1,34 @@ + + + + + + + + \ No newline at end of file diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-storage/module.properties b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-storage/module.properties new file mode 100644 index 00000000000..9c3bab68940 --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-storage/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=vmware-storage +parent=storage \ No newline at end of file diff --git a/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-storage/spring-vmware-storage-context.xml b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-storage/spring-vmware-storage-context.xml new file mode 100644 index 00000000000..bb428bfbdeb --- /dev/null +++ b/plugins/hypervisors/vmware/resources/META-INF/cloudstack/vmware-storage/spring-vmware-storage-context.xml @@ -0,0 +1,33 @@ + + + + + + diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java index fd7b3b48795..58255bf2e58 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/VmwareServerDiscoverer.java @@ -60,10 +60,12 @@ import com.cloud.hypervisor.vmware.resource.VmwareResource; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.network.NetworkModel; import com.cloud.network.Networks.TrafficType; +import com.cloud.network.Network; import com.cloud.network.PhysicalNetwork; import com.cloud.network.VmwareTrafficLabel; import com.cloud.network.dao.CiscoNexusVSMDeviceDao; import com.cloud.network.element.CiscoNexusVSMElement; +import com.cloud.network.element.NetworkElement; import com.cloud.resource.Discoverer; import com.cloud.resource.DiscovererBase; import com.cloud.resource.ResourceManager; @@ -105,8 +107,8 @@ public class VmwareServerDiscoverer extends DiscovererBase implements ResourceManager _resourceMgr; @Inject CiscoNexusVSMDeviceDao _nexusDao; - @Inject CiscoNexusVSMElement _nexusElement; + List networkElements; @Inject NetworkModel _netmgr; @Inject @@ -450,6 +452,15 @@ public class VmwareServerDiscoverer extends DiscovererBase implements } } } + + protected CiscoNexusVSMElement getCiscoNexusVSMElement() { + for ( NetworkElement 
networkElement : networkElements ) { + if ( networkElement instanceof CiscoNexusVSMElement ) + return (CiscoNexusVSMElement)networkElement; + } + + throw new IllegalStateException("Failed to CiscoNexusVSMElement"); + } private VmwareDatacenterVO fetchVmwareDatacenterByZone(Long dcId) throws DiscoveryException { VmwareDatacenterVO vmwareDc; @@ -640,6 +651,16 @@ public class VmwareServerDiscoverer extends DiscovererBase implements } + @Override + public boolean start() { + if ( ! super.start() ) + return false; + + _nexusElement = getCiscoNexusVSMElement(); + + return true; + } + @Override public boolean stop() { _resourceMgr.unregisterResourceStateAdapter(this.getClass() @@ -791,4 +812,14 @@ public class VmwareServerDiscoverer extends DiscovererBase implements } _urlParams.putAll(params); } + + public List getNetworkElements() { + return networkElements; + } + + @Inject + public void setNetworkElements(List networkElements) { + this.networkElements = networkElements; + } + } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java index 7d2d1285c13..20d68b0b8d1 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/LegacyZoneDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.SearchCriteria.Op; @Component - @Local(value=LegacyZoneDao.class) @DB(txn=false) + @Local(value=LegacyZoneDao.class) @DB public class LegacyZoneDaoImpl extends GenericDaoBase implements LegacyZoneDao { protected static final Logger s_logger = Logger.getLogger(LegacyZoneDaoImpl.class); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java index 9f5796a073a..6dbbbed5d89 100644 --- 
a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/dao/VmwareDatacenterDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=VmwareDatacenterDao.class) @DB(txn=false) +@Local(value=VmwareDatacenterDao.class) @DB public class VmwareDatacenterDaoImpl extends GenericDaoBase implements VmwareDatacenterDao { protected static final Logger s_logger = Logger.getLogger(VmwareDatacenterDaoImpl.class); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareHostService.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareHostService.java index 2b44071a87c..d0147d194a6 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareHostService.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareHostService.java @@ -17,6 +17,7 @@ package com.cloud.hypervisor.vmware.manager; import com.cloud.agent.api.Command; +import com.cloud.hypervisor.vmware.mo.DatastoreMO; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.vmware.vim25.ManagedObjectReference; @@ -28,7 +29,8 @@ public interface VmwareHostService { String getWorkerName(VmwareContext context, Command cmd, int workerSequence); - ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort, - String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception; + ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, + String iqn, String initiatorChapName, String initiatorChapSecret, String mutualChapName, String mutualChapSecret) throws Exception; + void 
createVmdk(Command cmd, DatastoreMO dsMo, String volumeDatastorePath, Long volumeSize) throws Exception; void handleDatastoreAndVmdkDetach(String iqn, String storageHost, int storagePort) throws Exception; } diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java index 2649452611f..9af0aa002d8 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareManagerImpl.java @@ -120,6 +120,8 @@ import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.utils.ssh.SshHelper; @@ -575,12 +577,16 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw s_logger.info("Worker VM's owner management server has changed runid, recycle it"); return true; } - + + // disable time-out check until we have found out a VMware API that can check if + // there are pending tasks on the subject VM +/* if(System.currentTimeMillis() - startTick > _hungWorkerTimeout) { if(s_logger.isInfoEnabled()) s_logger.info("Worker VM expired, seconds elapsed: " + (System.currentTimeMillis() - startTick) / 1000); return true; } +*/ return false; } @@ -1087,32 +1093,12 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw // Add DC to database into vmware_data_center table vmwareDc = new VmwareDatacenterVO(guid, vmwareDcName, vCenterHost, userName, password); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - vmwareDc = _vmwareDcDao.persist(vmwareDc); - txn.commit(); 
- } catch (Exception e) { - txn.rollback(); - s_logger.error("Failed to persist VMware datacenter details to database. Exception: " + e.getMessage()); - throw new CloudRuntimeException(e.getMessage()); - } + vmwareDc = _vmwareDcDao.persist(vmwareDc); // Map zone with vmware datacenter vmwareDcZoneMap = new VmwareDatacenterZoneMapVO(zoneId, vmwareDc.getId()); - txn = Transaction.currentTxn(); - try { - txn.start(); - vmwareDcZoneMap = _vmwareDcZoneMapDao.persist(vmwareDcZoneMap); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - s_logger.error("Failed to associate VMware datacenter with zone " + zoneId + ". Exception: " + e.getMessage()); - // Removing VMware datacenter from vmware_data_center table because association with zone failed. - _vmwareDcDao.remove(vmwareDcZoneMap.getId()); - throw new CloudRuntimeException(e.getMessage()); - } + vmwareDcZoneMap = _vmwareDcZoneMapDao.persist(vmwareDcZoneMap); // Set custom field for this DC if (addDcCustomFieldDef) { @@ -1148,40 +1134,35 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw validateZoneWithResources(zoneId, "remove VMware datacenter to zone"); // Get DC associated with this zone - VmwareDatacenterZoneMapVO vmwareDcZoneMap; VmwareDatacenterVO vmwareDatacenter; String vmwareDcName; - long vmwareDcId; String vCenterHost; String userName; String password; DatacenterMO dcMo = null; Transaction txn; - vmwareDcZoneMap = _vmwareDcZoneMapDao.findByZoneId(zoneId); + final VmwareDatacenterZoneMapVO vmwareDcZoneMap = _vmwareDcZoneMapDao.findByZoneId(zoneId); // Check if zone is associated with VMware DC if (vmwareDcZoneMap == null) { throw new CloudRuntimeException("Zone " + zoneId + " is not associated with any VMware datacenter."); } - vmwareDcId = vmwareDcZoneMap.getVmwareDcId(); + final long vmwareDcId = vmwareDcZoneMap.getVmwareDcId(); vmwareDatacenter = _vmwareDcDao.findById(vmwareDcId); vmwareDcName = vmwareDatacenter.getVmwareDatacenterName(); vCenterHost = 
vmwareDatacenter.getVcenterHost(); userName = vmwareDatacenter.getUser(); password = vmwareDatacenter.getPassword(); - txn = Transaction.currentTxn(); - try { - txn.start(); - // Remove the VMware datacenter entry in table vmware_data_center - _vmwareDcDao.remove(vmwareDcId); - // Remove the map entry in table vmware_data_center_zone_map - _vmwareDcZoneMapDao.remove(vmwareDcZoneMap.getId()); - txn.commit(); - } catch (Exception e) { - s_logger.info("Caught exception when trying to delete VMware datacenter record." + e.getMessage()); - throw new CloudRuntimeException("Failed to delete VMware datacenter."); - } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // Remove the VMware datacenter entry in table vmware_data_center + _vmwareDcDao.remove(vmwareDcId); + // Remove the map entry in table vmware_data_center_zone_map + _vmwareDcZoneMapDao.remove(vmwareDcZoneMap.getId()); + } + }); // Construct context VmwareContext context = null; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java index e11e76612e3..0e2423e483b 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/manager/VmwareStorageManagerImpl.java @@ -16,39 +16,6 @@ // under the License. 
package com.cloud.hypervisor.vmware.manager; -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.OutputStreamWriter; -import java.rmi.RemoteException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.UUID; - -import org.apache.log4j.Logger; - -import com.vmware.vim25.FileInfo; -import com.vmware.vim25.FileQueryFlags; -import com.vmware.vim25.HostDatastoreBrowserSearchResults; -import com.vmware.vim25.HostDatastoreBrowserSearchSpec; -import com.vmware.vim25.ManagedObjectReference; -import com.vmware.vim25.TaskInfo; -import com.vmware.vim25.VirtualDeviceConfigSpec; -import com.vmware.vim25.VirtualDeviceConfigSpecOperation; -import com.vmware.vim25.VirtualDisk; -import com.vmware.vim25.VirtualLsiLogicController; -import com.vmware.vim25.VirtualMachineConfigSpec; -import com.vmware.vim25.VirtualMachineFileInfo; -import com.vmware.vim25.VirtualMachineGuestOsIdentifier; -import com.vmware.vim25.VirtualSCSISharing; - -import org.apache.cloudstack.storage.to.TemplateObjectTO; -import org.apache.cloudstack.storage.to.VolumeObjectTO; - import com.cloud.agent.api.Answer; import com.cloud.agent.api.BackupSnapshotAnswer; import com.cloud.agent.api.BackupSnapshotCommand; @@ -73,7 +40,6 @@ import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.NfsTO; import com.cloud.agent.api.to.StorageFilerTO; -import com.cloud.agent.api.to.VolumeTO; import com.cloud.hypervisor.vmware.mo.CustomFieldConstants; import com.cloud.hypervisor.vmware.mo.DatacenterMO; import com.cloud.hypervisor.vmware.mo.DatastoreMO; @@ -97,6 +63,29 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; import com.cloud.vm.VirtualMachine; import com.cloud.vm.snapshot.VMSnapshot; +import com.vmware.vim25.FileInfo; +import 
com.vmware.vim25.FileQueryFlags; +import com.vmware.vim25.HostDatastoreBrowserSearchResults; +import com.vmware.vim25.HostDatastoreBrowserSearchSpec; +import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.TaskInfo; +import com.vmware.vim25.VirtualDisk; +import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; + +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.OutputStreamWriter; +import java.rmi.RemoteException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.UUID; public class VmwareStorageManagerImpl implements VmwareStorageManager { @Override @@ -1280,7 +1269,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { @Override public CreateVMSnapshotAnswer execute(VmwareHostService hostService, CreateVMSnapshotCommand cmd) { - List volumeTOs = cmd.getVolumeTOs(); + List volumeTOs = cmd.getVolumeTOs(); String vmName = cmd.getVmName(); String vmSnapshotName = cmd.getTarget().getSnapshotName(); String vmSnapshotDesc = cmd.getTarget().getDescription(); @@ -1330,19 +1319,20 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { mapNewDisk.put(baseName, vmdkName); } } - for (VolumeTO volumeTO : volumeTOs) { + for (VolumeObjectTO volumeTO : volumeTOs) { String baseName = extractSnapshotBaseFileName(volumeTO.getPath()); String newPath = mapNewDisk.get(baseName); // get volume's chain size for this VM snapshot, exclude current volume vdisk + DataStoreTO store = volumeTO.getDataStore(); long size = getVMSnapshotChainSize(context,hyperHost,baseName + "*.vmdk", - volumeTO.getPoolUuid(), newPath); + store.getUuid(), newPath); - if(volumeTO.getType()== Volume.Type.ROOT){ + if(volumeTO.getVolumeType()== Volume.Type.ROOT){ // add memory snapshot 
size - size = size + getVMSnapshotChainSize(context,hyperHost,cmd.getVmName()+"*.vmsn",volumeTO.getPoolUuid(),null); + size = size + getVMSnapshotChainSize(context,hyperHost,cmd.getVmName()+"*.vmsn",store.getUuid(),null); } - volumeTO.setChainSize(size); + volumeTO.setSize(size); volumeTO.setPath(newPath); } return new CreateVMSnapshotAnswer(cmd, cmd.getTarget(), volumeTOs); @@ -1362,7 +1352,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { @Override public DeleteVMSnapshotAnswer execute(VmwareHostService hostService, DeleteVMSnapshotCommand cmd) { - List listVolumeTo = cmd.getVolumeTOs(); + List listVolumeTo = cmd.getVolumeTOs(); VirtualMachineMO vmMo = null; VmwareContext context = hostService.getServiceContext(cmd); Map mapNewDisk = new HashMap(); @@ -1403,16 +1393,17 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { mapNewDisk.put(baseName, vmdkName); } } - for (VolumeTO volumeTo : listVolumeTo) { + for (VolumeObjectTO volumeTo : listVolumeTo) { String baseName = extractSnapshotBaseFileName(volumeTo.getPath()); String newPath = mapNewDisk.get(baseName); + DataStoreTO store = volumeTo.getDataStore(); long size = getVMSnapshotChainSize(context,hyperHost, - baseName + "*.vmdk", volumeTo.getPoolUuid(), newPath); - if(volumeTo.getType()== Volume.Type.ROOT){ + baseName + "*.vmdk", store.getUuid(), newPath); + if(volumeTo.getVolumeType()== Volume.Type.ROOT){ // add memory snapshot size - size = size + getVMSnapshotChainSize(context,hyperHost,cmd.getVmName()+"*.vmsn",volumeTo.getPoolUuid(),null); + size = size + getVMSnapshotChainSize(context,hyperHost,cmd.getVmName()+"*.vmsn",volumeTo.getUuid(),null); } - volumeTo.setChainSize(size); + volumeTo.setSize(size); volumeTo.setPath(newPath); } return new DeleteVMSnapshotAnswer(cmd, listVolumeTo); @@ -1429,7 +1420,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { String snapshotName = cmd.getTarget().getSnapshotName(); String vmName = 
cmd.getVmName(); Boolean snapshotMemory = cmd.getTarget().getType() == VMSnapshot.Type.DiskAndMemory; - List listVolumeTo = cmd.getVolumeTOs(); + List listVolumeTo = cmd.getVolumeTOs(); VirtualMachine.State vmState = VirtualMachine.State.Running; VirtualMachineMO vmMo = null; VmwareContext context = hostService.getServiceContext(cmd); @@ -1483,7 +1474,7 @@ public class VmwareStorageManagerImpl implements VmwareStorageManager { } } String key = null; - for (VolumeTO volumeTo : listVolumeTo) { + for (VolumeObjectTO volumeTo : listVolumeTo) { String parentUUID = volumeTo.getPath(); String[] s = parentUUID.split("-"); key = s[0]; diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java index ed607e118d2..3079998198c 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareContextFactory.java @@ -80,8 +80,15 @@ public class VmwareContextFactory { public static VmwareContext getContext(String vCenterAddress, String vCenterUserName, String vCenterPassword) throws Exception { VmwareContext context = s_pool.getContext(vCenterAddress, vCenterUserName); - if(context == null) + if(context == null) { context = create(vCenterAddress, vCenterUserName, vCenterPassword); + } else { + if(!context.validate()) { + s_logger.info("Validation of the context failed. 
dispose and create a new one"); + context.close(); + context = create(vCenterAddress, vCenterUserName, vCenterPassword); + } + } if(context != null) { context.registerStockObject(VmwareManager.CONTEXT_STOCK_NAME, s_vmwareMgr); diff --git a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 2d404171b54..423abe67a47 100755 --- a/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -1618,12 +1618,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa throw new Exception(msg); } +/* if(!isVMWareToolsInstalled(vmMo)){ String errMsg = "vmware tools is not installed or not running, cannot add nic to vm " + vmName; s_logger.debug(errMsg); return new PlugNicAnswer(cmd, false, "Unable to execute PlugNicCommand due to " + errMsg); } - +*/ // TODO need a way to specify the control of NIC device type VirtualEthernetCardType nicDeviceType = VirtualEthernetCardType.E1000; @@ -1698,12 +1699,13 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa throw new Exception(msg); } +/* if(!isVMWareToolsInstalled(vmMo)){ String errMsg = "vmware tools not installed or not running, cannot remove nic from vm " + vmName; s_logger.debug(errMsg); return new UnPlugNicAnswer(cmd, false, "Unable to execute unPlugNicCommand due to " + errMsg); } - +*/ VirtualDevice nic = findVirtualNicDevice(vmMo, cmd.getNic().getMac()); if ( nic == null ) { return new UnPlugNicAnswer(cmd, true, "success"); @@ -2050,7 +2052,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String args = " -v " + vmIpAddress; if (s_logger.isDebugEnabled()) { - s_logger.debug("Run command on domain router " + controlIp + ", /root/savepassword.sh " + args + " -p " + 
StringUtils.getMaskedPasswordForDisplay(cmd.getPassword())); + s_logger.debug("Run command on domain router " + controlIp + ", /opt/cloud/bin/savepassword.sh " + args + " -p " + StringUtils.getMaskedPasswordForDisplay(cmd.getPassword())); } args += " -p " + password; @@ -2058,7 +2060,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa try { VmwareManager mgr = getServiceContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", mgr.getSystemVMKeyFile(), null, "/root/savepassword.sh " + args); + Pair result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", mgr.getSystemVMKeyFile(), null, "/opt/cloud/bin/savepassword.sh " + args); if (!result.first()) { s_logger.error("savepassword command on domain router " + controlIp + " failed, message: " + result.second()); @@ -2627,6 +2629,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), hyperHost.getHyperHostDatacenter()); VirtualMachineDiskInfoBuilder diskInfoBuilder = null; VirtualMachineMO vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName); + boolean hasSnapshot = false; if (vmMo != null) { s_logger.info("VM " + vmInternalCSName + " already exists, tear down devices for reconfiguration"); if (getVmState(vmMo) != State.Stopped) @@ -2634,7 +2637,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // retrieve disk information before we tear down diskInfoBuilder = vmMo.getDiskInfoBuilder(); - vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + hasSnapshot = vmMo.hasSnapshot(); + if(!hasSnapshot) + vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + else + vmMo.tearDownDevices(new Class[] { VirtualEthernetCard.class }); vmMo.ensureScsiDeviceController(); } else { ManagedObjectReference morDc = 
hyperHost.getHyperHostDatacenter(); @@ -2652,7 +2659,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa vmMo.safePowerOff(_shutdown_waitMs); diskInfoBuilder = vmMo.getDiskInfoBuilder(); - vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + hasSnapshot = vmMo.hasSnapshot(); + if(!hasSnapshot) + vmMo.tearDownDevices(new Class[] { VirtualDisk.class, VirtualEthernetCard.class }); + else + vmMo.tearDownDevices(new Class[] { VirtualEthernetCard.class }); vmMo.ensureScsiDeviceController(); } else { int ramMb = (int) (vmSpec.getMinRam() / (1024 * 1024)); @@ -2808,36 +2819,45 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // DiskTO[] sortedDisks = sortVolumesByDeviceId(disks); for (DiskTO vol : sortedDisks) { - deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); - if (vol.getType() == Volume.Type.ISO) continue; - + VirtualMachineDiskInfo matchingExistingDisk = getMatchingExistingDisk(diskInfoBuilder, vol); controllerKey = getDiskController(matchingExistingDisk, vol, vmSpec, ideControllerKey, scsiControllerKey); - VolumeObjectTO volumeTO = (VolumeObjectTO)vol.getData(); - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volumeTO.getDataStore(); - Pair volumeDsDetails = dataStoresDetails.get(primaryStore.getUuid()); - assert (volumeDsDetails != null); - VirtualDevice device; - - String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, - vol, matchingExistingDisk, - dataStoresDetails); - - device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, - diskChain, - volumeDsDetails.first(), - (controllerKey == ideControllerKey) ? 
ideUnitNumber++ : scsiUnitNumber++, i + 1); - - deviceConfigSpecArray[i].setDevice(device); - deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); - - if(s_logger.isDebugEnabled()) - s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); - - i++; + if(!hasSnapshot) { + deviceConfigSpecArray[i] = new VirtualDeviceConfigSpec(); + + VolumeObjectTO volumeTO = (VolumeObjectTO)vol.getData(); + PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volumeTO.getDataStore(); + Pair volumeDsDetails = dataStoresDetails.get(primaryStore.getUuid()); + assert (volumeDsDetails != null); + + String[] diskChain = syncDiskChain(dcMo, vmMo, vmSpec, + vol, matchingExistingDisk, + dataStoresDetails); + if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) + scsiUnitNumber++; + VirtualDevice device = VmwareHelper.prepareDiskDevice(vmMo, null, controllerKey, + diskChain, + volumeDsDetails.first(), + (controllerKey == ideControllerKey) ? 
ideUnitNumber++ : scsiUnitNumber++, i + 1); + + deviceConfigSpecArray[i].setDevice(device); + deviceConfigSpecArray[i].setOperation(VirtualDeviceConfigSpecOperation.ADD); + + if(s_logger.isDebugEnabled()) + s_logger.debug("Prepare volume at new device " + _gson.toJson(device)); + + i++; + } else { + if(controllerKey == scsiControllerKey && VmwareHelper.isReservedScsiDeviceNumber(scsiUnitNumber)) + scsiUnitNumber++; + if(controllerKey == ideControllerKey) + ideUnitNumber++; + else + scsiUnitNumber++; + } } // @@ -2884,7 +2904,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa nicCount++; } - vmConfigSpec.getDeviceChange().addAll(Arrays.asList(deviceConfigSpecArray)); + for(int j = 0; j < i; j++) + vmConfigSpec.getDeviceChange().add(deviceConfigSpecArray[j]); // // Setup VM options @@ -3570,6 +3591,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } else { argsBuf.append(" -d ").append(" -s ").append(cmd.getVpnServerIp()); } + argsBuf.append(" -C ").append(cmd.getLocalCidr()); + argsBuf.append(" -i ").append(cmd.getPublicInterface()); try { VmwareManager mgr = getServiceContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); @@ -3836,11 +3859,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa if (getVmState(vmMo) != State.Stopped) { if (vmMo.safePowerOff(_shutdown_waitMs)) { state = State.Stopped; - return new StopAnswer(cmd, "Stop VM " + cmd.getVmName() + " Succeed", 0, true); + return new StopAnswer(cmd, "Stop VM " + cmd.getVmName() + " Succeed", true); } else { String msg = "Have problem in powering off VM " + cmd.getVmName() + ", let the process continue"; s_logger.warn(msg); - return new StopAnswer(cmd, msg, 0, true); + return new StopAnswer(cmd, msg, true); } } else { state = State.Stopped; @@ -3848,7 +3871,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String msg = "VM " + cmd.getVmName() + " is already in 
stopped state"; s_logger.info(msg); - return new StopAnswer(cmd, msg, 0, true); + return new StopAnswer(cmd, msg, true); } finally { synchronized (_vms) { _vms.put(cmd.getVmName(), state); @@ -3861,7 +3884,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa String msg = "VM " + cmd.getVmName() + " is no longer in vSphere"; s_logger.info(msg); - return new StopAnswer(cmd, msg, 0, true); + return new StopAnswer(cmd, msg, true); } } catch (Exception e) { if (e instanceof RemoteException) { @@ -4436,7 +4459,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa return str.replace('/', '-'); } - private String trimIqn(String iqn) { + public static String trimIqn(String iqn) { String[] tmp = iqn.split("/"); if (tmp.length != 3) { @@ -4451,36 +4474,23 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } @Override - public ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort, - String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception { + public void createVmdk(Command cmd, DatastoreMO dsMo, String vmdkDatastorePath, Long volumeSize) throws Exception { VmwareContext context = getServiceContext(); VmwareHypervisorHost hyperHost = getHyperHost(context); - ManagedObjectReference morDs = createVmfsDatastore(hyperHost, getDatastoreName(iqn), - storageHost, storagePort, trimIqn(iqn), - initiatorUsername, initiatorPassword, - targetUsername, targetPassword); + String dummyVmName = getWorkerName(context, cmd, 0); - DatastoreMO dsMo = new DatastoreMO(context, morDs); + VirtualMachineMO vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName); - String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName()); - - if (!dsMo.fileExists(volumeDatastorePath)) { - String dummyVmName = getWorkerName(context, cmd, 0); - - 
VirtualMachineMO vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName); - - if (vmMo == null) { - throw new Exception("Unable to create a dummy VM for volume creation"); - } - - vmMo.createDisk(volumeDatastorePath, getMBsFromBytes(dsMo.getSummary().getFreeSpace()), - morDs, vmMo.getScsiDeviceControllerKey()); - vmMo.detachDisk(volumeDatastorePath, false); - vmMo.destroy(); + if (vmMo == null) { + throw new Exception("Unable to create a dummy VM for volume creation"); } - return morDs; + Long volumeSizeToUse = volumeSize < dsMo.getSummary().getFreeSpace() ? volumeSize : dsMo.getSummary().getFreeSpace(); + + vmMo.createDisk(vmdkDatastorePath, getMBsFromBytes(volumeSizeToUse), dsMo.getMor(), vmMo.getScsiDeviceControllerKey()); + vmMo.detachDisk(vmdkDatastorePath, false); + vmMo.destroy(); } @Override @@ -4513,9 +4523,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa ManagedObjectReference morDs = null; if (cmd.getAttach() && cmd.isManaged()) { - morDs = handleDatastoreAndVmdkAttach(cmd, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.getStoragePort(), - cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(), - cmd.getChapTargetUsername(), cmd.getChapTargetPassword()); + morDs = getVmfsDatastore(hyperHost, getDatastoreName(cmd.get_iScsiName()), cmd.getStorageHost(), cmd.getStoragePort(), trimIqn(cmd.get_iScsiName()), + cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(), cmd.getChapTargetUsername(), cmd.getChapTargetPassword()); + + DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs); + + String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName()); + + if (!dsMo.fileExists(volumeDatastorePath)) { + createVmdk(cmd, dsMo, VmwareResource.getDatastoreName(cmd.get_iScsiName()), cmd.getVolumeSize()); + } } else { morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getPoolUuid()); @@ -4528,10 +4545,18 @@ public class 
VmwareResource implements StoragePoolResource, ServerResource, Vmwa } DatastoreMO dsMo = new DatastoreMO(getServiceContext(), morDs); - VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), cmd.getVmName(), - dsMo, cmd.getVolumePath()); - - String datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true); + + String datastoreVolumePath = null; + + if (cmd.isManaged()) { + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + } + else { + VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), cmd.getVmName(), dsMo, cmd.getVolumePath()); + + datastoreVolumePath = dsMo.searchFileInSubFolders(cmd.getVolumePath() + ".vmdk", true); + } + assert (datastoreVolumePath != null) : "Virtual disk file must exist in specified datastore for attach/detach operations."; if (datastoreVolumePath == null) { throw new CloudRuntimeException("Unable to find file " + cmd.getVolumePath() + ".vmdk in datastore " + dsMo.getName()); @@ -4684,7 +4709,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa } } - private ManagedObjectReference createVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, + public ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, String iqn, String chapName, String chapSecret, String mutualChapName, String mutualChapSecret) throws Exception { VmwareContext context = getServiceContext(); ManagedObjectReference morCluster = hyperHost.getHyperHostCluster(); @@ -5407,7 +5432,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa // tear down all devices first before we destroy the VM to avoid accidently delete disk backing files if (getVmState(vmMo) != State.Stopped) vmMo.safePowerOff(_shutdown_waitMs); - vmMo.tearDownDevices(new Class[] { VirtualDisk.class, 
VirtualEthernetCard.class }); + vmMo.tearDownDevices(new Class[] { /* VirtualDisk.class, */ VirtualEthernetCard.class }); vmMo.destroy(); for (NetworkDetails netDetails : networks) { diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java b/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java index 9ec64ffce9d..1b08a5caceb 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/CiscoNexusVSMDeviceManagerImpl.java @@ -38,6 +38,8 @@ import com.cloud.resource.ResourceManager; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.network.dao.CiscoNexusVSMDeviceDao; import com.cloud.network.dao.PortProfileDao; @@ -131,29 +133,13 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { if (VSMObj == null) { // Create the VSM record. For now, we aren't using the vsmName field. VSMObj = new CiscoNexusVSMDeviceVO(ipaddress, username, password); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - _ciscoNexusVSMDeviceDao.persist(VSMObj); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - throw new CloudRuntimeException(e.getMessage()); - } + _ciscoNexusVSMDeviceDao.persist(VSMObj); } // At this stage, we have a VSM record for sure. Connect the VSM to the cluster Id. 
long vsmId = _ciscoNexusVSMDeviceDao.getVSMbyIpaddress(ipaddress).getId(); ClusterVSMMapVO connectorObj = new ClusterVSMMapVO(clusterId, vsmId); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - _clusterVSMDao.persist(connectorObj); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - throw new CloudRuntimeException(e.getMessage()); - } + _clusterVSMDao.persist(connectorObj); // Now, get a list of all the ESXi servers in this cluster. // This is effectively a select * from host where cluster_id=clusterId; @@ -196,7 +182,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { } @DB - public boolean deleteCiscoNexusVSM(long vsmId) throws ResourceInUseException { + public boolean deleteCiscoNexusVSM(final long vsmId) throws ResourceInUseException { CiscoNexusVSMDeviceVO cisconexusvsm = _ciscoNexusVSMDeviceDao.findById(vsmId); if (cisconexusvsm == null) { // This entry is already not present. Return success. @@ -225,20 +211,16 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { } // Iterate through the cluster list again, this time, delete the VSM. - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - // Remove the VSM entry in CiscoNexusVSMDeviceVO's table. - _ciscoNexusVSMDeviceDao.remove(vsmId); - // Remove the current record as well from ClusterVSMMapVO's table. - _clusterVSMDao.removeByVsmId(vsmId); - // There are no hosts at this stage in the cluster, so we don't need - // to notify any resources or remove host details. - txn.commit(); - } catch (Exception e) { - s_logger.info("Caught exception when trying to delete VSM record.." + e.getMessage()); - throw new CloudRuntimeException("Failed to delete VSM"); - } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // Remove the VSM entry in CiscoNexusVSMDeviceVO's table. 
+ _ciscoNexusVSMDeviceDao.remove(vsmId); + // Remove the current record as well from ClusterVSMMapVO's table. + _clusterVSMDao.removeByVsmId(vsmId); + } + }); + return true; } @@ -252,15 +234,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { if (cisconexusvsm.getvsmDeviceState() == CiscoNexusVSMDeviceVO.VSMDeviceState.Disabled) { // it's currently disabled. So change it to enabled and write it out to the db. cisconexusvsm.setVsmDeviceState(CiscoNexusVSMDeviceVO.VSMDeviceState.Enabled); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - _ciscoNexusVSMDeviceDao.persist(cisconexusvsm); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - throw new CloudRuntimeException(e.getMessage()); - } + _ciscoNexusVSMDeviceDao.persist(cisconexusvsm); } return cisconexusvsm; @@ -276,15 +250,7 @@ public abstract class CiscoNexusVSMDeviceManagerImpl extends AdapterBase { if (cisconexusvsm.getvsmDeviceState() == CiscoNexusVSMDeviceVO.VSMDeviceState.Enabled) { // it's currently disabled. So change it to enabled and write it out to the db. 
cisconexusvsm.setVsmDeviceState(CiscoNexusVSMDeviceVO.VSMDeviceState.Disabled); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - _ciscoNexusVSMDeviceDao.persist(cisconexusvsm); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - throw new CloudRuntimeException(e.getMessage()); - } + _ciscoNexusVSMDeviceDao.persist(cisconexusvsm); } return cisconexusvsm; diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java b/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java index cc25573dd2d..5379bba13ec 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/dao/CiscoNexusVSMDeviceDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=CiscoNexusVSMDeviceDao.class) @DB(txn=false) +@Local(value=CiscoNexusVSMDeviceDao.class) @DB public class CiscoNexusVSMDeviceDaoImpl extends GenericDaoBase implements CiscoNexusVSMDeviceDao { protected static final Logger s_logger = Logger.getLogger(CiscoNexusVSMDeviceDaoImpl.class); final SearchBuilder mgmtVlanIdSearch; diff --git a/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java b/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java index 6c6ce557310..92564dd2c4e 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java +++ b/plugins/hypervisors/vmware/src/com/cloud/network/element/CiscoNexusVSMElement.java @@ -61,6 +61,8 @@ import com.cloud.utils.cisco.n1kv.vsm.NetconfHelper; import com.cloud.utils.component.Manager; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import 
com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; @@ -260,7 +262,7 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl impleme } @DB - public Pair validateAndAddVsm(String vsmIp, String vsmUser, String vsmPassword, long clusterId, String clusterName) throws ResourceInUseException { + public Pair validateAndAddVsm(final String vsmIp, final String vsmUser, final String vsmPassword, final long clusterId, String clusterName) throws ResourceInUseException { CiscoNexusVSMDeviceVO vsm = null; boolean vsmAdded = false; Long vsmId = 0L; @@ -293,36 +295,24 @@ public class CiscoNexusVSMElement extends CiscoNexusVSMDeviceManagerImpl impleme } } // persist credentials to database if the VSM entry is not already in the db. - if (_vsmDao.getVSMbyIpaddress(vsmIp) == null) { - vsm = new CiscoNexusVSMDeviceVO(vsmIp, vsmUser, vsmPassword); - txn = Transaction.currentTxn(); - try { - txn.start(); - vsm = _vsmDao.persist(vsm); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - s_logger.error("Failed to persist Cisco Nexus 1000v VSM details to database. Exception: " + e.getMessage()); - throw new CloudRuntimeException(e.getMessage()); + vsm = Transaction.execute(new TransactionCallback() { + @Override + public CiscoNexusVSMDeviceVO doInTransaction(TransactionStatus status) { + CiscoNexusVSMDeviceVO vsm = null; + if (_vsmDao.getVSMbyIpaddress(vsmIp) == null) { + vsm = new CiscoNexusVSMDeviceVO(vsmIp, vsmUser, vsmPassword); + vsm = _vsmDao.persist(vsm); + } + // Create a mapping between the cluster and the vsm. + vsm = _vsmDao.getVSMbyIpaddress(vsmIp); + if (vsm != null) { + ClusterVSMMapVO connectorObj = new ClusterVSMMapVO(clusterId, vsm.getId()); + _clusterVSMDao.persist(connectorObj); + } + return vsm; } - } - // Create a mapping between the cluster and the vsm. 
- vsm = _vsmDao.getVSMbyIpaddress(vsmIp); - if (vsm != null) { - ClusterVSMMapVO connectorObj = new ClusterVSMMapVO(clusterId, vsm.getId()); - txn = Transaction.currentTxn(); - try { - txn.start(); - _clusterVSMDao.persist(connectorObj); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - s_logger.error("Failed to associate Cisco Nexus 1000v VSM with cluster: " + clusterName + ". Exception: " + e.getMessage()); - _vsmDao.remove(vsm.getId()); // Removing VSM from virtual_supervisor_module table because association with cluster failed. - // Cluster would be deleted from cluster table by callee. - throw new CloudRuntimeException(e.getMessage()); - } - } + }); + } else { String msg; msg = "The global parameter " + Config.VmwareUseNexusVSwitch.toString() + diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java index 5365e58e78e..253d6fd3517 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageContextFactory.java @@ -16,11 +16,15 @@ // under the License. 
package com.cloud.storage.resource; +import org.apache.log4j.Logger; + import com.cloud.hypervisor.vmware.util.VmwareClient; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.hypervisor.vmware.util.VmwareContextPool; public class VmwareSecondaryStorageContextFactory { + private static final Logger s_logger = Logger.getLogger(VmwareSecondaryStorageContextFactory.class); + private static volatile int s_seq = 1; private static VmwareContextPool s_pool; @@ -51,6 +55,12 @@ public class VmwareSecondaryStorageContextFactory { VmwareContext context = s_pool.getContext(vCenterAddress, vCenterUserName); if(context == null) { context = create(vCenterAddress, vCenterUserName, vCenterPassword); + } else { + if(!context.validate()) { + s_logger.info("Validation of the context failed. Dispose and create a new one"); + context.close(); + context = create(vCenterAddress, vCenterUserName, vCenterPassword); + } } if(context != null) { diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java index 2c302ab29fc..c84813f0b30 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareSecondaryStorageResourceHandler.java @@ -37,6 +37,7 @@ import com.cloud.hypervisor.vmware.manager.VmwareStorageManager; import com.cloud.hypervisor.vmware.manager.VmwareStorageManagerImpl; import com.cloud.hypervisor.vmware.manager.VmwareStorageMount; import com.cloud.hypervisor.vmware.mo.ClusterMO; +import com.cloud.hypervisor.vmware.mo.DatastoreMO; import com.cloud.hypervisor.vmware.mo.HostMO; import com.cloud.hypervisor.vmware.mo.VmwareHostType; import com.cloud.hypervisor.vmware.mo.VmwareHypervisorHost; @@ -347,8 +348,12 @@ public class VmwareSecondaryStorageResourceHandler implements SecondaryStorageRe
return true; } - public ManagedObjectReference handleDatastoreAndVmdkAttach(Command cmd, String iqn, String storageHost, int storagePort, - String initiatorUsername, String initiatorPassword, String targetUsername, String targetPassword) throws Exception { + public ManagedObjectReference getVmfsDatastore(VmwareHypervisorHost hyperHost, String datastoreName, String storageIpAddress, int storagePortNumber, + String iqn, String initiatorChapName, String initiatorChapSecret, String mutualChapName, String mutualChapSecret) throws Exception { + throw new OperationNotSupportedException(); + } + + public void createVmdk(Command cmd, DatastoreMO dsMo, String volumeDatastorePath, Long volumeSize) throws Exception { throw new OperationNotSupportedException(); } diff --git a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java index 4982d879751..71ba4e9ff87 100644 --- a/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java +++ b/plugins/hypervisors/vmware/src/com/cloud/storage/resource/VmwareStorageProcessor.java @@ -26,22 +26,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.UUID; - -import org.apache.commons.lang.StringUtils; -import org.apache.log4j.Logger; - -import com.google.gson.Gson; -import com.vmware.vim25.ManagedObjectReference; -import com.vmware.vim25.VirtualDeviceConfigSpec; -import com.vmware.vim25.VirtualDeviceConfigSpecOperation; -import com.vmware.vim25.VirtualDisk; -import com.vmware.vim25.VirtualEthernetCard; -import com.vmware.vim25.VirtualLsiLogicController; -import com.vmware.vim25.VirtualMachineConfigSpec; -import com.vmware.vim25.VirtualMachineFileInfo; -import com.vmware.vim25.VirtualMachineGuestOsIdentifier; -import com.vmware.vim25.VirtualSCSISharing; - import org.apache.cloudstack.storage.command.AttachAnswer; import 
org.apache.cloudstack.storage.command.AttachCommand; import org.apache.cloudstack.storage.command.CopyCmdAnswer; @@ -50,10 +34,14 @@ import org.apache.cloudstack.storage.command.CreateObjectAnswer; import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.commons.lang.StringUtils; +import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.Command; @@ -88,10 +76,13 @@ import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.script.Script; import com.cloud.vm.VirtualMachine.State; +import com.google.gson.Gson; +import com.vmware.vim25.ManagedObjectReference; +import com.vmware.vim25.VirtualDisk; public class VmwareStorageProcessor implements StorageProcessor { private static final Logger s_logger = Logger.getLogger(VmwareStorageProcessor.class); - + private VmwareHostService hostService; private boolean _fullCloneFlag; private VmwareStorageMount mountService; @@ -128,9 +119,9 @@ public class VmwareStorageProcessor implements StorageProcessor { } return null; } - + private void copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl, - String templatePathAtSecondaryStorage, String templateName, String templateUuid) throws Exception { + String templatePathAtSecondaryStorage, String templateName, String templateUuid) throws Exception { s_logger.info("Executing copyTemplateFromSecondaryToPrimary. 
secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " + templatePathAtSecondaryStorage @@ -140,9 +131,9 @@ public class VmwareStorageProcessor implements StorageProcessor { s_logger.info("Secondary storage mount point: " + secondaryMountPoint); String srcOVAFileName = VmwareStorageLayoutHelper.getTemplateOnSecStorageFilePath( - secondaryMountPoint, templatePathAtSecondaryStorage, - templateName, ImageFormat.OVA.getFileExtension()); - + secondaryMountPoint, templatePathAtSecondaryStorage, + templateName, ImageFormat.OVA.getFileExtension()); + String srcFileName = getOVFFilePath(srcOVAFileName); if(srcFileName == null) { Script command = new Script("tar", 0, s_logger); @@ -178,8 +169,8 @@ public class VmwareStorageProcessor implements StorageProcessor { } if(vmMo.createSnapshot("cloud.template.base", "Base snapshot", false, false)) { - // the same template may be deployed with multiple copies at per-datastore per-host basis, - // save the original template name from CloudStack DB as the UUID to associate them. + // the same template may be deployed with multiple copies at per-datastore per-host basis, + // save the original template name from CloudStack DB as the UUID to associate them. 
vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_UUID, templateName); vmMo.markAsTemplate(); } else { @@ -197,7 +188,7 @@ public class VmwareStorageProcessor implements StorageProcessor { DataStoreTO srcStore = srcData.getDataStore(); if (!(srcStore instanceof NfsTO)) { return new CopyCmdAnswer("unsupported protocol"); - } + } NfsTO nfsImageStore = (NfsTO)srcStore; DataTO destData = cmd.getDestTO(); DataStoreTO destStore = destData.getDataStore(); @@ -206,9 +197,9 @@ public class VmwareStorageProcessor implements StorageProcessor { assert (secondaryStorageUrl != null); String templateUrl = secondaryStorageUrl + "/" + srcData.getPath(); - + Pair templateInfo = VmwareStorageLayoutHelper.decodeTemplateRelativePathAndNameFromUrl( - secondaryStorageUrl, templateUrl, template.getName()); + secondaryStorageUrl, templateUrl, template.getName()); VmwareContext context = hostService.getServiceContext(cmd); try { @@ -246,7 +237,7 @@ public class VmwareStorageProcessor implements StorageProcessor { return new CopyCmdAnswer(msg); } } - + private boolean createVMLinkedClone(VirtualMachineMO vmTemplate, DatacenterMO dcMo, DatastoreMO dsMo, String vmdkName, ManagedObjectReference morDatastore, ManagedObjectReference morPool) throws Exception { @@ -265,16 +256,16 @@ public class VmwareStorageProcessor implements StorageProcessor { } s_logger.info("Move volume out of volume-wrapper VM "); - String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, - vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, true); - String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, - vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true); - + String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, + vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, true); + String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, + 
vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, true); + dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); - + dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], @@ -292,18 +283,18 @@ public class VmwareStorageProcessor implements StorageProcessor { s_logger.error(msg); throw new Exception(msg); } - + s_logger.info("Move volume out of volume-wrapper VM "); - String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, - vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, false); - String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, - vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false); - + String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, + vmdkName, vmdkName, VmwareStorageLayoutType.VMWARE, false); + String[] legacyCloudStackLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, + vmdkName, vmdkName, VmwareStorageLayoutType.CLOUDSTACK_LEGACY, false); + dsMo.moveDatastoreFile(vmwareLayoutFilePair[0], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[0], dcMo.getMor(), true); - + dsMo.moveDatastoreFile(vmwareLayoutFilePair[1], dcMo.getMor(), dsMo.getMor(), legacyCloudStackLayoutFilePair[1], @@ -343,22 +334,17 @@ public class VmwareStorageProcessor implements StorageProcessor { throw new Exception("Unable to create a dummy VM for volume creation"); } - String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkName, - VmwareStorageLayoutType.CLOUDSTACK_LEGACY, - true // we only use the first file in the pair, linked or not will not matter - ); + String vmdkFilePair[] = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, null, vmdkName, + VmwareStorageLayoutType.CLOUDSTACK_LEGACY, + true // 
we only use the first file in the pair, linked or not will not matter + ); String volumeDatastorePath = vmdkFilePair[0]; synchronized (this) { s_logger.info("Delete file if exists in datastore to clear the way for creating the volume. file: " + volumeDatastorePath); VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vmdkName, dcMo); vmMo.createDisk(volumeDatastorePath, (int) (volume.getSize() / (1024L * 1024L)), morDatastore, -1); vmMo.detachDisk(volumeDatastorePath, false); - } - - VolumeObjectTO newVol = new VolumeObjectTO(); - newVol.setPath(vmdkName); - newVol.setSize(volume.getSize()); - return new CopyCmdAnswer(newVol); + } } finally { vmMo.detachAllDisks(); @@ -392,11 +378,19 @@ public class VmwareStorageProcessor implements StorageProcessor { String srcFile = dsMo.getDatastorePath(vmdkName, true); dsMo.deleteFile(srcFile, dcMo.getMor(), true); - VolumeObjectTO newVol = new VolumeObjectTO(); - newVol.setPath(vmdkName); - newVol.setSize(volume.getSize()); - return new CopyCmdAnswer(newVol); } + // restoreVM - move the new ROOT disk into corresponding VM folder + String vmInternalCSName = volume.getVmName(); + if (dsMo.folderExists(String.format("[%s]", dsMo.getName()), vmInternalCSName)) { + String oldRootDisk = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmInternalCSName, vmdkName); + if (oldRootDisk != null) + VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dcMo, vmInternalCSName, dsMo, vmdkName); + } + + VolumeObjectTO newVol = new VolumeObjectTO(); + newVol.setPath(vmdkName); + newVol.setSize(volume.getSize()); + return new CopyCmdAnswer(newVol); } catch (Throwable e) { if (e instanceof RemoteException) { s_logger.warn("Encounter remote exception to vCenter, invalidate VMware session context"); @@ -506,7 +500,7 @@ public class VmwareStorageProcessor implements StorageProcessor { try { ManagedObjectReference morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, poolId); - + if (morDs == null) { 
String msg = "Unable to find volumes's storage pool for copy volume operation"; s_logger.error(msg); @@ -518,7 +512,7 @@ public class VmwareStorageProcessor implements StorageProcessor { // create a dummy worker vm for attaching the volume DatastoreMO dsMo = new DatastoreMO(hyperHost.getContext(), morDs); workerVm = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVmName); - + if (workerVm == null) { String msg = "Unable to create worker VM to execute CopyVolumeCommand"; s_logger.error(msg); @@ -657,7 +651,7 @@ public class VmwareStorageProcessor implements StorageProcessor { Pair cloneResult = vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); clonedVm = cloneResult.first(); - + clonedVm.exportVm(secondaryMountPoint + "/" + installPath, templateUniqueName, true, false); long physicalSize = new File(installFullPath + "/" + templateUniqueName + ".ova").length(); @@ -960,7 +954,7 @@ public class VmwareStorageProcessor implements StorageProcessor { throw new Exception("unable to prepare snapshot backup directory"); } } - } + } VirtualMachineMO clonedVm = null; try { @@ -974,7 +968,7 @@ public class VmwareStorageProcessor implements StorageProcessor { // 4 MB is the minimum requirement for VM memory in VMware Pair cloneResult = vmMo.cloneFromCurrentSnapshot(workerVmName, 0, 4, volumeDeviceInfo.second(), - VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); + VmwareHelper.getDiskDeviceDatastore(volumeDeviceInfo.first())); clonedVm = cloneResult.first(); String disks[] = cloneResult.second(); @@ -998,7 +992,7 @@ public class VmwareStorageProcessor implements StorageProcessor { installPath, backupUuid, workerVmName); return new Ternary(backupUuid + "/" + backupUuid, snapshotInfo.first(), snapshotInfo.second()); } - + @Override public Answer backupSnapshot(CopyCommand cmd) { SnapshotObjectTO srcSnapshot = (SnapshotObjectTO)cmd.getSrcTO(); @@ -1025,7 +1019,7 @@ 
public class VmwareStorageProcessor implements StorageProcessor { String details = null; boolean success = false; String snapshotBackupUuid = null; - + boolean hasOwnerVm = false; Ternary backupResult = null; @@ -1037,7 +1031,7 @@ public class VmwareStorageProcessor implements StorageProcessor { morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, primaryStore.getUuid()); CopyCmdAnswer answer = null; - + try { vmMo = hyperHost.findVmOnHyperHost(vmName); if (vmMo == null) { @@ -1050,7 +1044,7 @@ public class VmwareStorageProcessor implements StorageProcessor { dsMo = new DatastoreMO(hyperHost.getContext(), morDs); workerVMName = hostService.getWorkerName(context, cmd, 0); - + vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, workerVMName); if (vmMo == null) { @@ -1062,12 +1056,12 @@ public class VmwareStorageProcessor implements StorageProcessor { String datastoreVolumePath = dsMo.getDatastorePath(volumePath + ".vmdk"); vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs); } else { - s_logger.info("Using owner VM " + vmName + " for snapshot operation"); - hasOwnerVm = true; + s_logger.info("Using owner VM " + vmName + " for snapshot operation"); + hasOwnerVm = true; } } else { - s_logger.info("Using owner VM " + vmName + " for snapshot operation"); - hasOwnerVm = true; + s_logger.info("Using owner VM " + vmName + " for snapshot operation"); + hasOwnerVm = true; } if (!vmMo.createSnapshot(snapshotUuid, "Snapshot taken for " + srcSnapshot.getName(), false, false)) { @@ -1093,52 +1087,52 @@ public class VmwareStorageProcessor implements StorageProcessor { ManagedObjectReference snapshotMor = vmMo.getSnapshotMor(snapshotUuid); if (snapshotMor != null) { vmMo.removeSnapshot(snapshotUuid, false); - + // Snapshot operation may cause disk consolidation in VMware, when this happens // we need to update CloudStack DB // // TODO: this post operation fixup is not atomic and not safe when management server stops // in the middle 
if(backupResult != null && hasOwnerVm) { - s_logger.info("Check if we have disk consolidation after snapshot operation"); - - boolean chainConsolidated = false; - for(String vmdkDsFilePath : backupResult.third()) { - s_logger.info("Validate disk chain file:" + vmdkDsFilePath); - - if(vmMo.getDiskDevice(vmdkDsFilePath, false) == null) { - s_logger.info("" + vmdkDsFilePath + " no longer exists, consolidation detected"); - chainConsolidated = true; - break; - } else { - s_logger.info("" + vmdkDsFilePath + " is found still in chain"); - } - } - - if(chainConsolidated) { - String topVmdkFilePath = null; - try { - topVmdkFilePath = vmMo.getDiskCurrentTopBackingFileInChain(backupResult.second()); - } catch(Exception e) { - s_logger.error("Unexpected exception", e); - } - - s_logger.info("Disk has been consolidated, top VMDK is now: " + topVmdkFilePath); - if(topVmdkFilePath != null) { - DatastoreFile file = new DatastoreFile(topVmdkFilePath); - - SnapshotObjectTO snapshotInfo = (SnapshotObjectTO)answer.getNewData(); - VolumeObjectTO vol = new VolumeObjectTO(); - vol.setUuid(srcSnapshot.getVolume().getUuid()); - vol.setPath(file.getFileBaseName()); - snapshotInfo.setVolume(vol); - } else { - s_logger.error("Disk has been consolidated, but top VMDK is not found ?!"); - } - } + s_logger.info("Check if we have disk consolidation after snapshot operation"); + + boolean chainConsolidated = false; + for(String vmdkDsFilePath : backupResult.third()) { + s_logger.info("Validate disk chain file:" + vmdkDsFilePath); + + if(vmMo.getDiskDevice(vmdkDsFilePath, false) == null) { + s_logger.info("" + vmdkDsFilePath + " no longer exists, consolidation detected"); + chainConsolidated = true; + break; + } else { + s_logger.info("" + vmdkDsFilePath + " is found still in chain"); + } + } + + if(chainConsolidated) { + String topVmdkFilePath = null; + try { + topVmdkFilePath = vmMo.getDiskCurrentTopBackingFileInChain(backupResult.second()); + } catch(Exception e) { + s_logger.error("Unexpected 
exception", e); + } + + s_logger.info("Disk has been consolidated, top VMDK is now: " + topVmdkFilePath); + if(topVmdkFilePath != null) { + DatastoreFile file = new DatastoreFile(topVmdkFilePath); + + SnapshotObjectTO snapshotInfo = (SnapshotObjectTO)answer.getNewData(); + VolumeObjectTO vol = new VolumeObjectTO(); + vol.setUuid(srcSnapshot.getVolume().getUuid()); + vol.setPath(file.getFileBaseName()); + snapshotInfo.setVolume(vol); + } else { + s_logger.error("Disk has been consolidated, but top VMDK is not found ?!"); + } + } } } else { - s_logger.error("Can not find the snapshot we just used ?!"); + s_logger.error("Can not find the snapshot we just used ?!"); } } @@ -1152,7 +1146,7 @@ public class VmwareStorageProcessor implements StorageProcessor { s_logger.warn("Failed to destroy worker VM: " + workerVMName); } } - + return answer; } catch (Throwable e) { if (e instanceof RemoteException) { @@ -1173,19 +1167,16 @@ public class VmwareStorageProcessor implements StorageProcessor { @Override public Answer attachVolume(AttachCommand cmd) { - return this.attachVolume(cmd, cmd.getDisk(), true, cmd.isManaged(), cmd.getVmName(), cmd.get_iScsiName(), - cmd.getStorageHost(), cmd.getStoragePort(), cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(), - cmd.getChapTargetUsername(), cmd.getChapTargetPassword()); + Map details = cmd.getDisk().getDetails(); + boolean isManaged = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + String iScsiName = details.get(DiskTO.IQN); + String storageHost = details.get(DiskTO.STORAGE_HOST); + int storagePort = Integer.parseInt(details.get(DiskTO.STORAGE_PORT)); + + return this.attachVolume(cmd, cmd.getDisk(), true, isManaged, cmd.getVmName(), iScsiName, storageHost, storagePort); } private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean isManaged, String vmName, String iScsiName, String storageHost, int storagePort) { - return attachVolume(cmd, disk, isAttach, isManaged, vmName, iScsiName, storageHost, 
storagePort, null, null, null, null); - } - - private Answer attachVolume(Command cmd, DiskTO disk, boolean isAttach, boolean isManaged, String vmName, - String iScsiName, String storageHost, int storagePort, String initiatorUsername, String initiatorPassword, - String targetUsername, String targetPassword) { - VolumeObjectTO volumeTO = (VolumeObjectTO)disk.getData(); PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)volumeTO.getDataStore(); try { @@ -1200,12 +1191,23 @@ public class VmwareStorageProcessor implements StorageProcessor { ManagedObjectReference morDs = null; if (isAttach && isManaged) { - morDs = hostService.handleDatastoreAndVmdkAttach(cmd, iScsiName, storageHost, storagePort, - initiatorUsername, initiatorPassword, targetUsername, targetPassword); + Map details = disk.getDetails(); + + morDs = hostService.getVmfsDatastore(hyperHost, VmwareResource.getDatastoreName(iScsiName), storageHost, storagePort, + VmwareResource.trimIqn(iScsiName), details.get(DiskTO.CHAP_INITIATOR_USERNAME), details.get(DiskTO.CHAP_INITIATOR_SECRET), + details.get(DiskTO.CHAP_TARGET_USERNAME), details.get(DiskTO.CHAP_TARGET_SECRET)); + + DatastoreMO dsMo = new DatastoreMO(hostService.getServiceContext(null), morDs); + + String volumeDatastorePath = String.format("[%s] %s.vmdk", dsMo.getName(), dsMo.getName()); + + if (!dsMo.fileExists(volumeDatastorePath)) { + hostService.createVmdk(cmd, dsMo, VmwareResource.getDatastoreName(iScsiName), volumeTO.getSize()); + } } else { morDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, isManaged ? 
VmwareResource.getDatastoreName(iScsiName) : primaryStore.getUuid()); - } + } if (morDs == null) { String msg = "Unable to find the mounted datastore to execute AttachVolumeCommand, vmName: " + vmName; @@ -1216,31 +1218,42 @@ public class VmwareStorageProcessor implements StorageProcessor { DatastoreMO dsMo = new DatastoreMO(this.hostService.getServiceContext(null), morDs); String datastoreVolumePath; - if(isAttach) { - if(!isManaged) - datastoreVolumePath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), vmName, - dsMo, volumeTO.getPath()); - else - datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); - } else { - datastoreVolumePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, volumeTO.getPath() + ".vmdk"); - if(!dsMo.fileExists(datastoreVolumePath)) - datastoreVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, volumeTO.getPath() + ".vmdk"); + if (isAttach) { + if (isManaged) { + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + } + else { + datastoreVolumePath = VmwareStorageLayoutHelper.syncVolumeToVmDefaultFolder(dsMo.getOwnerDatacenter().first(), vmName, dsMo, volumeTO.getPath()); + } } - - disk.setVdiUuid(datastoreVolumePath); + else { + if (isManaged) { + datastoreVolumePath = dsMo.getDatastorePath(dsMo.getName() + ".vmdk"); + } + else { + datastoreVolumePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, volumeTO.getPath() + ".vmdk"); + + if (!dsMo.fileExists(datastoreVolumePath)) { + datastoreVolumePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, volumeTO.getPath() + ".vmdk"); + } + } + } + + disk.setPath(datastoreVolumePath); AttachAnswer answer = new AttachAnswer(disk); + if (isAttach) { vmMo.attachDisk(new String[] { datastoreVolumePath }, morDs); - } else { + } + else { vmMo.removeAllSnapshots(); vmMo.detachDisk(datastoreVolumePath, false); 
if (isManaged) { this.hostService.handleDatastoreAndVmdkDetach(iScsiName, storageHost, storagePort); } else { - VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, volumeTO.getPath()); + VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, volumeTO.getPath()); } } @@ -1274,7 +1287,7 @@ public class VmwareStorageProcessor implements StorageProcessor { return morDatastore; } - + private Answer attachIso(DiskTO disk, boolean isAttach, String vmName) { try { VmwareHypervisorHost hyperHost = hostService.getHyperHost(hostService.getServiceContext(null), null); @@ -1387,7 +1400,7 @@ public class VmwareStorageProcessor implements StorageProcessor { String volumeDatastorePath = dsMo.getDatastorePath(volumeUuid + ".vmdk"); String dummyVmName = this.hostService.getWorkerName(context, cmd, 0); try { - s_logger.info("Create worker VM " + dummyVmName); + s_logger.info("Create worker VM " + dummyVmName); vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, dummyVmName); if (vmMo == null) { throw new Exception("Unable to create a dummy VM for volume creation"); @@ -1408,8 +1421,8 @@ public class VmwareStorageProcessor implements StorageProcessor { } finally { s_logger.info("Destroy dummy VM after volume creation"); if(vmMo != null) { - vmMo.detachAllDisks(); - vmMo.destroy(); + vmMo.detachAllDisks(); + vmMo.destroy(); } } } catch (Throwable e) { @@ -1460,7 +1473,7 @@ public class VmwareStorageProcessor implements StorageProcessor { ClusterMO clusterMo = new ClusterMO(context, morCluster); if (vol.getVolumeType() == Volume.Type.ROOT) { - + String vmName = vol.getVmName(); if (vmName != null) { VirtualMachineMO vmMo = clusterMo.findVmOnHyperHost(vmName); @@ -1471,12 +1484,12 @@ public class VmwareStorageProcessor implements StorageProcessor { // Remove all snapshots to consolidate disks for removal vmMo.removeAllSnapshots(); - + VirtualMachineDiskInfo diskInfo = null; if(vol.getChainInfo() != null) - 
diskInfo = _gson.fromJson(vol.getChainInfo(), VirtualMachineDiskInfo.class); - - + diskInfo = _gson.fromJson(vol.getChainInfo(), VirtualMachineDiskInfo.class); + + HostMO hostMo = vmMo.getRunningHost(); List networks = vmMo.getNetworksWithDetails(); @@ -1484,7 +1497,7 @@ public class VmwareStorageProcessor implements StorageProcessor { if (this.resource.getVmState(vmMo) != State.Stopped) { vmMo.safePowerOff(_shutdown_waitMs); } - + List detachedDisks = vmMo.detachAllDisksExcept(vol.getPath(), diskInfo != null ? diskInfo.getDiskDeviceBusName() : null); VmwareStorageLayoutHelper.moveVolumeToRootFolder(new DatacenterMO(context, morDc), detachedDisks); @@ -1501,13 +1514,13 @@ public class VmwareStorageProcessor implements StorageProcessor { } } -/* + /* if (s_logger.isInfoEnabled()) { s_logger.info("Destroy volume by original name: " + vol.getPath() + ".vmdk"); } VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc)); -*/ + */ return new Answer(cmd, true, "Success"); } @@ -1527,8 +1540,8 @@ public class VmwareStorageProcessor implements StorageProcessor { } } - VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc)); - + VmwareStorageLayoutHelper.deleteVolumeVmdkFiles(dsMo, vol.getPath(), new DatacenterMO(context, morDc)); + return new Answer(cmd, true, "Success"); } catch (Throwable e) { if (e instanceof RemoteException) { @@ -1672,10 +1685,20 @@ public class VmwareStorageProcessor implements StorageProcessor { return new Answer(cmd, false, "unsupported command"); } } - + + @Override + public Answer introduceObject(IntroduceObjectCmd cmd) { + return new Answer(cmd, false, "not implemented yet"); + } + + @Override + public Answer forgetObject(ForgetObjectCmd cmd) { + return new Answer(cmd, false, "not implemented yet"); + } + private static String deriveTemplateUuidOnHost(VmwareHypervisorHost hyperHost, String storeIdentifier, String templateName) { - String templateUuid
= UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes()).toString(); - templateUuid = templateUuid.replaceAll("-", ""); - return templateUuid; + String templateUuid = UUID.nameUUIDFromBytes((templateName + "@" + storeIdentifier + "-" + hyperHost.getMor().getValue()).getBytes()).toString(); + templateUuid = templateUuid.replaceAll("-", ""); + return templateUuid; } } diff --git a/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java b/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java index bdba61ba028..5c9b3af52b9 100644 --- a/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java +++ b/plugins/hypervisors/vmware/src/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java @@ -21,12 +21,14 @@ package org.apache.cloudstack.storage.motion; import java.util.HashMap; import java.util.Map; + import javax.inject.Inject; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -62,17 +64,17 @@ public class VmwareStorageMotionStrategy implements DataMotionStrategy { @Inject VMInstanceDao instanceDao; @Override - public boolean canHandle(DataObject srcData, DataObject destData) { - return false; + public StrategyPriority canHandle(DataObject srcData, DataObject destData) { + return StrategyPriority.CANT_HANDLE; } @Override - public boolean 
canHandle(Map volumeMap, Host srcHost, Host destHost) { + public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { if (srcHost.getHypervisorType() == HypervisorType.VMware && destHost.getHypervisorType() == HypervisorType.VMware) { s_logger.debug(this.getClass() + " can handle the request because the hosts have VMware hypervisor"); - return true; + return StrategyPriority.HYPERVISOR; } - return false; + return StrategyPriority.CANT_HANDLE; } @Override diff --git a/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java b/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java index b3ea5d53269..1234728f364 100644 --- a/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java +++ b/plugins/hypervisors/vmware/test/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategyTest.java @@ -16,13 +16,6 @@ // under the License. 
package org.apache.cloudstack.storage.motion; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.anyLong; -import static org.mockito.Matchers.isA; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -32,6 +25,7 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCallFuture; @@ -69,6 +63,14 @@ import com.cloud.utils.component.ComponentContext; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import static org.mockito.Matchers.anyLong; +import static org.mockito.Matchers.isA; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + @RunWith(SpringJUnit4ClassRunner.class) @ContextConfiguration(loader = AnnotationConfigContextLoader.class) public class VmwareStorageMotionStrategyTest { @@ -98,8 +100,8 @@ public class VmwareStorageMotionStrategyTest { when(srcHost.getHypervisorType()).thenReturn(HypervisorType.VMware); when(destHost.getHypervisorType()).thenReturn(HypervisorType.VMware); Map volumeMap = new HashMap(); - boolean canHandle = strategy.canHandle(volumeMap, srcHost, destHost); - assertTrue("The strategy is only supposed to handle vmware hosts", canHandle); + StrategyPriority canHandle = strategy.canHandle(volumeMap, srcHost, destHost); + assertTrue("The strategy is only supposed to handle vmware hosts", canHandle == 
StrategyPriority.HYPERVISOR); } @Test @@ -109,8 +111,8 @@ public class VmwareStorageMotionStrategyTest { when(srcHost.getHypervisorType()).thenReturn(HypervisorType.XenServer); when(destHost.getHypervisorType()).thenReturn(HypervisorType.XenServer); Map volumeMap = new HashMap(); - boolean canHandle = strategy.canHandle(volumeMap, srcHost, destHost); - assertFalse("The strategy is only supposed to handle vmware hosts", canHandle); + StrategyPriority canHandle = strategy.canHandle(volumeMap, srcHost, destHost); + assertFalse("The strategy is only supposed to handle vmware hosts", canHandle == StrategyPriority.HYPERVISOR); } @Test @@ -231,8 +233,8 @@ public class VmwareStorageMotionStrategyTest { @Configuration @ComponentScan(basePackageClasses = { VmwareStorageMotionStrategy.class }, - includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)}, - useDefaultFilters = false) + includeFilters = {@Filter(value = TestConfiguration.Library.class, type = FilterType.CUSTOM)}, + useDefaultFilters = false) public static class TestConfiguration extends SpringUtils.CloudStackTestConfiguration { @Bean diff --git a/plugins/hypervisors/xen/pom.xml b/plugins/hypervisors/xen/pom.xml index 9621f499d3b..66b6b1d5803 100644 --- a/plugins/hypervisors/xen/pom.xml +++ b/plugins/hypervisors/xen/pom.xml @@ -33,6 +33,7 @@ org.apache.httpcomponents httpclient + 4.2.2 compile diff --git a/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-compute/module.properties b/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-compute/module.properties new file mode 100644 index 00000000000..c6c91f658fc --- /dev/null +++ b/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=xenserver-compute +parent=compute \ No newline at end of file diff --git a/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-compute/spring-xenserver-compute-context.xml b/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-compute/spring-xenserver-compute-context.xml new file mode 100644 index 00000000000..e024ad837c8 --- /dev/null +++ b/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-compute/spring-xenserver-compute-context.xml @@ -0,0 +1,28 @@ + + + + + + + + + + + + diff --git a/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-discoverer/module.properties b/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-discoverer/module.properties new file mode 100644 index 00000000000..10d0ecdf729 --- /dev/null +++ b/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-discoverer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=xenserver-discoverer +parent=discoverer \ No newline at end of file diff --git a/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-discoverer/spring-xenserver-discoverer-context.xml b/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-discoverer/spring-xenserver-discoverer-context.xml new file mode 100644 index 00000000000..c3010072dfe --- /dev/null +++ b/plugins/hypervisors/xen/resources/META-INF/cloudstack/xenserver-discoverer/spring-xenserver-discoverer-context.xml @@ -0,0 +1,25 @@ + + + + + + + + diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java index 87a5014efad..688488d071a 100755 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/discoverer/XcpServerDiscoverer.java @@ -16,6 +16,31 @@ // under the License. 
package com.cloud.hypervisor.xen.discoverer; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; + +import javax.ejb.Local; +import javax.inject.Inject; +import javax.naming.ConfigurationException; +import javax.persistence.EntityExistsException; + +import org.apache.log4j.Logger; +import org.apache.xmlrpc.XmlRpcException; + +import com.xensource.xenapi.Connection; +import com.xensource.xenapi.Host; +import com.xensource.xenapi.Pool; +import com.xensource.xenapi.Session; +import com.xensource.xenapi.Types.SessionAuthenticationFailed; +import com.xensource.xenapi.Types.XenAPIException; + import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -69,33 +94,10 @@ import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.user.Account; import com.cloud.utils.NumbersUtil; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.HypervisorVersionChangedException; -import com.xensource.xenapi.Connection; -import com.xensource.xenapi.Host; -import com.xensource.xenapi.Pool; -import com.xensource.xenapi.Session; -import com.xensource.xenapi.Types.SessionAuthenticationFailed; -import com.xensource.xenapi.Types.XenAPIException; -import org.apache.log4j.Logger; -import org.apache.xmlrpc.XmlRpcException; - -import javax.ejb.Local; -import javax.inject.Inject; -import javax.naming.ConfigurationException; -import javax.persistence.EntityExistsException; -import java.net.InetAddress; -import java.net.URI; -import java.net.UnknownHostException; -import java.util.HashMap; -import 
java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Queue; -import java.util.Set; @Local(value=Discoverer.class) public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, Listener, ResourceStateAdapter { @@ -126,8 +128,8 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L try { _clusterDao.update(cluster.getId(), cluster); } catch (EntityExistsException e) { - SearchCriteriaService sc = SearchCriteria2.create(ClusterVO.class); - sc.addAnd(sc.getEntity().getGuid(), Op.EQ, guid); + QueryBuilder sc = QueryBuilder.create(ClusterVO.class); + sc.and(sc.entity().getGuid(), Op.EQ, guid); List clusters = sc.list(); ClusterVO clu = clusters.get(0); List clusterHosts = _resourceMgr.listAllHostsInCluster(clu.getId()); @@ -309,6 +311,9 @@ public class XcpServerDiscoverer extends DiscovererBase implements Discoverer, L details.put("storage.network.device1", storageNetworkLabel); } + DataCenterVO zone = _dcDao.findById(dcId); + boolean securityGroupEnabled = zone.isSecurityGroupEnabled(); + params.put("securitygroupenabled", Boolean.toString(securityGroupEnabled)); params.put("wait", Integer.toString(_wait)); details.put("wait", Integer.toString(_wait)); diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java index 76016c75b1a..3323a15ba9d 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/CitrixResourceBase.java @@ -174,6 +174,7 @@ import com.cloud.agent.api.to.VolumeTO; import com.cloud.exception.InternalErrorException; import com.cloud.host.Host.Type; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.hypervisor.xen.resource.CitrixHelper; import com.cloud.network.HAProxyConfigurator; import 
com.cloud.network.LoadBalancerConfigurator; import com.cloud.network.Networks; @@ -207,9 +208,9 @@ import com.cloud.storage.template.TemplateProp; import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; import com.cloud.utils.S3Utils; import com.cloud.utils.StringUtils; -import com.cloud.utils.Ternary; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.DiskProfile; @@ -329,6 +330,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected int _migratewait; protected String _instance; //instance name (default is usually "VM") static final Random _rand = new Random(System.currentTimeMillis()); + protected boolean _securityGroupEnabled; protected IAgentControl _agentControl; @@ -734,7 +736,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe private Answer execute(RevertToVMSnapshotCommand cmd) { String vmName = cmd.getVmName(); - List listVolumeTo = cmd.getVolumeTOs(); + List listVolumeTo = cmd.getVolumeTOs(); VMSnapshot.Type vmSnapshotType = cmd.getTarget().getType(); Boolean snapshotMemory = vmSnapshotType == VMSnapshot.Type.DiskAndMemory; Connection conn = getConnection(); @@ -786,7 +788,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } // after revert, VM's volumes path have been changed, need to report to manager - for (VolumeTO volumeTo : listVolumeTo) { + for (VolumeObjectTO volumeTo : listVolumeTo) { Long deviceId = volumeTo.getDeviceId(); VDI vdi = vdiMap.get(deviceId.toString()); volumeTo.setPath(vdi.getUuid(conn)); @@ -1198,10 +1200,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - protected VBD createVbd(Connection conn, DiskTO volume, String vmName, VM vm, BootloaderType bootLoaderType) throws XmlRpcException, XenAPIException { + protected 
VBD createVbd(Connection conn, DiskTO volume, String vmName, VM vm, BootloaderType bootLoaderType, VDI vdi) throws XmlRpcException, XenAPIException { Volume.Type type = volume.getType(); - VDI vdi = mount(conn, vmName, volume); + if (vdi == null) { + vdi = mount(conn, vmName, volume); + } + if ( vdi != null ) { Map smConfig = vdi.getSmConfig(conn); for (String key : smConfig.keySet()) { @@ -1245,69 +1250,108 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return vbd; } + + + public long getStaticMax(String os, boolean b, long dynamicMinRam, long dynamicMaxRam){ + return dynamicMaxRam; + } + + public long getStaticMin(String os, boolean b, long dynamicMinRam, long dynamicMaxRam){ + return dynamicMinRam; + } protected VM createVmFromTemplate(Connection conn, VirtualMachineTO vmSpec, Host host) throws XenAPIException, XmlRpcException { String guestOsTypeName = getGuestOsType(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD); - if ( guestOsTypeName == null ) { - String msg = " Hypervisor " + this.getClass().getName() + " doesn't support guest OS type " + vmSpec.getOs() - + ". 
you can choose 'Other install media' to run it as HVM"; - s_logger.warn(msg); - throw new CloudRuntimeException(msg); - } Set templates = VM.getByNameLabel(conn, guestOsTypeName); assert templates.size() == 1 : "Should only have 1 template but found " + templates.size(); - if (!templates.iterator().hasNext()) { - throw new CloudRuntimeException("No matching OS type found for starting a [" + vmSpec.getOs() - + "] VM on host " + host.getHostname(conn)); - } VM template = templates.iterator().next(); - VM vm = template.createClone(conn, vmSpec.getName()); - VM.Record vmr = vm.getRecord(conn); + + VM.Record vmr = template.getRecord(conn); + vmr.affinity = host; + vmr.otherConfig.remove("disks"); + vmr.otherConfig.remove("default_template"); + vmr.otherConfig.remove("mac_seed"); + vmr.isATemplate = false; + vmr.nameLabel = vmSpec.getName(); + vmr.actionsAfterCrash = Types.OnCrashBehaviour.DESTROY; + vmr.actionsAfterShutdown = Types.OnNormalExit.DESTROY; + + if (isDmcEnabled(conn, host) && vmSpec.isEnableDynamicallyScaleVm()) { + //scaling is allowed + vmr.memoryStaticMin = getStaticMin(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam()); + vmr.memoryStaticMax = getStaticMax(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam()); + vmr.memoryDynamicMin = vmSpec.getMinRam(); + vmr.memoryDynamicMax = vmSpec.getMaxRam(); + } else { + //scaling disallowed, set static memory target + if (vmSpec.isEnableDynamicallyScaleVm() && !isDmcEnabled(conn, host)) { + s_logger.warn("Host "+ host.getHostname(conn) +" does not support dynamic scaling, so the vm " + vmSpec.getName() + " is not dynamically scalable"); + } + vmr.memoryStaticMin = vmSpec.getMinRam(); + vmr.memoryStaticMax = vmSpec.getMaxRam(); + vmr.memoryDynamicMin = vmSpec.getMinRam(); + vmr.memoryDynamicMax = vmSpec.getMaxRam(); + } + + if (guestOsTypeName.toLowerCase().contains("windows")) { + vmr.VCPUsMax = (long) 
vmSpec.getCpus(); + } else { + vmr.VCPUsMax = 32L; + } + + Map details = vmSpec.getDetails(); + if ( details != null ) { + String timeoffset = details.get("timeoffset"); + if (timeoffset != null) { + Map platform = vmr.platform; + platform.put("timeoffset", timeoffset); + vmr.platform = platform; + } + + String coresPerSocket = details.get("cpu.corespersocket"); + if (coresPerSocket != null) { + Map platform = vmr.platform; + platform.put("cores-per-socket", coresPerSocket); + vmr.platform = platform; + } + } + + vmr.VCPUsAtStartup = (long) vmSpec.getCpus(); + vmr.consoles.clear(); + + VM vm = VM.create(conn, vmr); if (s_logger.isDebugEnabled()) { - s_logger.debug("Created VM " + vmr.uuid + " for " + vmSpec.getName()); + s_logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName()); } - for (Console console : vmr.consoles) { - console.destroy(conn); - } - - vm.setIsATemplate(conn, false); - vm.setAffinity(conn, host); - vm.removeFromOtherConfig(conn, "disks"); - vm.setNameLabel(conn, vmSpec.getName()); - setMemory(conn, vm, vmSpec.getMinRam(),vmSpec.getMaxRam()); - vm.setVCPUsMax(conn, (long)vmSpec.getCpus()); - vm.setVCPUsAtStartup(conn, (long)vmSpec.getCpus()); - Map vcpuParams = new HashMap(); Integer speed = vmSpec.getMinSpeed(); if (speed != null) { - int cpuWeight = _maxWeight; //cpu_weight - long utilization = 0; // max CPU cap, default is unlimited + int cpuWeight = _maxWeight; // cpu_weight + int utilization = 0; // max CPU cap, default is unlimited - // weight based allocation - cpuWeight = (int)((speed*0.99) / _host.speed * _maxWeight); + // weight based allocation, CPU weight is calculated per VCPU + cpuWeight = (int) ((speed * 0.99) / _host.speed * _maxWeight); if (cpuWeight > _maxWeight) { cpuWeight = _maxWeight; } if (vmSpec.getLimitCpuUse()) { - utilization = ((long)speed * 100 * vmSpec.getCpus()) / _host.speed ; + // CPU cap is per VM, so need to assign cap based on the number of vcpus + utilization = (int) ((speed * 0.99 * 
vmSpec.getCpus()) / _host.speed * 100); } vcpuParams.put("weight", Integer.toString(cpuWeight)); - vcpuParams.put("cap", Long.toString(utilization)); + vcpuParams.put("cap", Integer.toString(utilization)); + } if (vcpuParams.size() > 0) { vm.setVCPUsParams(conn, vcpuParams); } - vm.setActionsAfterCrash(conn, Types.OnCrashBehaviour.DESTROY); - vm.setActionsAfterShutdown(conn, Types.OnNormalExit.DESTROY); - String bootArgs = vmSpec.getBootArgs(); if (bootArgs != null && bootArgs.length() > 0) { String pvargs = vm.getPVArgs(conn); @@ -1320,37 +1364,44 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe if (!(guestOsTypeName.startsWith("Windows") || guestOsTypeName.startsWith("Citrix") || guestOsTypeName.startsWith("Other"))) { if (vmSpec.getBootloader() == BootloaderType.CD) { - DiskTO [] disks = vmSpec.getDisks(); + DiskTO[] disks = vmSpec.getDisks(); for (DiskTO disk : disks) { - Volume.Type type = disk.getType(); - if (type == Volume.Type.ISO) { - TemplateObjectTO tmpl = (TemplateObjectTO)disk.getData(); - String osType = tmpl.getGuestOsType(); - if (tmpl.getFormat() == ImageFormat.ISO && osType != null ) { - String isoGuestOsName = getGuestOsType(osType, vmSpec.getBootloader() == BootloaderType.CD); - if (!isoGuestOsName.equals(guestOsTypeName)) { - vmSpec.setBootloader(BootloaderType.PyGrub); - } - } + if (disk.getType() == Volume.Type.ISO ) { + TemplateObjectTO iso = (TemplateObjectTO)disk.getData(); + String osType = iso.getGuestOsType(); + if (osType != null) { + String isoGuestOsName = getGuestOsType(osType, vmSpec.getBootloader() == BootloaderType.CD); + if (!isoGuestOsName.equals(guestOsTypeName)) { + vmSpec.setBootloader(BootloaderType.PyGrub); + } + } } } } if (vmSpec.getBootloader() == BootloaderType.CD) { vm.setPVBootloader(conn, "eliloader"); - Map otherConfig = vm.getOtherConfig(conn); - if ( ! 
vm.getOtherConfig(conn).containsKey("install-repository") ) { - otherConfig.put( "install-repository", "cdrom"); + if (!vm.getOtherConfig(conn).containsKey("install-repository")) { + vm.addToOtherConfig(conn, "install-repository", "cdrom"); } - vm.setOtherConfig(conn, otherConfig); - } else if (vmSpec.getBootloader() == BootloaderType.PyGrub ){ + } else if (vmSpec.getBootloader() == BootloaderType.PyGrub) { vm.setPVBootloader(conn, "pygrub"); } else { vm.destroy(conn); throw new CloudRuntimeException("Unable to handle boot loader type: " + vmSpec.getBootloader()); } } + try { + finalizeVmMetaData(vm, conn, vmSpec); + } catch ( Exception e) { + throw new CloudRuntimeException("Unable to finalize VM MetaData: " + vmSpec); + } return vm; } + + + protected void finalizeVmMetaData(VM vm, Connection conn, VirtualMachineTO vmSpec) throws Exception { + } + protected String handleVmStartFailure(Connection conn, String vmName, VM vm, String message, Throwable th) { String msg = "Unable to start " + vmName + " due to " + message; @@ -1597,6 +1648,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe String vmName = vmSpec.getName(); State state = State.Stopped; VM vm = null; + // if a VDI is created, record its UUID to send back to the CS MS + Map iqnToPath = new HashMap(); try { Set vms = VM.getByNameLabel(conn, vmName); if ( vms != null ) { @@ -1625,7 +1678,37 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vm = createVmFromTemplate(conn, vmSpec, host); for (DiskTO disk : vmSpec.getDisks()) { - createVbd(conn, disk, vmName, vm, vmSpec.getBootloader()); + VDI vdi = null; + + if (disk.getData() instanceof VolumeObjectTO) { + Map details = disk.getDetails(); + boolean isManaged = details != null && Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + + if (isManaged) { + String iScsiName = details.get(DiskTO.IQN); + String storageHost = details.get(DiskTO.STORAGE_HOST); + String chapInitiatorUsername = 
disk.getDetails().get(DiskTO.CHAP_INITIATOR_USERNAME); + String chapInitiatorSecret = disk.getDetails().get(DiskTO.CHAP_INITIATOR_SECRET); + Long volumeSize = Long.parseLong(details.get(DiskTO.VOLUME_SIZE)); + String vdiNameLabel = vmName + "-DATA"; + + SR sr = getIscsiSR(conn, iScsiName, storageHost, iScsiName, + chapInitiatorUsername, chapInitiatorSecret, true); + + vdi = getVDIbyUuid(conn, disk.getPath(), false); + + if (vdi == null) { + vdi = createVdi(sr, vdiNameLabel, volumeSize); + + iqnToPath.put(iScsiName, vdi.getUuid(conn)); + } + else { + vdi.setNameLabel(conn, vdiNameLabel); + } + } + } + + createVbd(conn, disk, vmName, vm, vmSpec.getBootloader(), vdi); } if (vmSpec.getType() != VirtualMachine.Type.User) { @@ -1707,11 +1790,21 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } state = State.Running; - return new StartAnswer(cmd); + + StartAnswer startAnswer = new StartAnswer(cmd); + + startAnswer.setIqnToPath(iqnToPath); + + return startAnswer; } catch (Exception e) { s_logger.warn("Catch Exception: " + e.getClass().toString() + " due to " + e.toString(), e); String msg = handleVmStartFailure(conn, vmName, vm, "", e); - return new StartAnswer(cmd, msg); + + StartAnswer startAnswer = new StartAnswer(cmd, msg); + + startAnswer.setIqnToPath(iqnToPath); + + return startAnswer; } finally { synchronized (_cluster.intern()) { if (state != State.Stopped) { @@ -1722,6 +1815,46 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.debug("The VM is in stopped state, detected problem during startup : " + vmName); } } + + if (state != State.Running) { + disconnectManagedVolumes(conn, vm); + } + } + } + + private void disconnectManagedVolumes(Connection conn, VM vm) { + try { + Set vbds = vm.getVBDs(conn); + + for (VBD vbd : vbds) { + VDI vdi = vbd.getVDI(conn); + SR sr = null; + + try { + sr = vdi.getSR(conn); + } + catch (Exception ex) { + continue; + } + + if 
(sr.getNameLabel(conn).startsWith("/iqn.")) { + VBD.Record vbdr = vbd.getRecord(conn); + + if (vbdr.currentlyAttached) { + vbd.unplug(conn); + } + + vbd.destroy(conn); + + vdi.setNameLabel(conn, "detached"); + + umount(conn, vdi); + + handleSrAndVdiDetach(sr.getNameLabel(conn)); + } + } + } catch (Exception ex) { + s_logger.debug(ex.getMessage()); } } @@ -2174,11 +2307,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe args += " -s " + cmd.getVpnServerIp(); args += " -l " + cmd.getLocalIp(); args += " -c "; - } else { args += " -d "; args += " -s " + cmd.getVpnServerIp(); } + args += " -C " + cmd.getLocalCidr(); + args += " -i " + cmd.getPublicInterface(); String result = callHostPlugin(conn, "vmops", "routerProxy", "args", args); if (result == null || result.isEmpty()) { return new Answer(cmd, false, "Configure VPN failed"); @@ -2228,16 +2362,12 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe Connection conn = getConnection(); final String password = cmd.getPassword(); final String routerPrivateIPAddress = cmd.getAccessDetail(NetworkElementCommand.ROUTER_IP); - final String vmName = cmd.getVmName(); final String vmIpAddress = cmd.getVmIpAddress(); - final String local = vmName; - // Run save_password_to_domr.sh - String args = "-r " + routerPrivateIPAddress; + String args = "savepassword.sh " + routerPrivateIPAddress; args += " -v " + vmIpAddress; args += " -p " + password; - args += " " + local; - String result = callHostPlugin(conn, "vmops", "savePassword", "args", args); + String result = callHostPlugin(conn, "vmops", "routerProxy", "args", args); if (result == null || result.isEmpty()) { return new Answer(cmd, false, "savePassword failed"); @@ -2866,8 +2996,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return state == null ? 
State.Unknown : state; } - protected HashMap> getAllVms(Connection conn) { - final HashMap> vmStates = new HashMap>(); + protected HashMap> getAllVms(Connection conn) { + final HashMap> vmStates = new HashMap>(); Map vm_map = null; for (int i = 0; i < 2; i++) { try { @@ -2897,6 +3027,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.trace("VM " + record.nameLabel + ": powerstate = " + ps + "; vm state=" + state.toString()); } Host host = record.residentOn; + String xstoolsversion = getVMXenToolsVersion(record.platform); String host_uuid = null; if( ! isRefNull(host) ) { try { @@ -2908,7 +3039,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } catch (XmlRpcException e) { s_logger.error("Failed to get host uuid for host " + host.toWireString(), e); } - vmStates.put(record.nameLabel, new Pair(host_uuid, state)); + vmStates.put(record.nameLabel, new Ternary(host_uuid, state, xstoolsversion)); } } @@ -3919,7 +4050,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.info("VM does not exist on XenServer" + _host.uuid); s_vms.remove(_cluster, _name, vmName); } - return new StopAnswer(cmd, "VM does not exist", 0 , true); + return new StopAnswer(cmd, "VM does not exist", true); } for (VM vm : vms) { VM.Record vmr = vm.getRecord(conn); @@ -3967,6 +4098,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe try { if (vm.getPowerState(conn) == VmPowerState.HALTED) { + disconnectManagedVolumes(conn, vm); + Map platform = vm.getPlatform(conn); Integer timeoffset = null; try { @@ -3976,6 +4109,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } catch (NumberFormatException e) { s_logger.error("Error while reading the platform:timeoffset field of the instance", e); } + String xentoolsversion = getVMXenToolsVersion(platform); Set vifs = vm.getVIFs(conn); List networks = new ArrayList(); @@ 
-3997,7 +4131,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe // network might be destroyed by other host } } - return new StopAnswer(cmd, "Stop VM " + vmName + " Succeed", 0, timeoffset, true); + return new StopAnswer(cmd, "Stop VM " + vmName + " Succeed", xentoolsversion, timeoffset, true); } } catch (XenAPIException e) { String msg = "VM destroy failed in Stop " + vmName + " Command due to " + e.toString(); @@ -4029,6 +4163,30 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe return new StopAnswer(cmd, "Stop VM failed", false); } + + /*Override by subclass*/ + protected String getVMXenToolsVersion(Map platform) { + return "xenserver56"; + } + + + private List getVdis(Connection conn, VM vm) { + List vdis = new ArrayList(); + try { + Set vbds =vm.getVBDs(conn); + for( VBD vbd : vbds ) { + vdis.add(vbd.getVDI(conn)); + } + } catch (XenAPIException e) { + String msg = "getVdis can not get VPD due to " + e.toString(); + s_logger.warn(msg, e); + } catch (XmlRpcException e) { + String msg = "getVdis can not get VPD due to " + e.getMessage(); + s_logger.warn(msg, e); + } + return vdis; + } + protected String connect(Connection conn, final String vmName, final String ipAddress, final int port) { for (int i = 0; i <= _retry; i++) { try { @@ -4877,7 +5035,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe Host.Record hostr = poolr.master.getRecord(conn); if (_host.uuid.equals(hostr.uuid)) { - HashMap> allStates=fullClusterSync(conn); + HashMap> allStates=fullClusterSync(conn); cmd.setClusterVMStateChanges(allStates); } } catch (Throwable e) { @@ -4936,8 +5094,11 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe s_logger.warn("set xenserver Iptable failed"); return null; } - _canBridgeFirewall = can_bridge_firewall(conn); - + + if (_securityGroupEnabled) { + _canBridgeFirewall = can_bridge_firewall(conn); + } + String result = 
callHostPluginPremium(conn, "heartbeat", "host", _host.uuid, "interval", Integer .toString(_heartbeatInterval)); if (result == null || !result.contains("> DONE <")) { @@ -5348,7 +5509,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe if (pool.getType() == StoragePoolType.NetworkFilesystem) { getNfsSR(conn, pool); } else if (pool.getType() == StoragePoolType.IscsiLUN) { - getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, new Boolean[1]); + getIscsiSR(conn, pool.getUuid(), pool.getHost(), pool.getPath(), null, null, false); } else if (pool.getType() == StoragePoolType.PreSetup) { } else { return new Answer(cmd, false, "The pool type: " + pool.getType().name() + " is not supported."); @@ -5911,6 +6072,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe _publicNetworkName = (String) params.get("public.network.device"); _guestNetworkName = (String)params.get("guest.network.device"); _instance = (String) params.get("instance.name"); + _securityGroupEnabled = Boolean.parseBoolean((String)params.get("securitygroupenabled")); _linkLocalPrivateNetworkName = (String) params.get("private.linkLocal.device"); if (_linkLocalPrivateNetworkName == null) { @@ -6166,17 +6328,27 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } protected VDI getVDIbyUuid(Connection conn, String uuid) { + return getVDIbyUuid(conn, uuid, true); + } + + protected VDI getVDIbyUuid(Connection conn, String uuid, boolean throwExceptionIfNotFound) { try { return VDI.getByUuid(conn, uuid); } catch (Exception e) { - String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString(); - s_logger.debug(msg); - throw new CloudRuntimeException(msg, e); + if (throwExceptionIfNotFound) { + String msg = "Catch Exception " + e.getClass().getName() + " :VDI getByUuid for uuid: " + uuid + " failed due to " + e.toString(); + + 
s_logger.debug(msg); + + throw new CloudRuntimeException(msg, e); + } + + return null; } } protected SR getIscsiSR(Connection conn, String srNameLabel, String target, String path, - String chapInitiatorUsername, String chapInitiatorPassword, Boolean[] created) { + String chapInitiatorUsername, String chapInitiatorPassword, boolean ignoreIntroduceException) { synchronized (srNameLabel.intern()) { Map deviceConfig = new HashMap(); try { @@ -6225,8 +6397,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe deviceConfig.put("target", target); deviceConfig.put("targetIQN", targetiqn); - if (StringUtils.isNotBlank(chapInitiatorUsername) && - StringUtils.isNotBlank(chapInitiatorPassword)) { + if (StringUtils.isNotBlank(chapInitiatorUsername) && StringUtils.isNotBlank(chapInitiatorPassword)) { deviceConfig.put("chapuser", chapInitiatorUsername); deviceConfig.put("chappassword", chapInitiatorPassword); } @@ -6280,11 +6451,18 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe { sr = SR.create(conn, host, deviceConfig, new Long(0), srNameLabel, srNameLabel, type, "user", true, smConfig); - - created[0] = true; // note that the SR was created (as opposed to introduced) } else { - sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel, - type, "user", true, smConfig); + try { + sr = SR.introduce(conn, pooluuid, srNameLabel, srNameLabel, + type, "user", true, smConfig); + } + catch (XenAPIException ex) { + if (ignoreIntroduceException) { + return sr; + } + + throw ex; + } Set setHosts = Host.getAll(conn); @@ -6459,54 +6637,41 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - protected VDI handleSrAndVdiAttach(String iqn, String storageHostName, - String chapInitiatorName, String chapInitiatorPassword) throws Types.XenAPIException, XmlRpcException { + protected VDI createVdi(SR sr, String vdiNameLabel, Long volumeSize) throws Types.XenAPIException, XmlRpcException { VDI 
vdi = null; Connection conn = getConnection(); - Boolean[] created = { false }; + VDI.Record vdir = new VDI.Record(); - SR sr = getIscsiSR(conn, iqn, - storageHostName, iqn, - chapInitiatorName, chapInitiatorPassword, created); + vdir.nameLabel = vdiNameLabel; + vdir.SR = sr; + vdir.type = Types.VdiType.USER; - // if created[0] is true, this means the SR was actually created...as opposed to introduced - if (created[0]) { - VDI.Record vdir = new VDI.Record(); - - vdir.nameLabel = iqn; - vdir.SR = sr; - vdir.type = Types.VdiType.USER; - - long totalSpace = sr.getPhysicalSize(conn); - long unavailableSpace = sr.getPhysicalUtilisation(conn); - - vdir.virtualSize = totalSpace - unavailableSpace; - - if (vdir.virtualSize < 0) { - throw new CloudRuntimeException("VDI virtual size cannot be less than 0."); - } - - long maxNumberOfTries = (totalSpace / unavailableSpace >= 1) ? (totalSpace / unavailableSpace) : 1; - long tryNumber = 0; - - while (tryNumber <= maxNumberOfTries) { - try { - vdi = VDI.create(conn, vdir); - - break; - } - catch (Exception ex) { - tryNumber++; - - vdir.virtualSize -= unavailableSpace; - } - } + long totalSrSpace = sr.getPhysicalSize(conn); + long unavailableSrSpace = sr.getPhysicalUtilisation(conn); + long availableSrSpace = totalSrSpace - unavailableSrSpace; + if (availableSrSpace < volumeSize) { + throw new CloudRuntimeException("Available space for SR cannot be less than " + volumeSize + "."); } - else { - vdi = sr.getVDIs(conn).iterator().next(); + + vdir.virtualSize = volumeSize; + + long maxNumberOfTries = (totalSrSpace / unavailableSrSpace >= 1) ? 
(totalSrSpace / unavailableSrSpace) : 1; + long tryNumber = 0; + + while (tryNumber <= maxNumberOfTries) { + try { + vdi = VDI.create(conn, vdir); + + break; + } + catch (Exception ex) { + tryNumber++; + + vdir.virtualSize -= unavailableSrSpace; + } } return vdi; @@ -6524,6 +6689,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe Connection conn = getConnection(); boolean attach = cmd.getAttach(); String vmName = cmd.getVmName(); + String vdiNameLabel = vmName + "-DATA"; Long deviceId = cmd.getDeviceId(); String errorMsg; @@ -6534,12 +6700,17 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } try { - // Look up the VDI VDI vdi = null; if (cmd.getAttach() && cmd.isManaged()) { - vdi = handleSrAndVdiAttach(cmd.get_iScsiName(), cmd.getStorageHost(), - cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword()); + SR sr = getIscsiSR(conn, cmd.get_iScsiName(), cmd.getStorageHost(), cmd.get_iScsiName(), + cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword(), true); + + vdi = getVDIbyUuid(conn, cmd.getVolumePath(), false); + + if (vdi == null) { + vdi = createVdi(sr, vdiNameLabel, cmd.getVolumeSize()); + } } else { vdi = getVDIbyUuid(conn, cmd.getVolumePath()); @@ -6594,7 +6765,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe vbd.plug(conn); // Update the VDI's label to include the VM name - vdi.setNameLabel(conn, vmName + "-DATA"); + vdi.setNameLabel(conn, vdiNameLabel); return new AttachVolumeAnswer(cmd, Long.parseLong(diskNumber), vdi.getUuid(conn)); } else { @@ -6637,7 +6808,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } - private long getVMSnapshotChainSize(Connection conn, VolumeTO volumeTo, String vmName) + private long getVMSnapshotChainSize(Connection conn, VolumeObjectTO volumeTo, String vmName) throws BadServerResponse, XenAPIException, XmlRpcException { Set allvolumeVDIs = VDI.getByNameLabel(conn, 
volumeTo.getName()); long size = 0; @@ -6661,7 +6832,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe continue; } } - if (volumeTo.getType() == Volume.Type.ROOT) { + if (volumeTo.getVolumeType() == Volume.Type.ROOT) { Map allVMs = VM.getAllRecords(conn); // add size of memory snapshot vdi if (allVMs.size() > 0) { @@ -6694,7 +6865,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe protected Answer execute(final CreateVMSnapshotCommand cmd) { String vmName = cmd.getVmName(); String vmSnapshotName = cmd.getTarget().getSnapshotName(); - List listVolumeTo = cmd.getVolumeTOs(); + List listVolumeTo = cmd.getVolumeTOs(); VirtualMachine.State vmState = cmd.getVmState(); String guestOSType = cmd.getGuestOSType(); @@ -6774,9 +6945,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } // calculate used capacity for this VM snapshot - for (VolumeTO volumeTo : cmd.getVolumeTOs()){ + for (VolumeObjectTO volumeTo : cmd.getVolumeTOs()){ long size = getVMSnapshotChainSize(conn,volumeTo,cmd.getVmName()); - volumeTo.setChainSize(size); + volumeTo.setSize(size); } success = true; @@ -6825,7 +6996,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } private VM createWorkingVM(Connection conn, String vmName, - String guestOSType, List listVolumeTo) + String guestOSType, List listVolumeTo) throws BadServerResponse, VmBadPowerState, SrFull, OperationNotAllowed, XenAPIException, XmlRpcException { String guestOsTypeName = getGuestOsType(guestOSType, false); @@ -6839,8 +7010,8 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe VM template = getVM(conn, guestOsTypeName); VM vm = template.createClone(conn, vmName); vm.setIsATemplate(conn, false); - Map vdiMap = new HashMap(); - for (VolumeTO volume : listVolumeTo) { + Map vdiMap = new HashMap(); + for (VolumeObjectTO volume : listVolumeTo) { String vdiUuid = 
volume.getPath(); try { VDI vdi = VDI.getByUuid(conn, vdiUuid); @@ -6851,18 +7022,18 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } for (VDI vdi : vdiMap.keySet()) { - VolumeTO volumeTO = vdiMap.get(vdi); + VolumeObjectTO volumeTO = vdiMap.get(vdi); VBD.Record vbdr = new VBD.Record(); vbdr.VM = vm; vbdr.VDI = vdi; - if (volumeTO.getType() == Volume.Type.ROOT) { + if (volumeTO.getVolumeType() == Volume.Type.ROOT) { vbdr.bootable = true; vbdr.unpluggable = false; } else { vbdr.bootable = false; vbdr.unpluggable = true; } - vbdr.userdevice = new Long(volumeTO.getDeviceId()).toString(); + vbdr.userdevice = Long.toString(volumeTO.getDeviceId()); vbdr.mode = Types.VbdMode.RW; vbdr.type = Types.VbdType.DISK; VBD.create(conn, vbdr); @@ -6902,9 +7073,9 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } // re-calculate used capacify for this VM snapshot - for (VolumeTO volumeTo : cmd.getVolumeTOs()){ + for (VolumeObjectTO volumeTo : cmd.getVolumeTOs()){ long size = getVMSnapshotChainSize(conn,volumeTo,cmd.getVmName()); - volumeTo.setChainSize(size); + volumeTo.setSize(size); } return new DeleteVMSnapshotAnswer(cmd, cmd.getVolumeTOs()); @@ -7059,7 +7230,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } } - success = true; details = null; } else if (cmd.getCommandSwitch().equals(ManageSnapshotCommand.DESTROY_SNAPSHOT)) { @@ -7079,7 +7249,6 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe details += ", reason: " + e.toString(); s_logger.warn(details, e); } - return new ManageSnapshotAnswer(cmd, snapshotId, snapshotUUID, success, details); } @@ -8003,13 +8172,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } catch (Throwable e) { s_logger.warn("Check for master failed, failing the Cluster sync command"); return new Answer(cmd); - } - HashMap> newStates = deltaClusterSync(conn); + } + HashMap> 
newStates = deltaClusterSync(conn); return new ClusterSyncAnswer(cmd.getClusterId(), newStates); } - protected HashMap> fullClusterSync(Connection conn) { + protected HashMap> fullClusterSync(Connection conn) { synchronized (_cluster.intern()) { s_vms.clear(_cluster); } @@ -8022,12 +8191,13 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe String vm_name = record.nameLabel; VmPowerState ps = record.powerState; final State state = convertToState(ps); + String xstoolsversion = getVMXenToolsVersion(record.platform); Host host = record.residentOn; String host_uuid = null; if( ! isRefNull(host) ) { host_uuid = host.getUuid(conn); synchronized (_cluster.intern()) { - s_vms.put(_cluster, host_uuid, vm_name, state); + s_vms.put(_cluster, host_uuid, vm_name, state, xstoolsversion); } } if (s_logger.isTraceEnabled()) { @@ -8043,38 +8213,50 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } - protected HashMap> deltaClusterSync(Connection conn) { - final HashMap> changes = new HashMap>(); + protected HashMap> deltaClusterSync(Connection conn) { + final HashMap> changes = new HashMap>(); synchronized (_cluster.intern()) { - HashMap> newStates = getAllVms(conn); + HashMap> newStates = getAllVms(conn); if (newStates == null) { s_logger.warn("Unable to get the vm states so no state sync at this point."); return null; } - HashMap> oldStates = new HashMap>(s_vms.size(_cluster)); + HashMap> oldStates = new HashMap>(s_vms.size(_cluster)); oldStates.putAll(s_vms.getClusterVmState(_cluster)); - for (final Map.Entry> entry : newStates.entrySet()) { + for (final Map.Entry> entry : newStates.entrySet()) { final String vm = entry.getKey(); - + String xstoolsversion = entry.getValue().third(); State newState = entry.getValue().second(); String host_uuid = entry.getValue().first(); - final Pair oldState = oldStates.remove(vm); + final Ternary oldState = oldStates.remove(vm); + + // check if xstoolsversion changed + if 
(xstoolsversion != null && oldState != null){ + if (xstoolsversion != oldState.third() && newState != State.Stopped && newState != State.Stopping){ + s_logger.warn("Detecting a change in xstoolsversion for " + vm); + changes.put(vm, new Ternary(host_uuid, newState, xstoolsversion)); + s_logger.debug("11. The VM " + vm + " is in " + newState + " state"); + s_vms.put(_cluster, host_uuid, vm, newState, xstoolsversion); + continue; + } + } //check if host is changed if (host_uuid != null && oldState != null){ if (!host_uuid.equals(oldState.first()) && newState != State.Stopped && newState != State.Stopping){ s_logger.warn("Detecting a change in host for " + vm); - changes.put(vm, new Pair(host_uuid, newState)); + changes.put(vm, new Ternary(host_uuid, newState, null)); s_logger.debug("11. The VM " + vm + " is in " + newState + " state"); - s_vms.put(_cluster, host_uuid, vm, newState); + s_vms.put(_cluster, host_uuid, vm, newState, xstoolsversion); continue; } } + if (newState == State.Stopped && oldState != null && oldState.second() != State.Stopping && oldState.second() != State.Stopped) { newState = getRealPowerState(conn, vm); } @@ -8088,42 +8270,42 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe continue; } if (oldState == null) { - s_vms.put(_cluster, host_uuid, vm, newState); + s_vms.put(_cluster, host_uuid, vm, newState, xstoolsversion); s_logger.warn("Detecting a new state but couldn't find a old state so adding it to the changes: " + vm); - changes.put(vm, new Pair(host_uuid, newState)); + changes.put(vm, new Ternary(host_uuid, newState, null)); } else if (oldState.second() == State.Starting) { if (newState == State.Running) { s_logger.debug("12. 
The VM " + vm + " is in " + State.Running + " state"); - s_vms.put(_cluster, host_uuid, vm, newState); + s_vms.put(_cluster, host_uuid, vm, newState, xstoolsversion); } else if (newState == State.Stopped) { s_logger.warn("Ignoring vm " + vm + " because of a lag in starting the vm."); } } else if (oldState.second() == State.Migrating) { if (newState == State.Running) { s_logger.debug("Detected that an migrating VM is now running: " + vm); - s_vms.put(_cluster, host_uuid, vm, newState); + s_vms.put(_cluster, host_uuid, vm, newState, xstoolsversion); } } else if (oldState.second() == State.Stopping) { if (newState == State.Stopped) { s_logger.debug("13. The VM " + vm + " is in " + State.Stopped + " state"); - s_vms.put(_cluster, host_uuid, vm, newState); + s_vms.put(_cluster, host_uuid, vm, newState, xstoolsversion); } else if (newState == State.Running) { s_logger.warn("Ignoring vm " + vm + " because of a lag in stopping the vm. "); } } else if (oldState.second() != newState) { s_logger.debug("14. The VM " + vm + " is in " + newState + " state was " + oldState.second()); - s_vms.put(_cluster, host_uuid, vm, newState); + s_vms.put(_cluster, host_uuid, vm, newState, xstoolsversion); if (newState == State.Stopped) { /* * if (s_vmsKilled.remove(vm)) { s_logger.debug("VM " + vm + " has been killed for storage. 
"); * newState = State.Error; } */ } - changes.put(vm, new Pair(host_uuid, newState)); + changes.put(vm, new Ternary(host_uuid, newState, null)); } } - for (final Map.Entry> entry : oldStates.entrySet()) { + for (final Map.Entry> entry : oldStates.entrySet()) { final String vm = entry.getKey(); final State oldState = entry.getValue().second(); String host_uuid = entry.getValue().first(); @@ -8145,7 +8327,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe } else { State newState = State.Stopped; s_logger.warn("The VM is now missing marking it as Stopped " + vm); - changes.put(vm, new Pair(host_uuid, newState)); + changes.put(vm, new Ternary(host_uuid, newState, null)); } } } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java index 214dbd4059a..9c65a037c9f 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer56FP1Resource.java @@ -152,137 +152,6 @@ public class XenServer56FP1Resource extends XenServer56Resource { return dynamicMinRam; } - @Override - protected VM createVmFromTemplate(Connection conn, VirtualMachineTO vmSpec, Host host) throws XenAPIException, XmlRpcException { - String guestOsTypeName = getGuestOsType(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD); - Set templates = VM.getByNameLabel(conn, guestOsTypeName); - assert templates.size() == 1 : "Should only have 1 template but found " + templates.size(); - VM template = templates.iterator().next(); - - VM.Record vmr = template.getRecord(conn); - vmr.affinity = host; - vmr.otherConfig.remove("disks"); - vmr.otherConfig.remove("default_template"); - vmr.otherConfig.remove("mac_seed"); - vmr.isATemplate = false; - vmr.nameLabel = vmSpec.getName(); - vmr.actionsAfterCrash = 
Types.OnCrashBehaviour.DESTROY; - vmr.actionsAfterShutdown = Types.OnNormalExit.DESTROY; - - Map details = vmSpec.getDetails(); - if (isDmcEnabled(conn, host) && vmSpec.isEnableDynamicallyScaleVm()) { - //scaling is allowed - vmr.memoryStaticMin = getStaticMin(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam()); - vmr.memoryStaticMax = getStaticMax(vmSpec.getOs(), vmSpec.getBootloader() == BootloaderType.CD, vmSpec.getMinRam(), vmSpec.getMaxRam()); - vmr.memoryDynamicMin = vmSpec.getMinRam(); - vmr.memoryDynamicMax = vmSpec.getMaxRam(); - } else { - //scaling disallowed, set static memory target - if (vmSpec.isEnableDynamicallyScaleVm() && !isDmcEnabled(conn, host)) { - s_logger.warn("Host "+ host.getHostname(conn) +" does not support dynamic scaling, so the vm " + vmSpec.getName() + " is not dynamically scalable"); - } - vmr.memoryStaticMin = vmSpec.getMinRam(); - vmr.memoryStaticMax = vmSpec.getMaxRam(); - vmr.memoryDynamicMin = vmSpec.getMinRam(); - vmr.memoryDynamicMax = vmSpec.getMaxRam(); - } - - if (guestOsTypeName.toLowerCase().contains("windows")) { - vmr.VCPUsMax = (long) vmSpec.getCpus(); - } else { - vmr.VCPUsMax = 32L; - } - - String timeoffset = details.get("timeoffset"); - if (timeoffset != null) { - Map platform = vmr.platform; - platform.put("timeoffset", timeoffset); - vmr.platform = platform; - } - - String coresPerSocket = details.get("cpu.corespersocket"); - if (coresPerSocket != null) { - Map platform = vmr.platform; - platform.put("cores-per-socket", coresPerSocket); - vmr.platform = platform; - } - - vmr.VCPUsAtStartup = (long) vmSpec.getCpus(); - vmr.consoles.clear(); - - VM vm = VM.create(conn, vmr); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Created VM " + vm.getUuid(conn) + " for " + vmSpec.getName()); - } - - Map vcpuParams = new HashMap(); - - Integer speed = vmSpec.getMinSpeed(); - if (speed != null) { - - int cpuWeight = _maxWeight; // cpu_weight - int utilization = 0; // max 
CPU cap, default is unlimited - - // weight based allocation, CPU weight is calculated per VCPU - cpuWeight = (int) ((speed * 0.99) / _host.speed * _maxWeight); - if (cpuWeight > _maxWeight) { - cpuWeight = _maxWeight; - } - - if (vmSpec.getLimitCpuUse()) { - // CPU cap is per VM, so need to assign cap based on the number of vcpus - utilization = (int) ((speed * 0.99 * vmSpec.getCpus()) / _host.speed * 100); - } - - vcpuParams.put("weight", Integer.toString(cpuWeight)); - vcpuParams.put("cap", Integer.toString(utilization)); - - } - - if (vcpuParams.size() > 0) { - vm.setVCPUsParams(conn, vcpuParams); - } - - String bootArgs = vmSpec.getBootArgs(); - if (bootArgs != null && bootArgs.length() > 0) { - String pvargs = vm.getPVArgs(conn); - pvargs = pvargs + vmSpec.getBootArgs().replaceAll(" ", "%"); - if (s_logger.isDebugEnabled()) { - s_logger.debug("PV args are " + pvargs); - } - vm.setPVArgs(conn, pvargs); - } - - if (!(guestOsTypeName.startsWith("Windows") || guestOsTypeName.startsWith("Citrix") || guestOsTypeName.startsWith("Other"))) { - if (vmSpec.getBootloader() == BootloaderType.CD) { - DiskTO[] disks = vmSpec.getDisks(); - for (DiskTO disk : disks) { - if (disk.getType() == Volume.Type.ISO ) { - TemplateObjectTO iso = (TemplateObjectTO)disk.getData(); - String osType = iso.getGuestOsType(); - if (osType != null) { - String isoGuestOsName = getGuestOsType(osType, vmSpec.getBootloader() == BootloaderType.CD); - if (!isoGuestOsName.equals(guestOsTypeName)) { - vmSpec.setBootloader(BootloaderType.PyGrub); - } - } - } - } - } - if (vmSpec.getBootloader() == BootloaderType.CD) { - vm.setPVBootloader(conn, "eliloader"); - if (!vm.getOtherConfig(conn).containsKey("install-repository")) { - vm.addToOtherConfig(conn, "install-repository", "cdrom"); - } - } else if (vmSpec.getBootloader() == BootloaderType.PyGrub) { - vm.setPVBootloader(conn, "pygrub"); - } else { - vm.destroy(conn); - throw new CloudRuntimeException("Unable to handle boot loader type: " + 
vmSpec.getBootloader()); - } - } - return vm; - } /** * When Dynamic Memory Control (DMC) is enabled - diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java index c3c0307ca1b..2603922fd18 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServer610Resource.java @@ -452,5 +452,27 @@ public class XenServer610Resource extends XenServer56FP1Resource { @Override protected void plugDom0Vif(Connection conn, VIF dom0Vif) throws XmlRpcException, XenAPIException { // do nothing. In xenserver 6.1 and beyond this step isn't needed. + } + + @Override + protected String getVMXenToolsVersion(Map platform) { + if (platform.containsKey("device_id")) { + return "xenserver61"; + } + return "xenserver56"; } + + @Override + protected void finalizeVmMetaData(VM vm, Connection conn, VirtualMachineTO vmSpec) throws Exception { + Map details = vmSpec.getDetails(); + if ( details!= null ) { + String xentoolsversion = details.get("hypervisortoolsversion"); + if ( xentoolsversion == null || !xentoolsversion.equalsIgnoreCase("xenserver61") ) { + Map platform = vm.getPlatform(conn); + platform.remove("device_id"); + vm.setPlatform(conn, platform); + } + } + } + } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerPoolVms.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerPoolVms.java index 33f2bf96606..f22fb286c8a 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerPoolVms.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerPoolVms.java @@ -20,20 +20,20 @@ import java.util.HashMap; import java.util.Map; import org.apache.log4j.Logger; import java.util.concurrent.ConcurrentHashMap; -import com.cloud.utils.Pair; +import 
com.cloud.utils.Ternary; import com.cloud.vm.VirtualMachine.State; public class XenServerPoolVms { private static final Logger s_logger = Logger.getLogger(XenServerPoolVms.class); - private final Map>> _cluster_vms = - new ConcurrentHashMap>>(); + private final Map>> _cluster_vms = + new ConcurrentHashMap>>(); - public HashMap> getClusterVmState(String clusterId){ - HashMap> _vms= _cluster_vms.get(clusterId); + public HashMap> getClusterVmState(String clusterId){ + HashMap> _vms= _cluster_vms.get(clusterId); if (_vms==null) { - HashMap> vmStates = new HashMap>(); + HashMap> vmStates = new HashMap>(); _cluster_vms.put(clusterId, vmStates); return vmStates; } @@ -41,40 +41,47 @@ public class XenServerPoolVms { } public void clear(String clusterId){ - HashMap> _vms= getClusterVmState(clusterId); + HashMap> _vms= getClusterVmState(clusterId); _vms.clear(); } public State getState(String clusterId, String name){ - HashMap> vms = getClusterVmState(clusterId); - Pair pv = vms.get(name); + HashMap> vms = getClusterVmState(clusterId); + Ternary pv = vms.get(name); return pv == null ? State.Stopped : pv.second(); // if a VM is absent on the cluster, it is effectively in stopped state. 
} + + + public void put(String clusterId, String hostUuid, String name, State state, String xstoolsversion){ + HashMap> vms= getClusterVmState(clusterId); + vms.put(name, new Ternary(hostUuid, state, xstoolsversion)); + } + public void put(String clusterId, String hostUuid, String name, State state){ - HashMap> vms= getClusterVmState(clusterId); - vms.put(name, new Pair(hostUuid, state)); + HashMap> vms= getClusterVmState(clusterId); + vms.put(name, new Ternary(hostUuid, state, null)); } public void remove(String clusterId, String hostUuid, String name){ - HashMap> vms= getClusterVmState(clusterId); + HashMap> vms= getClusterVmState(clusterId); vms.remove(name); } - public void putAll(String clusterId, HashMap> new_vms){ - HashMap> vms= getClusterVmState(clusterId); + public void putAll(String clusterId, HashMap> new_vms){ + HashMap> vms= getClusterVmState(clusterId); vms.putAll(new_vms); } public int size(String clusterId){ - HashMap> vms= getClusterVmState(clusterId); + HashMap> vms= getClusterVmState(clusterId); return vms.size(); } @Override public String toString(){ StringBuilder sbuf = new StringBuilder("PoolVms="); - for (HashMap> clusterVM: _cluster_vms.values()){ + for (HashMap> clusterVM: _cluster_vms.values()){ for (String vmname: clusterVM.keySet()){ sbuf.append(vmname).append("-").append(clusterVM.get(vmname).second()).append(","); } diff --git a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java index 739b9743f44..5a19aee2468 100644 --- a/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java +++ b/plugins/hypervisors/xen/src/com/cloud/hypervisor/xen/resource/XenServerStorageProcessor.java @@ -18,6 +18,41 @@ */ package com.cloud.hypervisor.xen.resource; + +import static com.cloud.utils.ReflectUtil.flattenProperties; +import static com.google.common.collect.Lists.newArrayList; + + 
+import java.io.File; +import java.net.URI; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import org.apache.cloudstack.storage.command.AttachAnswer; +import org.apache.cloudstack.storage.command.AttachCommand; +import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer; +import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.CreateObjectAnswer; +import org.apache.cloudstack.storage.command.CreateObjectCommand; +import org.apache.cloudstack.storage.command.DeleteCommand; +import org.apache.cloudstack.storage.command.DettachAnswer; +import org.apache.cloudstack.storage.command.DettachCommand; +import org.apache.cloudstack.storage.command.ForgetObjectCmd; +import org.apache.cloudstack.storage.command.IntroduceObjectAnswer; +import org.apache.cloudstack.storage.command.IntroduceObjectCmd; +import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol; +import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; +import org.apache.xmlrpc.XmlRpcException; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.CreateStoragePoolCommand; import com.cloud.agent.api.to.DataObjectType; @@ -51,36 +86,6 @@ import com.xensource.xenapi.VBD; import com.xensource.xenapi.VDI; import com.xensource.xenapi.VM; import com.xensource.xenapi.VMGuestMetrics; -import org.apache.cloudstack.storage.command.AttachAnswer; -import org.apache.cloudstack.storage.command.AttachCommand; -import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreAnswer; -import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; -import 
org.apache.cloudstack.storage.command.CopyCmdAnswer; -import org.apache.cloudstack.storage.command.CopyCommand; -import org.apache.cloudstack.storage.command.CreateObjectAnswer; -import org.apache.cloudstack.storage.command.CreateObjectCommand; -import org.apache.cloudstack.storage.command.DeleteCommand; -import org.apache.cloudstack.storage.command.DettachAnswer; -import org.apache.cloudstack.storage.command.DettachCommand; -import org.apache.cloudstack.storage.datastore.protocol.DataStoreProtocol; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; -import org.apache.cloudstack.storage.to.SnapshotObjectTO; -import org.apache.cloudstack.storage.to.TemplateObjectTO; -import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; -import org.apache.xmlrpc.XmlRpcException; - -import java.io.File; -import java.net.URI; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -import static com.cloud.utils.ReflectUtil.flattenProperties; -import static com.google.common.collect.Lists.newArrayList; public class XenServerStorageProcessor implements StorageProcessor { private static final Logger s_logger = Logger.getLogger(XenServerStorageProcessor.class); @@ -159,17 +164,33 @@ public class XenServerStorageProcessor implements StorageProcessor { @Override public AttachAnswer attachVolume(AttachCommand cmd) { String vmName = cmd.getVmName(); + String vdiNameLabel = vmName + "-DATA"; DiskTO disk = cmd.getDisk(); DataTO data = disk.getData(); try { Connection conn = this.hypervisorResource.getConnection(); - // Look up the VDI + VDI vdi = null; - if (cmd.isManaged()) { - vdi = this.hypervisorResource.handleSrAndVdiAttach(cmd.get_iScsiName(), cmd.getStorageHost(), - cmd.getChapInitiatorUsername(), cmd.getChapInitiatorPassword()); + Map details = cmd.getDisk().getDetails(); + boolean isManaged = Boolean.parseBoolean(details.get(DiskTO.MANAGED)); + + if 
(isManaged) { + String iScsiName = details.get(DiskTO.IQN); + String storageHost = details.get(DiskTO.STORAGE_HOST); + String chapInitiatorUsername = disk.getDetails().get(DiskTO.CHAP_INITIATOR_USERNAME); + String chapInitiatorSecret = disk.getDetails().get(DiskTO.CHAP_INITIATOR_SECRET); + Long volumeSize = Long.parseLong(details.get(DiskTO.VOLUME_SIZE)); + + SR sr = this.hypervisorResource.getIscsiSR(conn, iScsiName, storageHost, iScsiName, + chapInitiatorUsername, chapInitiatorSecret, true); + + vdi = this.hypervisorResource.getVDIbyUuid(conn, data.getPath(), false); + + if (vdi == null) { + vdi = this.hypervisorResource.createVdi(sr, vdiNameLabel, volumeSize); + } } else { vdi = this.hypervisorResource.mount(conn, null, null, data.getPath()); @@ -225,7 +246,7 @@ public class XenServerStorageProcessor implements StorageProcessor { vbd.plug(conn); // Update the VDI's label to include the VM name - vdi.setNameLabel(conn, vmName + "-DATA"); + vdi.setNameLabel(conn, vdiNameLabel); DiskTO newDisk = new DiskTO(disk.getData(), Long.parseLong(diskNumber), vdi.getUuid(conn), disk.getType()); return new AttachAnswer(newDisk); @@ -841,8 +862,7 @@ public class XenServerStorageProcessor implements StorageProcessor { URI uri = new URI(storeUrl); String tmplpath = uri.getHost() + ":" + uri.getPath() + "/" + srcData.getPath(); - PrimaryDataStoreTO destStore = (PrimaryDataStoreTO)destData.getDataStore(); - String poolName = destStore.getUuid(); + String poolName = destData.getDataStore().getUuid(); Connection conn = hypervisorResource.getConnection(); SR poolsr = null; @@ -892,8 +912,7 @@ public class XenServerStorageProcessor implements StorageProcessor { try { Connection conn = hypervisorResource.getConnection(); - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)data.getDataStore(); - SR poolSr = hypervisorResource.getStorageRepository(conn, primaryStore.getUuid()); + SR poolSr = hypervisorResource.getStorageRepository(conn, data.getDataStore().getUuid()); VDI.Record vdir 
= new VDI.Record(); vdir.nameLabel = volume.getName(); vdir.SR = poolSr; @@ -921,7 +940,6 @@ public class XenServerStorageProcessor implements StorageProcessor { Connection conn = hypervisorResource.getConnection(); DataTO srcData = cmd.getSrcTO(); DataTO destData = cmd.getDestTO(); - PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore(); VolumeObjectTO volume = (VolumeObjectTO)destData; VDI vdi = null; try { @@ -943,7 +961,7 @@ public class XenServerStorageProcessor implements StorageProcessor { return new CopyCmdAnswer(newVol); } catch (Exception e) { - s_logger.warn("Unable to create volume; Pool=" + pool + "; Disk: ", e); + s_logger.warn("Unable to create volume; Pool=" + destData + "; Disk: ", e); return new CopyCmdAnswer(e.toString()); } } @@ -956,13 +974,12 @@ public class XenServerStorageProcessor implements StorageProcessor { int wait = cmd.getWait(); VolumeObjectTO srcVolume = (VolumeObjectTO)srcData; VolumeObjectTO destVolume = (VolumeObjectTO)destData; - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)destVolume.getDataStore(); DataStoreTO srcStore = srcVolume.getDataStore(); if (srcStore instanceof NfsTO) { NfsTO nfsStore = (NfsTO)srcStore; try { - SR primaryStoragePool = hypervisorResource.getStorageRepository(conn, primaryStore.getUuid()); + SR primaryStoragePool = hypervisorResource.getStorageRepository(conn, destVolume.getDataStore().getUuid()); String srUuid = primaryStoragePool.getUuid(conn); URI uri = new URI(nfsStore.getUrl()); String volumePath = uri.getHost() + ":" + uri.getPath() + File.separator + srcVolume.getPath(); @@ -1076,11 +1093,12 @@ public class XenServerStorageProcessor implements StorageProcessor { S3Utils.ClientOptions.class)); // https workaround for Introspector bug that does not // recognize Boolean accessor methods ... 
+ parameters.addAll(Arrays.asList("operation", "put", "filename", dir + "/" + filename, "iSCSIFlag", iSCSIFlag.toString(), "bucket", s3.getBucketName(), "key", key, "https", s3.isHttps() != null ? s3.isHttps().toString() - : "null")); + : "null", "maxSingleUploadSizeInBytes", String.valueOf(s3.getMaxSingleUploadSizeInBytes()))); final String result = hypervisorResource.callHostPluginAsync(connection, "s3xen", "s3", wait, parameters.toArray(new String[parameters.size()])); @@ -1179,8 +1197,7 @@ public class XenServerStorageProcessor implements StorageProcessor { DataTO cacheData = cmd.getCacheTO(); DataTO destData = cmd.getDestTO(); int wait = cmd.getWait(); - PrimaryDataStoreTO primaryStore = (PrimaryDataStoreTO)srcData.getDataStore(); - String primaryStorageNameLabel = primaryStore.getUuid(); + String primaryStorageNameLabel = srcData.getDataStore().getUuid(); String secondaryStorageUrl = null; NfsTO cacheStore = null; String destPath = null; @@ -1415,7 +1432,6 @@ public class XenServerStorageProcessor implements StorageProcessor { DataTO srcData = cmd.getSrcTO(); SnapshotObjectTO snapshot = (SnapshotObjectTO)srcData; DataTO destData = cmd.getDestTO(); - PrimaryDataStoreTO pool = (PrimaryDataStoreTO)destData.getDataStore(); DataStoreTO imageStore = srcData.getDataStore(); if (!(imageStore instanceof NfsTO)) { @@ -1423,7 +1439,7 @@ public class XenServerStorageProcessor implements StorageProcessor { } NfsTO nfsImageStore = (NfsTO)imageStore; - String primaryStorageNameLabel = pool.getUuid(); + String primaryStorageNameLabel = destData.getDataStore().getUuid(); String secondaryStorageUrl = nfsImageStore.getUrl(); int wait = cmd.getWait(); boolean result = false; @@ -1503,4 +1519,32 @@ public class XenServerStorageProcessor implements StorageProcessor { } return new Answer(cmd, false, "unsupported storage type"); } + + @Override + public Answer introduceObject(IntroduceObjectCmd cmd) { + try { + Connection conn = hypervisorResource.getConnection(); + DataStoreTO 
store = cmd.getDataTO().getDataStore(); + SR poolSr = hypervisorResource.getStorageRepository(conn, store.getUuid()); + poolSr.scan(conn); + return new IntroduceObjectAnswer(cmd.getDataTO()); + } catch (Exception e) { + s_logger.debug("Failed to introduce object", e); + return new Answer(cmd, false, e.toString()); + } + } + + @Override + public Answer forgetObject(ForgetObjectCmd cmd) { + try { + Connection conn = hypervisorResource.getConnection(); + DataTO data = cmd.getDataTO(); + VDI vdi = VDI.getByUuid(conn, data.getPath()); + vdi.forget(conn); + return new IntroduceObjectAnswer(cmd.getDataTO()); + } catch (Exception e) { + s_logger.debug("Failed to introduce object", e); + return new Answer(cmd, false, e.toString()); + } + } } diff --git a/plugins/hypervisors/xen/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java b/plugins/hypervisors/xen/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java index c796b691d8d..3dc7dd86cbc 100644 --- a/plugins/hypervisors/xen/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java +++ b/plugins/hypervisors/xen/src/org/apache/cloudstack/storage/motion/XenServerStorageMotionStrategy.java @@ -24,18 +24,18 @@ import java.util.Map; import javax.inject.Inject; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -71,18 +71,18 @@ public class XenServerStorageMotionStrategy implements DataMotionStrategy { @Inject VMInstanceDao instanceDao; @Override - public boolean canHandle(DataObject srcData, DataObject destData) { - return false; + public StrategyPriority canHandle(DataObject srcData, DataObject destData) { + return StrategyPriority.CANT_HANDLE; } @Override - public boolean canHandle(Map volumeMap, Host srcHost, Host destHost) { - boolean canHandle = false; + public StrategyPriority canHandle(Map volumeMap, Host srcHost, Host destHost) { if (srcHost.getHypervisorType() == HypervisorType.XenServer && destHost.getHypervisorType() == HypervisorType.XenServer) { - canHandle = true; + return StrategyPriority.HYPERVISOR; } - return canHandle; + + return StrategyPriority.CANT_HANDLE; } @Override diff --git a/plugins/network-elements/bigswitch-vns/resources/META-INF/cloudstack/vns/module.properties b/plugins/network-elements/bigswitch-vns/resources/META-INF/cloudstack/vns/module.properties new file mode 100644 index 00000000000..5783d38e5cb --- /dev/null +++ b/plugins/network-elements/bigswitch-vns/resources/META-INF/cloudstack/vns/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=vns +parent=network \ No newline at end of file diff --git a/plugins/network-elements/bigswitch-vns/resources/META-INF/cloudstack/vns/spring-vns-context.xml b/plugins/network-elements/bigswitch-vns/resources/META-INF/cloudstack/vns/spring-vns-context.xml new file mode 100644 index 00000000000..d5bb92afe3d --- /dev/null +++ b/plugins/network-elements/bigswitch-vns/resources/META-INF/cloudstack/vns/spring-vns-context.xml @@ -0,0 +1,36 @@ + + + + + + + + + + diff --git a/plugins/network-elements/bigswitch-vns/src/com/cloud/network/element/BigSwitchVnsElement.java b/plugins/network-elements/bigswitch-vns/src/com/cloud/network/element/BigSwitchVnsElement.java index cb6e7fcec61..6b75634f653 100644 --- a/plugins/network-elements/bigswitch-vns/src/com/cloud/network/element/BigSwitchVnsElement.java +++ b/plugins/network-elements/bigswitch-vns/src/com/cloud/network/element/BigSwitchVnsElement.java @@ -29,7 +29,6 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import com.cloud.agent.AgentManager; @@ -84,6 +83,9 @@ import com.cloud.resource.UnableDeleteHostException; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import 
com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; @@ -329,11 +331,10 @@ public class BigSwitchVnsElement extends AdapterBase implements @DB public BigSwitchVnsDeviceVO addBigSwitchVnsDevice(AddBigSwitchVnsDeviceCmd cmd) { ServerResource resource = new BigSwitchVnsResource(); - String deviceName = VnsConstants.BigSwitchVns.getName(); + final String deviceName = VnsConstants.BigSwitchVns.getName(); NetworkDevice networkDevice = NetworkDevice .getNetworkDevice(deviceName); - Long physicalNetworkId = cmd.getPhysicalNetworkId(); - BigSwitchVnsDeviceVO bigswitchVnsDevice = null; + final Long physicalNetworkId = cmd.getPhysicalNetworkId(); PhysicalNetworkVO physicalNetwork = _physicalNetworkDao .findById(physicalNetworkId); @@ -344,7 +345,7 @@ public class BigSwitchVnsElement extends AdapterBase implements } long zoneId = physicalNetwork.getDataCenterId(); - PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao + final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao .findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { @@ -377,33 +378,33 @@ public class BigSwitchVnsElement extends AdapterBase implements Map hostdetails = new HashMap(); hostdetails.putAll(params); - Transaction txn = Transaction.currentTxn(); try { resource.configure(cmd.getHost(), hostdetails); - Host host = _resourceMgr.addHost(zoneId, resource, + final Host host = _resourceMgr.addHost(zoneId, resource, Host.Type.L2Networking, params); if (host != null) { - txn.start(); - - bigswitchVnsDevice = new BigSwitchVnsDeviceVO(host.getId(), - physicalNetworkId, ntwkSvcProvider.getProviderName(), - deviceName); - _bigswitchVnsDao.persist(bigswitchVnsDevice); - - DetailVO detail = new DetailVO(host.getId(), - "bigswitchvnsdeviceid", - String.valueOf(bigswitchVnsDevice.getId())); - _hostDetailsDao.persist(detail); - - txn.commit(); - return bigswitchVnsDevice; + 
return Transaction.execute(new TransactionCallback() { + @Override + public BigSwitchVnsDeviceVO doInTransaction(TransactionStatus status) { + BigSwitchVnsDeviceVO bigswitchVnsDevice = new BigSwitchVnsDeviceVO(host.getId(), + physicalNetworkId, ntwkSvcProvider.getProviderName(), + deviceName); + _bigswitchVnsDao.persist(bigswitchVnsDevice); + + DetailVO detail = new DetailVO(host.getId(), + "bigswitchvnsdeviceid", + String.valueOf(bigswitchVnsDevice.getId())); + _hostDetailsDao.persist(detail); + + return bigswitchVnsDevice; + } + }); } else { throw new CloudRuntimeException( "Failed to add BigSwitch Vns Device due to internal error."); } } catch (ConfigurationException e) { - txn.rollback(); throw new CloudRuntimeException(e.getMessage()); } } diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/agent/api/ConfigureNexusVsmForAsaCommand.java b/plugins/network-elements/cisco-vnmc/src/com/cloud/agent/api/ConfigureNexusVsmForAsaCommand.java index b20ad1f2df6..863b3476a9f 100755 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/agent/api/ConfigureNexusVsmForAsaCommand.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/agent/api/ConfigureNexusVsmForAsaCommand.java @@ -16,6 +16,8 @@ // under the License. package com.cloud.agent.api; +import com.cloud.agent.api.LogLevel.Log4jLevel; + /** * Command for configuring n1kv VSM for asa1kv device. It does the following in VSM: * a. 
creating vservice node for asa1kv @@ -25,6 +27,7 @@ public class ConfigureNexusVsmForAsaCommand extends Command { private long _vlanId; private String _ipAddress; private String _vsmUsername; + @LogLevel(Log4jLevel.Off) private String _vsmPassword; private String _vsmIp; private String _asaInPortProfile; diff --git a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java index e27a0599b0b..fbda707fce2 100644 --- a/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java +++ b/plugins/network-elements/cisco-vnmc/src/com/cloud/network/element/CiscoVnmcElement.java @@ -29,7 +29,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; @@ -71,6 +70,7 @@ import com.cloud.dc.dao.ClusterVSMMapDao; import com.cloud.dc.dao.VlanDao; import com.cloud.deploy.DeployDestination; import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; @@ -123,7 +123,11 @@ import com.cloud.user.Account; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExceptionUtil; import com.cloud.utils.net.NetUtils; import 
com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; @@ -274,11 +278,11 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro } @Override - public boolean implement(Network network, NetworkOffering offering, - DeployDestination dest, ReservationContext context) + public boolean implement(final Network network, final NetworkOffering offering, + final DeployDestination dest, final ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { - DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); + final DataCenter zone = _entityMgr.findById(DataCenter.class, network.getDataCenterId()); if (zone.getNetworkType() == NetworkType.Basic) { s_logger.debug("Not handling network implement in zone of type " + NetworkType.Basic); @@ -289,7 +293,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro return false; } - List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); + final List devices = _ciscoVnmcDao.listByPhysicalNetwork(network.getPhysicalNetworkId()); if (devices.isEmpty()) { s_logger.error("No Cisco Vnmc device on network " + network.getName()); return false; @@ -312,29 +316,25 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro return false; } - Transaction txn = Transaction.currentTxn(); - boolean status = false; try { - txn.start(); - // ensure that there is an ASA 1000v assigned to this network CiscoAsa1000vDevice assignedAsa = assignAsa1000vToNetwork(network); if (assignedAsa == null) { s_logger.error("Unable to assign ASA 1000v device to network " + network.getName()); - return false; + throw new CloudRuntimeException("Unable to assign ASA 1000v device to network " + network.getName()); } ClusterVO asaCluster = _clusterDao.findById(assignedAsa.getClusterId()); ClusterVSMMapVO clusterVsmMap = 
_clusterVsmMapDao.findByClusterId(assignedAsa.getClusterId()); if (clusterVsmMap == null) { s_logger.error("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); - return false; + throw new CloudRuntimeException("Vmware cluster " + asaCluster.getName() + " has no Cisco Nexus VSM device associated with it"); } CiscoNexusVSMDeviceVO vsmDevice = _vsmDeviceDao.findById(clusterVsmMap.getVsmId()); if (vsmDevice == null) { s_logger.error("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); - return false; + throw new CloudRuntimeException("Unable to load details of Cisco Nexus VSM device associated with cluster " + asaCluster.getName()); } CiscoVnmcControllerVO ciscoVnmcDevice = devices.get(0); @@ -369,7 +369,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro outsideIp = _ipAddrMgr.allocateIp(owner, false, caller, callerUserId, zone); } catch (ResourceAllocationException e) { s_logger.error("Unable to allocate additional public Ip address. Exception details " + e); - return false; + throw new CloudRuntimeException("Unable to allocate additional public Ip address. Exception details " + e); } try { @@ -377,7 +377,8 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro } catch (ResourceAllocationException e) { s_logger.error("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". Exception details " + e); - return false; + throw new CloudRuntimeException("Unable to assign allocated additional public Ip " + outsideIp.getAddress().addr() + " to network with vlan " + vlanId + ". 
Exception details " + + e); } } @@ -388,7 +389,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro if (!createLogicalEdgeFirewall(vlanId, network.getGateway(), gatewayNetmask, outsideIp.getAddress().addr(), sourceNatIp.getNetmask(), publicGateways, ciscoVnmcHost.getId())) { s_logger.error("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); - return false; + throw new CloudRuntimeException("Failed to create logical edge firewall in Cisco VNMC device for network " + network.getName()); } // create stuff in VSM for ASA device @@ -397,29 +398,33 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro assignedAsa.getInPortProfile(), ciscoVnmcHost.getId())) { s_logger.error("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + " for ASA device for network " + network.getName()); - return false; + throw new CloudRuntimeException("Failed to configure Cisco Nexus VSM " + vsmDevice.getipaddr() + + " for ASA device for network " + network.getName()); } // configure source NAT if (!configureSourceNat(vlanId, network.getCidr(), sourceNatIp, ciscoVnmcHost.getId())) { s_logger.error("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); - return false; + throw new CloudRuntimeException("Failed to configure source NAT in Cisco VNMC device for network " + network.getName()); } // associate Asa 1000v instance with logical edge firewall if (!associateAsaWithLogicalEdgeFirewall(vlanId, assignedAsa.getManagementIp(), ciscoVnmcHost.getId())) { s_logger.error("Failed to associate Cisco ASA 1000v (" + assignedAsa.getManagementIp() + ") with logical edge firewall in VNMC for network " + network.getName()); - return false; - } - - status = true; - txn.commit(); - } finally { - if (!status) { - txn.rollback(); - //FIXME: also undo changes in VNMC, VSM if anything failed + throw new CloudRuntimeException("Failed to associate Cisco ASA 1000v (" 
+ assignedAsa.getManagementIp() + + ") with logical edge firewall in VNMC for network " + network.getName()); } + } catch (CloudRuntimeException e) { + unassignAsa1000vFromNetwork(network); + s_logger.error("CiscoVnmcElement failed", e); + return false; + } catch (Exception e) { + unassignAsa1000vFromNetwork(network); + ExceptionUtil.rethrowRuntime(e); + ExceptionUtil.rethrow(e, InsufficientAddressCapacityException.class); + ExceptionUtil.rethrow(e, ResourceUnavailableException.class); + throw new IllegalStateException(e); } return true; @@ -522,9 +527,9 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro @Override public CiscoVnmcController addCiscoVnmcResource(AddCiscoVnmcResourceCmd cmd) { - String deviceName = Provider.CiscoVnmc.getName(); + final String deviceName = Provider.CiscoVnmc.getName(); NetworkDevice networkDevice = NetworkDevice.getNetworkDevice(deviceName); - Long physicalNetworkId = cmd.getPhysicalNetworkId(); + final Long physicalNetworkId = cmd.getPhysicalNetworkId(); CiscoVnmcController ciscoVnmcResource = null; PhysicalNetworkVO physicalNetwork = _physicalNetworkDao.findById(physicalNetworkId); @@ -533,7 +538,7 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro } long zoneId = physicalNetwork.getDataCenterId(); - PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), + final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { throw new CloudRuntimeException("Network Service Provider: " + networkDevice.getNetworkServiceProvder() + @@ -560,27 +565,27 @@ public class CiscoVnmcElement extends AdapterBase implements SourceNatServicePro hostdetails.putAll(params); ServerResource resource = new CiscoVnmcResource(); - Transaction txn = 
Transaction.currentTxn(); try { resource.configure(cmd.getHost(), hostdetails); - Host host = _resourceMgr.addHost(zoneId, resource, Host.Type.ExternalFirewall, params); + final Host host = _resourceMgr.addHost(zoneId, resource, Host.Type.ExternalFirewall, params); if (host != null) { - txn.start(); + return Transaction.execute(new TransactionCallback() { + @Override + public CiscoVnmcController doInTransaction(TransactionStatus status) { + CiscoVnmcController ciscoVnmcResource = new CiscoVnmcControllerVO(host.getId(), physicalNetworkId, ntwkSvcProvider.getProviderName(), deviceName); + _ciscoVnmcDao.persist((CiscoVnmcControllerVO)ciscoVnmcResource); - ciscoVnmcResource = new CiscoVnmcControllerVO(host.getId(), physicalNetworkId, ntwkSvcProvider.getProviderName(), deviceName); - _ciscoVnmcDao.persist((CiscoVnmcControllerVO)ciscoVnmcResource); + DetailVO detail = new DetailVO(host.getId(), "deviceid", String.valueOf(ciscoVnmcResource.getId())); + _hostDetailsDao.persist(detail); - DetailVO detail = new DetailVO(host.getId(), "deviceid", String.valueOf(ciscoVnmcResource.getId())); - _hostDetailsDao.persist(detail); - - txn.commit(); - return ciscoVnmcResource; + return ciscoVnmcResource; + } + }); } else { throw new CloudRuntimeException("Failed to add Cisco Vnmc device due to internal error."); } } catch (ConfigurationException e) { - txn.rollback(); throw new CloudRuntimeException(e.getMessage()); } } diff --git a/plugins/network-elements/dns-notifier/pom.xml b/plugins/network-elements/dns-notifier/pom.xml index a0fa242a802..8c63ebf809c 100644 --- a/plugins/network-elements/dns-notifier/pom.xml +++ b/plugins/network-elements/dns-notifier/pom.xml @@ -25,7 +25,6 @@ 4.3.0-SNAPSHOT ../../pom.xml - org.apache.cloudstack cloud-plugin-example-dns-notifier Apache CloudStack Plugin - Dns Notifier Example This is sample source code on how to write a plugin for CloudStack diff --git 
a/plugins/network-elements/elastic-loadbalancer/resources/META-INF/cloudstack/elb/module.properties b/plugins/network-elements/elastic-loadbalancer/resources/META-INF/cloudstack/elb/module.properties new file mode 100644 index 00000000000..a8e3b9c8669 --- /dev/null +++ b/plugins/network-elements/elastic-loadbalancer/resources/META-INF/cloudstack/elb/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=elb +parent=network \ No newline at end of file diff --git a/plugins/network-elements/elastic-loadbalancer/resources/META-INF/cloudstack/elb/spring-elb-context.xml b/plugins/network-elements/elastic-loadbalancer/resources/META-INF/cloudstack/elb/spring-elb-context.xml new file mode 100644 index 00000000000..247153c4558 --- /dev/null +++ b/plugins/network-elements/elastic-loadbalancer/resources/META-INF/cloudstack/elb/spring-elb-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java index ecd6006edad..5c6f2e7dd63 100644 --- a/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java +++ b/plugins/network-elements/elastic-loadbalancer/src/com/cloud/network/lb/ElasticLoadBalancerManagerImpl.java @@ -36,11 +36,11 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.api.command.user.loadbalancer.CreateLoadBalancerRuleCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; @@ -85,7 +85,7 @@ import com.cloud.network.NetworkModel; import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.IPAddressDao; import 
com.cloud.network.dao.IPAddressVO; @@ -123,6 +123,10 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.vm.DomainRouterVO; @@ -311,7 +315,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast maxconn = offering.getConcurrentConnections().toString(); } LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lbs,elbVm.getPublicIpAddress(), - _nicDao.getIpAddress(guestNetworkId, elbVm.getId()),elbVm.getPrivateIpAddress(), null, null, maxconn); + _nicDao.getIpAddress(guestNetworkId, elbVm.getId()),elbVm.getPrivateIpAddress(), null, null, maxconn, offering.isKeepAliveEnabled()); cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, elbVm.getPrivateIpAddress()); cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, @@ -506,7 +510,7 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast if (provider == null) { throw new CloudRuntimeException("Cannot find service provider " + typeString + " in physical network " + physicalNetworkId); } - VirtualRouterProvider vrProvider = _vrProviderDao.findByNspIdAndType(provider.getId(), VirtualRouterProviderType.ElasticLoadBalancerVm); + VirtualRouterProvider vrProvider = _vrProviderDao.findByNspIdAndType(provider.getId(), Type.ElasticLoadBalancerVm); if (vrProvider == null) { throw new CloudRuntimeException("Cannot find virtual router provider " + typeString + " as service provider " + provider.getId()); } @@ -582,19 +586,21 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast } @DB - public PublicIp allocDirectIp(Account 
account, long guestNetworkId) throws InsufficientAddressCapacityException { - Network frontEndNetwork = _networkModel.getNetwork(guestNetworkId); - Transaction txn = Transaction.currentTxn(); - txn.start(); - - PublicIp ip = _ipAddrMgr.assignPublicIpAddress(frontEndNetwork.getDataCenterId(), null, account, VlanType.DirectAttached, frontEndNetwork.getId(), null, true); - IPAddressVO ipvo = _ipAddressDao.findById(ip.getId()); - ipvo.setAssociatedWithNetworkId(frontEndNetwork.getId()); - _ipAddressDao.update(ipvo.getId(), ipvo); - txn.commit(); - s_logger.info("Acquired frontend IP for ELB " + ip); + public PublicIp allocDirectIp(final Account account, final long guestNetworkId) throws InsufficientAddressCapacityException { + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAddressCapacityException { + Network frontEndNetwork = _networkModel.getNetwork(guestNetworkId); - return ip; + PublicIp ip = _ipAddrMgr.assignPublicIpAddress(frontEndNetwork.getDataCenterId(), null, account, VlanType.DirectAttached, frontEndNetwork.getId(), null, true); + IPAddressVO ipvo = _ipAddressDao.findById(ip.getId()); + ipvo.setAssociatedWithNetworkId(frontEndNetwork.getId()); + _ipAddressDao.update(ipvo.getId(), ipvo); + s_logger.info("Acquired frontend IP for ELB " + ip); + + return ip; + } + }); } public void releaseIp(long ipId, long userId, Account caller) { @@ -749,9 +755,9 @@ public class ElasticLoadBalancerManagerImpl extends ManagerBase implements Elast _gcCandidateElbVmIds = currentGcCandidates; } - public class CleanupThread implements Runnable { + public class CleanupThread extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { garbageCollectUnusedElbVms(); } diff --git a/plugins/network-elements/internal-loadbalancer/resources/META-INF/cloudstack/core/spring-internallb-core-context.xml 
b/plugins/network-elements/internal-loadbalancer/resources/META-INF/cloudstack/core/spring-internallb-core-context.xml new file mode 100644 index 00000000000..c03887931b8 --- /dev/null +++ b/plugins/network-elements/internal-loadbalancer/resources/META-INF/cloudstack/core/spring-internallb-core-context.xml @@ -0,0 +1,37 @@ + + + + + + + + + + diff --git a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java index 0b9a1b44b40..7080ca34106 100644 --- a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java +++ b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/element/InternalLoadBalancerElement.java @@ -53,7 +53,7 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.PublicIpAddress; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.dao.NetworkServiceMapDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.VirtualRouterProviderDao; @@ -73,9 +73,8 @@ import com.cloud.user.AccountManager; import com.cloud.user.User; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.vm.DomainRouterVO; @@ -270,7 +269,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override 
public boolean isReady(PhysicalNetworkServiceProvider provider) { VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(provider.getId(), - VirtualRouterProviderType.InternalLbVm); + Type.InternalLbVm); if (element == null) { return false; } @@ -282,7 +281,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(provider.getId(), - VirtualRouterProviderType.InternalLbVm); + Type.InternalLbVm); if (element == null) { return true; } @@ -467,7 +466,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override public VirtualRouterProvider configureInternalLoadBalancerElement(long id, boolean enable) { VirtualRouterProviderVO element = _vrProviderDao.findById(id); - if (element == null || element.getType() != VirtualRouterProviderType.InternalLbVm) { + if (element == null || element.getType() != Type.InternalLbVm) { throw new InvalidParameterValueException("Can't find " + getName() + " element with network service provider id " + id + " to be used as a provider for " + getName()); } @@ -480,7 +479,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override public VirtualRouterProvider addInternalLoadBalancerElement(long ntwkSvcProviderId) { - VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(ntwkSvcProviderId, VirtualRouterProviderType.InternalLbVm); + VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(ntwkSvcProviderId, Type.InternalLbVm); if (element != null) { s_logger.debug("There is already an " + getName() + " with service provider id " + ntwkSvcProviderId); return null; @@ -491,7 +490,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala throw 
new InvalidParameterValueException("Invalid network service provider is specified"); } - element = new VirtualRouterProviderVO(ntwkSvcProviderId, VirtualRouterProviderType.InternalLbVm); + element = new VirtualRouterProviderVO(ntwkSvcProviderId, Type.InternalLbVm); element = _vrProviderDao.persist(element); return element; } @@ -500,7 +499,7 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override public VirtualRouterProvider getInternalLoadBalancerElement(long id) { VirtualRouterProvider provider = _vrProviderDao.findById(id); - if (provider == null || provider.getType() != VirtualRouterProviderType.InternalLbVm) { + if (provider == null || provider.getType() != Type.InternalLbVm) { throw new InvalidParameterValueException("Unable to find " + getName() + " by id"); } return provider; @@ -509,19 +508,19 @@ public class InternalLoadBalancerElement extends AdapterBase implements LoadBala @Override public List searchForInternalLoadBalancerElements(Long id, Long ntwkSvsProviderId, Boolean enabled) { - SearchCriteriaService sc = SearchCriteria2.create(VirtualRouterProviderVO.class); + QueryBuilder sc = QueryBuilder.create(VirtualRouterProviderVO.class); if (id != null) { - sc.addAnd(sc.getEntity().getId(), Op.EQ, id); + sc.and(sc.entity().getId(), Op.EQ, id); } if (ntwkSvsProviderId != null) { - sc.addAnd(sc.getEntity().getNspId(), Op.EQ, ntwkSvsProviderId); + sc.and(sc.entity().getNspId(), Op.EQ, ntwkSvsProviderId); } if (enabled != null) { - sc.addAnd(sc.getEntity().isEnabled(), Op.EQ, enabled); + sc.and(sc.entity().isEnabled(), Op.EQ, enabled); } //return only Internal LB elements - sc.addAnd(sc.getEntity().getType(), Op.EQ, VirtualRouterProvider.VirtualRouterProviderType.InternalLbVm); + sc.and(sc.entity().getType(), Op.EQ, VirtualRouterProvider.Type.InternalLbVm); return sc.list(); } diff --git a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java 
b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java index 587ae993553..b6269ebae9d 100644 --- a/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java +++ b/plugins/network-elements/internal-loadbalancer/src/org/apache/cloudstack/network/lb/InternalLoadBalancerVMManagerImpl.java @@ -69,7 +69,7 @@ import com.cloud.network.NetworkModel; import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.VirtualRouterProviderDao; @@ -310,7 +310,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In VirtualRouterProvider lbProvider = _vrProviderDao.findById(internalLbVm.getElementId()); if (lbProvider == null) { - throw new CloudRuntimeException("Cannot find related element " + VirtualRouterProviderType.InternalLbVm + " of vm: " + internalLbVm.getHostName()); + throw new CloudRuntimeException("Cannot find related element " + Type.InternalLbVm + " of vm: " + internalLbVm.getHostName()); } Provider provider = Network.Provider.getProvider(lbProvider.getType().toString()); @@ -461,7 +461,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In } LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lbs, guestNic.getIp4Address(), guestNic.getIp4Address(), internalLbVm.getPrivateIpAddress(), - _itMgr.toNicTO(guestNicProfile, internalLbVm.getHypervisorType()), internalLbVm.getVpcId(), maxconn); + _itMgr.toNicTO(guestNicProfile, internalLbVm.getHypervisorType()), internalLbVm.getVpcId(), maxconn, offering.isKeepAliveEnabled()); 
cmd.lbStatsVisibility = _configDao.getValue(Config.NetworkLBHaproxyStatsVisbility.key()); cmd.lbStatsUri = _configDao.getValue(Config.NetworkLBHaproxyStatsUri.key()); @@ -629,7 +629,7 @@ public class InternalLoadBalancerVMManagerImpl extends ManagerBase implements In } protected long getInternalLbProviderId(Network guestNetwork) { - VirtualRouterProviderType type = VirtualRouterProviderType.InternalLbVm; + Type type = Type.InternalLbVm; long physicalNetworkId = _ntwkModel.getPhysicalNetworkId(guestNetwork); PhysicalNetworkServiceProvider provider = _physicalProviderDao.findByServiceProvider(physicalNetworkId, type.toString()); diff --git a/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbelement/InternalLbElementServiceTest.java b/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbelement/InternalLbElementServiceTest.java index bdc50cafb8c..7a47af9c4c5 100644 --- a/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbelement/InternalLbElementServiceTest.java +++ b/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbelement/InternalLbElementServiceTest.java @@ -33,7 +33,7 @@ import org.springframework.test.context.junit4.SpringJUnit4ClassRunner; import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; import com.cloud.network.dao.PhysicalNetworkServiceProviderVO; import com.cloud.network.dao.VirtualRouterProviderDao; @@ -65,15 +65,15 @@ public class InternalLbElementServiceTest { public void setUp() { ComponentContext.initComponentsLifeCycle(); - VirtualRouterProviderVO validElement = new VirtualRouterProviderVO(1, VirtualRouterProviderType.InternalLbVm); - VirtualRouterProviderVO invalidElement = 
new VirtualRouterProviderVO(1, VirtualRouterProviderType.VirtualRouter); + VirtualRouterProviderVO validElement = new VirtualRouterProviderVO(1, Type.InternalLbVm); + VirtualRouterProviderVO invalidElement = new VirtualRouterProviderVO(1, Type.VirtualRouter); Mockito.when(_vrProviderDao.findById(validElId)).thenReturn(validElement); Mockito.when(_vrProviderDao.findById(invalidElId)).thenReturn(invalidElement); Mockito.when(_vrProviderDao.persist(validElement)).thenReturn(validElement); - Mockito.when(_vrProviderDao.findByNspIdAndType(validProviderId, VirtualRouterProviderType.InternalLbVm)).thenReturn(validElement); + Mockito.when(_vrProviderDao.findByNspIdAndType(validProviderId, Type.InternalLbVm)).thenReturn(validElement); PhysicalNetworkServiceProviderVO validProvider = new PhysicalNetworkServiceProviderVO(1, "InternalLoadBalancerElement"); PhysicalNetworkServiceProviderVO invalidProvider = new PhysicalNetworkServiceProviderVO(1, "Invalid name!"); diff --git a/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbelement/InternalLbElementTest.java b/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbelement/InternalLbElementTest.java index 7af679649fe..f170fee28e1 100644 --- a/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbelement/InternalLbElementTest.java +++ b/plugins/network-elements/internal-loadbalancer/test/org/apache/cloudstack/internallbelement/InternalLbElementTest.java @@ -47,7 +47,7 @@ import com.cloud.dc.DataCenterVO; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.NetworkVO; import com.cloud.network.dao.PhysicalNetworkServiceProviderDao; @@ -91,18 +91,18 @@ 
public class InternalLbElementTest { public void setUp() { ComponentContext.initComponentsLifeCycle(); - VirtualRouterProviderVO validElement = new VirtualRouterProviderVO(1, VirtualRouterProviderType.InternalLbVm); + VirtualRouterProviderVO validElement = new VirtualRouterProviderVO(1, Type.InternalLbVm); validElement.setEnabled(true); - VirtualRouterProviderVO invalidElement = new VirtualRouterProviderVO(1, VirtualRouterProviderType.VirtualRouter); - VirtualRouterProviderVO notEnabledElement = new VirtualRouterProviderVO(1, VirtualRouterProviderType.InternalLbVm); + VirtualRouterProviderVO invalidElement = new VirtualRouterProviderVO(1, Type.VirtualRouter); + VirtualRouterProviderVO notEnabledElement = new VirtualRouterProviderVO(1, Type.InternalLbVm); - Mockito.when(_vrProviderDao.findByNspIdAndType(validElId, VirtualRouterProviderType.InternalLbVm)).thenReturn(validElement); - Mockito.when(_vrProviderDao.findByNspIdAndType(invalidElId, VirtualRouterProviderType.InternalLbVm)).thenReturn(invalidElement); - Mockito.when(_vrProviderDao.findByNspIdAndType(notEnabledElId, VirtualRouterProviderType.InternalLbVm)).thenReturn(notEnabledElement); + Mockito.when(_vrProviderDao.findByNspIdAndType(validElId, Type.InternalLbVm)).thenReturn(validElement); + Mockito.when(_vrProviderDao.findByNspIdAndType(invalidElId, Type.InternalLbVm)).thenReturn(invalidElement); + Mockito.when(_vrProviderDao.findByNspIdAndType(notEnabledElId, Type.InternalLbVm)).thenReturn(notEnabledElement); Mockito.when(_vrProviderDao.persist(validElement)).thenReturn(validElement); - Mockito.when(_vrProviderDao.findByNspIdAndType(validProviderId, VirtualRouterProviderType.InternalLbVm)).thenReturn(validElement); + Mockito.when(_vrProviderDao.findByNspIdAndType(validProviderId, Type.InternalLbVm)).thenReturn(validElement); PhysicalNetworkServiceProviderVO validProvider = new PhysicalNetworkServiceProviderVO(1, "InternalLoadBalancerElement"); PhysicalNetworkServiceProviderVO invalidProvider = new 
PhysicalNetworkServiceProviderVO(1, "Invalid name!"); diff --git a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java index af67b026de0..85210379af7 100644 --- a/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java +++ b/plugins/network-elements/juniper-srx/src/com/cloud/network/element/JuniperSRXExternalFirewallElement.java @@ -95,7 +95,7 @@ import com.cloud.vm.VirtualMachineProfile; PortForwardingServiceProvider.class, IpDeployer.class, SourceNatServiceProvider.class, RemoteAccessVPNServiceProvider.class}) public class JuniperSRXExternalFirewallElement extends ExternalFirewallDeviceManagerImpl implements SourceNatServiceProvider, FirewallServiceProvider, -PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer, JuniperSRXFirewallElementService, StaticNatServiceProvider { +PortForwardingServiceProvider, IpDeployer, JuniperSRXFirewallElementService, StaticNatServiceProvider { private static final Logger s_logger = Logger.getLogger(JuniperSRXExternalFirewallElement.class); @@ -225,42 +225,6 @@ PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer, Junip return applyFirewallRules(config, rules); } - @Override - public boolean startVpn(Network config, RemoteAccessVpn vpn) throws ResourceUnavailableException { - if (!canHandle(config, Service.Vpn)) { - return false; - } - - return manageRemoteAccessVpn(true, config, vpn); - - } - - @Override - public boolean stopVpn(Network config, RemoteAccessVpn vpn) throws ResourceUnavailableException { - if (!canHandle(config, Service.Vpn)) { - return false; - } - - return manageRemoteAccessVpn(false, config, vpn); - } - - @Override - public String[] applyVpnUsers(RemoteAccessVpn vpn, List users) throws ResourceUnavailableException { - Network config = 
_networksDao.findById(vpn.getNetworkId()); - - if (!canHandle(config, Service.Vpn)) { - return null; - } - - boolean result = manageRemoteAccessVpnUsers(config, vpn, users); - String[] results = new String[users.size()]; - for (int i = 0; i < results.length; i++) { - results[i] = String.valueOf(result); - } - - return results; - } - @Override public Provider getProvider() { return Provider.JuniperSRX; diff --git a/plugins/network-elements/midonet/resources/META-INF/cloudstack/midonet/module.properties b/plugins/network-elements/midonet/resources/META-INF/cloudstack/midonet/module.properties new file mode 100644 index 00000000000..786ccc8dbe5 --- /dev/null +++ b/plugins/network-elements/midonet/resources/META-INF/cloudstack/midonet/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=midonet +parent=network \ No newline at end of file diff --git a/plugins/network-elements/midonet/resources/META-INF/cloudstack/midonet/spring-midonet-context.xml b/plugins/network-elements/midonet/resources/META-INF/cloudstack/midonet/spring-midonet-context.xml new file mode 100644 index 00000000000..400f7dd6ed6 --- /dev/null +++ b/plugins/network-elements/midonet/resources/META-INF/cloudstack/midonet/spring-midonet-context.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + diff --git a/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetPublicNetworkGuru.java b/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetPublicNetworkGuru.java index eccf4aa3944..c11c8719496 100644 --- a/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetPublicNetworkGuru.java +++ b/plugins/network-elements/midonet/src/com/cloud/network/guru/MidoNetPublicNetworkGuru.java @@ -46,6 +46,8 @@ import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.Nic; import com.cloud.vm.NicProfile; @@ -206,16 +208,15 @@ public class MidoNetPublicNetworkGuru extends PublicNetworkGuru { s_logger.debug("public network deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIp4Address()); } - IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); + final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); if (ip != null && nic.getReservationStrategy() != Nic.ReservationStrategy.Managed) { - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - _ipAddrMgr.markIpAsUnavailable(ip.getId()); - _ipAddressDao.unassignIpAddress(ip.getId()); - - txn.commit(); + Transaction.execute(new 
TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _ipAddrMgr.markIpAsUnavailable(ip.getId()); + _ipAddressDao.unassignIpAddress(ip.getId()); + } + }); } nic.deallocate(); diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/dao/NetScalerPodDaoImpl.java b/plugins/network-elements/netscaler/src/com/cloud/network/dao/NetScalerPodDaoImpl.java index 30dd06db1aa..e9af1066da0 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/dao/NetScalerPodDaoImpl.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/dao/NetScalerPodDaoImpl.java @@ -30,7 +30,7 @@ import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; @Component -@Local(value=NetScalerPodDao.class) @DB(txn=false) +@Local(value=NetScalerPodDao.class) @DB public class NetScalerPodDaoImpl extends GenericDaoBase implements NetScalerPodDao { final SearchBuilder podIdSearch; diff --git a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java index d63b14f8a58..8101864840b 100644 --- a/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java +++ b/plugins/network-elements/netscaler/src/com/cloud/network/element/NetscalerElement.java @@ -31,7 +31,6 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; import org.apache.cloudstack.region.gslb.GslbServiceProvider; - import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; @@ -110,13 +109,14 @@ import com.cloud.offering.NetworkOffering; import com.cloud.utils.NumbersUtil; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import 
com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.UrlUtil; import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; - import com.google.gson.Gson; @Local(value = {NetworkElement.class, StaticNatServiceProvider.class, LoadBalancingServiceProvider.class, GslbServiceProvider.class}) @@ -409,9 +409,9 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl } @DB - private ExternalLoadBalancerDeviceVO configureNetscalerLoadBalancer(long lbDeviceId, Long capacity, Boolean dedicatedUse, List newPodsConfig) { - ExternalLoadBalancerDeviceVO lbDeviceVo = _lbDeviceDao.findById(lbDeviceId); - Map lbDetails = _detailsDao.findDetails(lbDeviceVo.getHostId()); + private ExternalLoadBalancerDeviceVO configureNetscalerLoadBalancer(final long lbDeviceId, Long capacity, Boolean dedicatedUse, List newPodsConfig) { + final ExternalLoadBalancerDeviceVO lbDeviceVo = _lbDeviceDao.findById(lbDeviceId); + final Map lbDetails = _detailsDao.findDetails(lbDeviceVo.getHostId()); if ((lbDeviceVo == null) || !isNetscalerDevice(lbDeviceVo.getDeviceName())) { throw new InvalidParameterValueException("No netscaler device found with ID: " + lbDeviceId); @@ -425,7 +425,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl } } - List podsToAssociate = new ArrayList(); + final List podsToAssociate = new ArrayList(); if (newPodsConfig != null && newPodsConfig.size() > 0) { for (Long podId: newPodsConfig) { HostPodVO pod = _podDao.findById(podId); @@ -441,7 +441,7 @@ public class NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl } } - List podsToDeassociate = new ArrayList(); + final List podsToDeassociate = new ArrayList(); for (Long podId: currentPodsConfig) { if (!newPodsConfig.contains(podId)) { podsToDeassociate.add(podId); @@ -482,26 +482,28 @@ public class 
NetscalerElement extends ExternalLoadBalancerDeviceManagerImpl impl lbDeviceVo.setIsDedicatedDevice(dedicatedUse); } - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _lbDeviceDao.update(lbDeviceId, lbDeviceVo); + + for (Long podId: podsToAssociate) { + NetScalerPodVO nsPodVo = new NetScalerPodVO(lbDeviceId, podId); + _netscalerPodDao.persist(nsPodVo); + } + + for (Long podId: podsToDeassociate) { + NetScalerPodVO nsPodVo = _netscalerPodDao.findByPodId(podId); + _netscalerPodDao.remove(nsPodVo.getId()); + } + + // FIXME get the row lock to avoid race condition + _detailsDao.persist(lbDeviceVo.getHostId(), lbDetails); - _lbDeviceDao.update(lbDeviceId, lbDeviceVo); - - for (Long podId: podsToAssociate) { - NetScalerPodVO nsPodVo = new NetScalerPodVO(lbDeviceId, podId); - _netscalerPodDao.persist(nsPodVo); - } - - for (Long podId: podsToDeassociate) { - NetScalerPodVO nsPodVo = _netscalerPodDao.findByPodId(podId); - _netscalerPodDao.remove(nsPodVo.getId()); - } - - // FIXME get the row lock to avoid race condition - _detailsDao.persist(lbDeviceVo.getHostId(), lbDetails); + } + }); HostVO host = _hostDao.findById(lbDeviceVo.getHostId()); - txn.commit(); - + _agentMgr.reconnect(host.getId()); return lbDeviceVo; } diff --git a/plugins/network-elements/nicira-nvp/resources/META-INF/cloudstack/nvp/module.properties b/plugins/network-elements/nicira-nvp/resources/META-INF/cloudstack/nvp/module.properties new file mode 100644 index 00000000000..92f92a4b4b0 --- /dev/null +++ b/plugins/network-elements/nicira-nvp/resources/META-INF/cloudstack/nvp/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=nvp +parent=network \ No newline at end of file diff --git a/plugins/network-elements/nicira-nvp/resources/META-INF/cloudstack/nvp/spring-nvp-context.xml b/plugins/network-elements/nicira-nvp/resources/META-INF/cloudstack/nvp/spring-nvp-context.xml new file mode 100644 index 00000000000..302b072c8d0 --- /dev/null +++ b/plugins/network-elements/nicira-nvp/resources/META-INF/cloudstack/nvp/spring-nvp-context.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + diff --git a/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java b/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java index 5400dd4137c..ef8ec863c9a 100644 --- a/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java +++ b/plugins/network-elements/nicira-nvp/src/com/cloud/network/element/NiciraNvpElement.java @@ -30,7 +30,6 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.network.ExternalNetworkDeviceManager.NetworkDevice; @@ -114,6 +113,8 @@ import com.cloud.user.Account; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import 
com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.NicProfile; @@ -552,12 +553,10 @@ public class NiciraNvpElement extends AdapterBase implements @DB public NiciraNvpDeviceVO addNiciraNvpDevice(AddNiciraNvpDeviceCmd cmd) { ServerResource resource = new NiciraNvpResource(); - String deviceName = Network.Provider.NiciraNvp.getName(); + final String deviceName = Network.Provider.NiciraNvp.getName(); NetworkDevice networkDevice = NetworkDevice .getNetworkDevice(deviceName); - Long physicalNetworkId = cmd.getPhysicalNetworkId(); - NiciraNvpDeviceVO niciraNvpDevice = null; - + final Long physicalNetworkId = cmd.getPhysicalNetworkId(); PhysicalNetworkVO physicalNetwork = _physicalNetworkDao .findById(physicalNetworkId); if (physicalNetwork == null) { @@ -567,7 +566,7 @@ public class NiciraNvpElement extends AdapterBase implements } long zoneId = physicalNetwork.getDataCenterId(); - PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao + final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao .findByServiceProvider(physicalNetwork.getId(), networkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null) { @@ -606,33 +605,33 @@ public class NiciraNvpElement extends AdapterBase implements Map hostdetails = new HashMap(); hostdetails.putAll(params); - Transaction txn = Transaction.currentTxn(); try { resource.configure(cmd.getHost(), hostdetails); - Host host = _resourceMgr.addHost(zoneId, resource, + final Host host = _resourceMgr.addHost(zoneId, resource, Host.Type.L2Networking, params); if (host != null) { - txn.start(); + return Transaction.execute(new TransactionCallback() { + @Override + public NiciraNvpDeviceVO doInTransaction(TransactionStatus status) { + NiciraNvpDeviceVO niciraNvpDevice = new NiciraNvpDeviceVO(host.getId(), + physicalNetworkId, ntwkSvcProvider.getProviderName(), + deviceName); 
+ _niciraNvpDao.persist(niciraNvpDevice); + + DetailVO detail = new DetailVO(host.getId(), + "niciranvpdeviceid", String.valueOf(niciraNvpDevice + .getId())); + _hostDetailsDao.persist(detail); - niciraNvpDevice = new NiciraNvpDeviceVO(host.getId(), - physicalNetworkId, ntwkSvcProvider.getProviderName(), - deviceName); - _niciraNvpDao.persist(niciraNvpDevice); - - DetailVO detail = new DetailVO(host.getId(), - "niciranvpdeviceid", String.valueOf(niciraNvpDevice - .getId())); - _hostDetailsDao.persist(detail); - - txn.commit(); - return niciraNvpDevice; + return niciraNvpDevice; + } + }); } else { throw new CloudRuntimeException( "Failed to add Nicira Nvp Device due to internal error."); } } catch (ConfigurationException e) { - txn.rollback(); throw new CloudRuntimeException(e.getMessage()); } } diff --git a/plugins/network-elements/ovs/resources/META-INF/cloudstack/ovs/module.properties b/plugins/network-elements/ovs/resources/META-INF/cloudstack/ovs/module.properties new file mode 100644 index 00000000000..c3fa5ff294b --- /dev/null +++ b/plugins/network-elements/ovs/resources/META-INF/cloudstack/ovs/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=ovs +parent=network diff --git a/plugins/network-elements/ovs/resources/META-INF/cloudstack/ovs/spring-ovs-context.xml b/plugins/network-elements/ovs/resources/META-INF/cloudstack/ovs/spring-ovs-context.xml new file mode 100644 index 00000000000..9180eebd040 --- /dev/null +++ b/plugins/network-elements/ovs/resources/META-INF/cloudstack/ovs/spring-ovs-context.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + diff --git a/plugins/network-elements/stratosphere-ssp/resources/META-INF/cloudstack/ssp/module.properties b/plugins/network-elements/stratosphere-ssp/resources/META-INF/cloudstack/ssp/module.properties new file mode 100644 index 00000000000..5a99e561b04 --- /dev/null +++ b/plugins/network-elements/stratosphere-ssp/resources/META-INF/cloudstack/ssp/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=ssp +parent=network \ No newline at end of file diff --git a/plugins/network-elements/stratosphere-ssp/resources/META-INF/cloudstack/ssp/spring-ssp-context.xml b/plugins/network-elements/stratosphere-ssp/resources/META-INF/cloudstack/ssp/spring-ssp-context.xml new file mode 100644 index 00000000000..528f3e3e38c --- /dev/null +++ b/plugins/network-elements/stratosphere-ssp/resources/META-INF/cloudstack/ssp/spring-ssp-context.xml @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + diff --git a/plugins/network-elements/vxlan/pom.xml b/plugins/network-elements/vxlan/pom.xml new file mode 100644 index 00000000000..fe3bbd4ac7a --- /dev/null +++ b/plugins/network-elements/vxlan/pom.xml @@ -0,0 +1,29 @@ + + + 4.0.0 + cloud-plugin-network-vxlan + Apache CloudStack Plugin - Network VXLAN + + org.apache.cloudstack + cloudstack-plugins + 4.3.0-SNAPSHOT + ../../pom.xml + + diff --git a/plugins/network-elements/vxlan/resources/META-INF/cloudstack/vxlan/module.properties b/plugins/network-elements/vxlan/resources/META-INF/cloudstack/vxlan/module.properties new file mode 100644 index 00000000000..4c2c7f7f7ce --- /dev/null +++ b/plugins/network-elements/vxlan/resources/META-INF/cloudstack/vxlan/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +name=vxlan +parent=network \ No newline at end of file diff --git a/plugins/network-elements/vxlan/resources/META-INF/cloudstack/vxlan/spring-vxlan-context.xml b/plugins/network-elements/vxlan/resources/META-INF/cloudstack/vxlan/spring-vxlan-context.xml new file mode 100644 index 00000000000..fcf0b0189eb --- /dev/null +++ b/plugins/network-elements/vxlan/resources/META-INF/cloudstack/vxlan/spring-vxlan-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/network-elements/vxlan/src/com/cloud/network/guru/VxlanGuestNetworkGuru.java b/plugins/network-elements/vxlan/src/com/cloud/network/guru/VxlanGuestNetworkGuru.java new file mode 100644 index 00000000000..e2ba8689ad8 --- /dev/null +++ b/plugins/network-elements/vxlan/src/com/cloud/network/guru/VxlanGuestNetworkGuru.java @@ -0,0 +1,179 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.guru; + +import javax.ejb.Local; + +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.event.ActionEventUtils; +import com.cloud.event.EventTypes; +import com.cloud.event.EventVO; +import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientVirtualNetworkCapcityException; +import com.cloud.network.Network; +import com.cloud.network.NetworkProfile; +import com.cloud.network.Network.GuestType; +import com.cloud.network.Network.State; +import com.cloud.network.Networks.BroadcastDomainType; +import com.cloud.network.PhysicalNetwork; +import com.cloud.network.PhysicalNetwork.IsolationMethod; +import com.cloud.network.dao.NetworkVO; +import com.cloud.offering.NetworkOffering; +import com.cloud.user.Account; +import com.cloud.vm.NicProfile; +import com.cloud.vm.ReservationContext; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.VirtualMachineProfile; + +@Component +@Local(value=NetworkGuru.class) +public class VxlanGuestNetworkGuru extends GuestNetworkGuru { + private static final Logger s_logger = Logger.getLogger(VxlanGuestNetworkGuru.class); + + public VxlanGuestNetworkGuru() { + super(); + _isolationMethods = new IsolationMethod[] { IsolationMethod.VXLAN }; + } + + @Override + protected boolean canHandle(NetworkOffering offering, final NetworkType networkType, final PhysicalNetwork physicalNetwork) { + // This guru handles only Guest Isolated network that supports Source nat service + if (networkType == NetworkType.Advanced + && isMyTrafficType(offering.getTrafficType()) + && offering.getGuestType() == Network.GuestType.Isolated + && isMyIsolationMethod(physicalNetwork)) { + return true; + } else { + s_logger.trace("We only 
take care of Guest networks of type " + GuestType.Isolated + " in zone of type " + NetworkType.Advanced); + return false; + } + } + + @Override + public Network design(NetworkOffering offering, DeploymentPlan plan, Network userSpecified, Account owner) { + + NetworkVO network = (NetworkVO) super.design(offering, plan, userSpecified, owner); + if (network == null) { + return null; + } + + network.setBroadcastDomainType(BroadcastDomainType.Vxlan); + + return network; + } + + protected void allocateVnet(Network network, NetworkVO implemented, long dcId, + long physicalNetworkId, String reservationId) throws InsufficientVirtualNetworkCapcityException { + if (network.getBroadcastUri() == null) { + String vnet = _dcDao.allocateVnet(dcId, physicalNetworkId, network.getAccountId(), reservationId, + UseSystemGuestVlans.valueIn(network.getAccountId())); + if (vnet == null) { + throw new InsufficientVirtualNetworkCapcityException("Unable to allocate vnet as a " + + "part of network " + network + " implement ", DataCenter.class, dcId); + } + implemented.setBroadcastUri(BroadcastDomainType.Vxlan.toUri(vnet)); + allocateVnetComplete(network, implemented, dcId, physicalNetworkId, reservationId, vnet); + } else { + implemented.setBroadcastUri(network.getBroadcastUri()); + } + } + + // For Test: Mockit cannot mock static method, wrap it + protected void allocateVnetComplete(Network network, NetworkVO implemented, long dcId, + long physicalNetworkId, String reservationId, String vnet) { + //TODO(VXLAN): Add new event type for vxlan? 
+ ActionEventUtils.onCompletedActionEvent(CallContext.current().getCallingUserId(), network.getAccountId(), + EventVO.LEVEL_INFO, EventTypes.EVENT_ZONE_VLAN_ASSIGN, "Assigned Zone vNet: " + vnet + " Network Id: " + network.getId(), 0); + } + + @Override + public Network implement(Network network, NetworkOffering offering, + DeployDestination dest, ReservationContext context) + throws InsufficientVirtualNetworkCapcityException { + assert (network.getState() == State.Implementing) : "Why are we implementing " + network; + + long dcId = dest.getDataCenter().getId(); + + //get physical network id + Long physicalNetworkId = network.getPhysicalNetworkId(); + + // physical network id can be null in Guest Network in Basic zone, so locate the physical network + if (physicalNetworkId == null) { + physicalNetworkId = _networkModel.findPhysicalNetworkId(dcId, offering.getTags(), offering.getTrafficType()); + } + + NetworkVO implemented = new NetworkVO(network.getTrafficType(), network.getMode(), network.getBroadcastDomainType(), network.getNetworkOfferingId(), State.Allocated, + network.getDataCenterId(), physicalNetworkId); + + allocateVnet(network, implemented, dcId, physicalNetworkId, context.getReservationId()); + + if (network.getGateway() != null) { + implemented.setGateway(network.getGateway()); + } + + if (network.getCidr() != null) { + implemented.setCidr(network.getCidr()); + } + + return implemented; + } + + @Override + public void reserve(NicProfile nic, Network network, + VirtualMachineProfile vm, + DeployDestination dest, ReservationContext context) + throws InsufficientVirtualNetworkCapcityException, + InsufficientAddressCapacityException { + super.reserve(nic, network, vm, dest, context); + } + + @Override + public boolean release(NicProfile nic, + VirtualMachineProfile vm, + String reservationId) { + return super.release(nic, vm, reservationId); + } + + @Override + public void shutdown(NetworkProfile profile, NetworkOffering offering) { + NetworkVO 
networkObject = _networkDao.findById(profile.getId()); + if (networkObject.getBroadcastDomainType() != BroadcastDomainType.Vxlan || + networkObject.getBroadcastUri() == null) { + s_logger.warn("BroadcastUri is empty or incorrect for guestnetwork " + networkObject.getDisplayText()); + return; + } + + super.shutdown(profile, offering); + } + + @Override + public boolean trash(Network network, NetworkOffering offering) { + return super.trash(network, offering); + } + + + + + +} diff --git a/plugins/network-elements/vxlan/test/com/cloud/network/guru/VxlanGuestNetworkGuruTest.java b/plugins/network-elements/vxlan/test/com/cloud/network/guru/VxlanGuestNetworkGuruTest.java new file mode 100644 index 00000000000..fc1767444a3 --- /dev/null +++ b/plugins/network-elements/vxlan/test/com/cloud/network/guru/VxlanGuestNetworkGuruTest.java @@ -0,0 +1,274 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.network.guru; + +import java.net.URI; +import java.net.URISyntaxException; + +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.junit.Before; +import org.junit.Test; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.*; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Command; +import com.cloud.dc.DataCenter; +import com.cloud.dc.DataCenterVO; +import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.deploy.DeployDestination; +import com.cloud.deploy.DeploymentPlan; +import com.cloud.domain.Domain; +import com.cloud.exception.InsufficientVirtualNetworkCapcityException; +import com.cloud.network.Network; +import com.cloud.network.Network.GuestType; +import com.cloud.network.Network.Service; +import com.cloud.network.Network.State; +import com.cloud.network.NetworkModel; +import com.cloud.network.NetworkProfile; +import com.cloud.network.Networks.BroadcastDomainType; +import com.cloud.network.Networks.TrafficType; +import com.cloud.network.dao.NetworkDao; +import com.cloud.network.dao.NetworkVO; +import com.cloud.network.dao.PhysicalNetworkDao; +import com.cloud.network.dao.PhysicalNetworkVO; +import com.cloud.offering.NetworkOffering; +import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; +import com.cloud.server.ConfigurationServer; +import com.cloud.user.Account; +import com.cloud.vm.ReservationContext; + +import java.util.Arrays; + +public class VxlanGuestNetworkGuruTest { + PhysicalNetworkDao physnetdao = mock (PhysicalNetworkDao.class); + DataCenterDao dcdao = mock(DataCenterDao.class); + AgentManager agentmgr = mock (AgentManager.class); + NetworkOrchestrationService netmgr = mock (NetworkOrchestrationService.class); + NetworkModel netmodel = mock (NetworkModel.class); + ConfigurationServer confsvr = mock(ConfigurationServer.class); + + NetworkDao netdao = mock(NetworkDao.class); + VxlanGuestNetworkGuru 
guru; + + @Before + public void setUp() { + guru = spy( new VxlanGuestNetworkGuru() ); + ((GuestNetworkGuru) guru)._physicalNetworkDao = physnetdao; + guru._physicalNetworkDao = physnetdao; + guru._dcDao = dcdao; + guru._networkModel = netmodel; + guru._networkDao = netdao; + ((GuestNetworkGuru) guru)._configServer = confsvr; + + DataCenterVO dc = mock(DataCenterVO.class); + when(dc.getNetworkType()).thenReturn(NetworkType.Advanced); + when(dc.getGuestNetworkCidr()).thenReturn("10.1.1.1/24"); + + when(dcdao.findById(anyLong())).thenReturn((DataCenterVO) dc); + } + + @Test + public void testCanHandle() { + NetworkOffering offering = mock(NetworkOffering.class); + when(offering.getId()).thenReturn(42L); + when(offering.getTrafficType()).thenReturn(TrafficType.Guest); + when(offering.getGuestType()).thenReturn(GuestType.Isolated); + + PhysicalNetworkVO physnet = mock(PhysicalNetworkVO.class); + when(physnet.getIsolationMethods()).thenReturn(Arrays.asList(new String[] { "VXLAN" })); + when(physnet.getId()).thenReturn(42L); + + assertTrue(guru.canHandle(offering, NetworkType.Advanced, physnet) == true); + + // Not supported TrafficType != Guest + when(offering.getTrafficType()).thenReturn(TrafficType.Management); + assertFalse(guru.canHandle(offering, NetworkType.Advanced, physnet) == true); + + // Not supported: GuestType Shared + when(offering.getTrafficType()).thenReturn(TrafficType.Guest); + when(offering.getGuestType()).thenReturn(GuestType.Shared); + assertFalse(guru.canHandle(offering, NetworkType.Advanced, physnet) == true); + + // Not supported: Basic networking + when(offering.getGuestType()).thenReturn(GuestType.Isolated); + assertFalse(guru.canHandle(offering, NetworkType.Basic, physnet) == true); + + // Not supported: IsolationMethod != VXLAN + when(physnet.getIsolationMethods()).thenReturn(Arrays.asList(new String[] { "VLAN" })); + assertFalse(guru.canHandle(offering, NetworkType.Advanced, physnet) == true); + + } + + @Test + public void testDesign() { + 
PhysicalNetworkVO physnet = mock(PhysicalNetworkVO.class); + when(physnetdao.findById(anyLong())).thenReturn(physnet); + when(physnet.getIsolationMethods()).thenReturn(Arrays.asList(new String[] { "VXLAN" })); + when(physnet.getId()).thenReturn(42L); + + NetworkOffering offering = mock(NetworkOffering.class); + when(offering.getId()).thenReturn(42L); + when(offering.getTrafficType()).thenReturn(TrafficType.Guest); + when(offering.getGuestType()).thenReturn(GuestType.Isolated); + + DeploymentPlan plan = mock(DeploymentPlan.class); + Network network = mock(Network.class); + Account account = mock(Account.class); + + Network designednetwork = guru.design(offering, plan, network, account); + assertTrue(designednetwork != null); + assertTrue(designednetwork.getBroadcastDomainType() == BroadcastDomainType.Vxlan); + } + + @Test + public void testImplement() throws InsufficientVirtualNetworkCapcityException { + PhysicalNetworkVO physnet = mock(PhysicalNetworkVO.class); + when(physnetdao.findById(anyLong())).thenReturn(physnet); + when(physnet.getIsolationMethods()).thenReturn(Arrays.asList(new String[] { "VXLAN" })); + when(physnet.getId()).thenReturn(42L); + + NetworkOffering offering = mock(NetworkOffering.class); + when(offering.getId()).thenReturn(42L); + when(offering.getTrafficType()).thenReturn(TrafficType.Guest); + when(offering.getGuestType()).thenReturn(GuestType.Isolated); + + NetworkVO network = mock(NetworkVO.class); + when(network.getName()).thenReturn("testnetwork"); + when(network.getState()).thenReturn(State.Implementing); + when(network.getPhysicalNetworkId()).thenReturn(42L); + + DeployDestination dest = mock(DeployDestination.class); + + DataCenter dc = mock(DataCenter.class); + when(dest.getDataCenter()).thenReturn(dc); + + when(netmodel.findPhysicalNetworkId(anyLong(), (String) any(), (TrafficType) any())).thenReturn(42L); + //TODO(VXLAN): doesn't support VNI specified + //when(confsvr.getConfigValue((String) any(), (String) any(), 
anyLong())).thenReturn("true"); + when(dcdao.allocateVnet(anyLong(), anyLong(), anyLong(), (String) any(), eq(true))).thenReturn("42"); + doNothing().when(guru).allocateVnetComplete((Network) any(), (NetworkVO) any(), anyLong(), anyLong(), (String) any(), eq("42")); + + Domain dom = mock(Domain.class); + when(dom.getName()).thenReturn("domain"); + + Account acc = mock(Account.class); + when(acc.getAccountName()).thenReturn("accountname"); + + ReservationContext res = mock(ReservationContext.class); + when(res.getDomain()).thenReturn(dom); + when(res.getAccount()).thenReturn(acc); + + Network implementednetwork = guru.implement(network, offering, dest, res); + assertTrue(implementednetwork != null); + } + + @Test + public void testImplementWithCidr() throws InsufficientVirtualNetworkCapcityException { + PhysicalNetworkVO physnet = mock(PhysicalNetworkVO.class); + when(physnetdao.findById(anyLong())).thenReturn(physnet); + when(physnet.getIsolationMethods()).thenReturn(Arrays.asList(new String[] { "VXLAN" })); + when(physnet.getId()).thenReturn(42L); + + NetworkOffering offering = mock(NetworkOffering.class); + when(offering.getId()).thenReturn(42L); + when(offering.getTrafficType()).thenReturn(TrafficType.Guest); + when(offering.getGuestType()).thenReturn(GuestType.Isolated); + + NetworkVO network = mock(NetworkVO.class); + when(network.getName()).thenReturn("testnetwork"); + when(network.getState()).thenReturn(State.Implementing); + when(network.getGateway()).thenReturn("10.1.1.1"); + when(network.getCidr()).thenReturn("10.1.1.0/24"); + when(network.getPhysicalNetworkId()).thenReturn(42L); + + DeployDestination dest = mock(DeployDestination.class); + + DataCenter dc = mock(DataCenter.class); + when(dest.getDataCenter()).thenReturn(dc); + + when(netmodel.findPhysicalNetworkId(anyLong(), (String) any(), (TrafficType) any())).thenReturn(42L); + + //TODO(VXLAN): doesn't support VNI specified + //when(confsvr.getConfigValue((String) any(), (String) any(), 
anyLong())).thenReturn("true"); + when(dcdao.allocateVnet(anyLong(), anyLong(), anyLong(), (String) any(), eq(true))).thenReturn("42"); + doNothing().when(guru).allocateVnetComplete((Network) any(), (NetworkVO) any(), anyLong(), anyLong(), (String) any(), eq("42")); + + Domain dom = mock(Domain.class); + when(dom.getName()).thenReturn("domain"); + + Account acc = mock(Account.class); + when(acc.getAccountName()).thenReturn("accountname"); + + ReservationContext res = mock(ReservationContext.class); + when(res.getDomain()).thenReturn(dom); + when(res.getAccount()).thenReturn(acc); + + Network implementednetwork = guru.implement(network, offering, dest, res); + assertTrue(implementednetwork != null); + assertTrue(implementednetwork.getCidr().equals("10.1.1.0/24")); + assertTrue(implementednetwork.getGateway().equals("10.1.1.1")); + } + + @Test + public void testShutdown() throws InsufficientVirtualNetworkCapcityException, URISyntaxException { + PhysicalNetworkVO physnet = mock(PhysicalNetworkVO.class); + when(physnetdao.findById(anyLong())).thenReturn(physnet); + when(physnet.getIsolationMethods()).thenReturn(Arrays.asList(new String[] { "VXLAN" })); + when(physnet.getId()).thenReturn(42L); + + NetworkOffering offering = mock(NetworkOffering.class); + when(offering.getId()).thenReturn(42L); + when(offering.getTrafficType()).thenReturn(TrafficType.Guest); + when(offering.getGuestType()).thenReturn(GuestType.Isolated); + + NetworkVO network = mock(NetworkVO.class); + when(network.getName()).thenReturn("testnetwork"); + when(network.getState()).thenReturn(State.Implementing); + when(network.getBroadcastDomainType()).thenReturn(BroadcastDomainType.Vxlan); + when(network.getBroadcastUri()).thenReturn(new URI("vxlan:12345")); + when(network.getPhysicalNetworkId()).thenReturn(42L); + when(netdao.findById(42L)).thenReturn(network); + + DeployDestination dest = mock(DeployDestination.class); + + DataCenter dc = mock(DataCenter.class); + 
when(dest.getDataCenter()).thenReturn(dc); + + when(netmodel.findPhysicalNetworkId(anyLong(), (String) any(), (TrafficType) any())).thenReturn(42L); + + Domain dom = mock(Domain.class); + when(dom.getName()).thenReturn("domain"); + + Account acc = mock(Account.class); + when(acc.getAccountName()).thenReturn("accountname"); + + ReservationContext res = mock(ReservationContext.class); + when(res.getDomain()).thenReturn(dom); + when(res.getAccount()).thenReturn(acc); + + NetworkProfile implementednetwork = mock(NetworkProfile.class); + when(implementednetwork.getId()).thenReturn(42L); + when(implementednetwork.getBroadcastUri()).thenReturn(new URI("vxlan:12345")); + when(offering.getSpecifyVlan()).thenReturn(false); + + guru.shutdown(implementednetwork, offering); + verify(implementednetwork, times(1)).setBroadcastUri(null); + } +} diff --git a/plugins/pom.xml b/plugins/pom.xml index a218bde32a8..adc850b079f 100755 --- a/plugins/pom.xml +++ b/plugins/pom.xml @@ -27,6 +27,7 @@ api/rate-limit api/discovery acl/static-role-based + acl/role-based-access-checkers affinity-group-processors/host-anti-affinity affinity-group-processors/explicit-dedication deployment-planners/user-concentrated-pod @@ -62,7 +63,7 @@ alert-handlers/snmp-alerts alert-handlers/syslog-alerts network-elements/internal-loadbalancer - acl/role-based-access-checkers + network-elements/vxlan @@ -100,7 +101,7 @@ netapp - nonoss + noredist @@ -111,7 +112,7 @@ kvm - nonoss + noredist @@ -122,7 +123,7 @@ f5 - nonoss + noredist @@ -133,7 +134,7 @@ netscaler - nonoss + noredist @@ -144,7 +145,7 @@ srx - nonoss + noredist @@ -155,7 +156,7 @@ vmware - nonoss + noredist diff --git a/plugins/storage/image/default/resources/META-INF/cloudstack/storage-image-default/module.properties b/plugins/storage/image/default/resources/META-INF/cloudstack/storage-image-default/module.properties new file mode 100644 index 00000000000..8381f6eff18 --- /dev/null +++ 
b/plugins/storage/image/default/resources/META-INF/cloudstack/storage-image-default/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=storage-image-default +parent=storage \ No newline at end of file diff --git a/plugins/storage/image/default/resources/META-INF/cloudstack/storage-image-default/spring-storage-image-default-context.xml b/plugins/storage/image/default/resources/META-INF/cloudstack/storage-image-default/spring-storage-image-default-context.xml new file mode 100644 index 00000000000..6d3c63c2772 --- /dev/null +++ b/plugins/storage/image/default/resources/META-INF/cloudstack/storage-image-default/spring-storage-image-default-context.xml @@ -0,0 +1,33 @@ + + + + + + diff --git a/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java b/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java index ee6f47b8fd1..d6448785a93 100644 --- a/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java +++ 
b/plugins/storage/image/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackImageStoreLifeCycleImpl.java @@ -161,4 +161,13 @@ public class CloudStackImageStoreLifeCycleImpl implements ImageStoreLifeCycle { public boolean deleteDataStore(DataStore store) { return false; } + + /* (non-Javadoc) + * @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) + */ + @Override + public boolean migrateToObjectStore(DataStore store) { + return imageStoreHelper.convertToStagingStore(store); + } + } diff --git a/plugins/storage/image/s3/resources/META-INF/cloudstack/storage-image-s3/module.properties b/plugins/storage/image/s3/resources/META-INF/cloudstack/storage-image-s3/module.properties new file mode 100644 index 00000000000..da571e2dda9 --- /dev/null +++ b/plugins/storage/image/s3/resources/META-INF/cloudstack/storage-image-s3/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=storage-image-s3 +parent=storage \ No newline at end of file diff --git a/plugins/storage/image/s3/resources/META-INF/cloudstack/storage-image-s3/spring-storage-image-s3-context.xml b/plugins/storage/image/s3/resources/META-INF/cloudstack/storage-image-s3/spring-storage-image-s3-context.xml new file mode 100644 index 00000000000..610506340a8 --- /dev/null +++ b/plugins/storage/image/s3/resources/META-INF/cloudstack/storage-image-s3/spring-storage-image-s3-context.xml @@ -0,0 +1,34 @@ + + + + + + + diff --git a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java index 7ca482422e3..6b5175eedae 100644 --- a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java +++ b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/driver/S3ImageStoreDriverImpl.java @@ -66,11 +66,22 @@ public class S3ImageStoreDriverImpl extends BaseImageStoreDriverImpl { details.get(ApiConstants.S3_SOCKET_TIMEOUT) == null ? null : Integer.valueOf(details .get(ApiConstants.S3_SOCKET_TIMEOUT)), imgStore.getCreated(), _configDao.getValue(Config.S3EnableRRS.toString()) == null ? 
false : Boolean.parseBoolean(_configDao - .getValue(Config.S3EnableRRS.toString()))); + .getValue(Config.S3EnableRRS.toString())), + getMaxSingleUploadSizeInBytes() + ); } + private long getMaxSingleUploadSizeInBytes() { + try { + return Long.parseLong(_configDao.getValue(Config.S3MaxSingleUploadSize.toString())) * 1024L * 1024L * 1024L; + } catch (NumberFormatException e) { + // use default 5GB + return 5L * 1024L * 1024L * 1024L; + } + } + @Override public String createEntityExtractUrl(DataStore store, String installPath, ImageFormat format, DataObject dataObject) { // for S3, no need to do anything, just return template url for diff --git a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java index 249a4c6a1e8..de25830822f 100644 --- a/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/s3/src/org/apache/cloudstack/storage/datastore/lifecycle/S3ImageStoreLifeCycleImpl.java @@ -22,6 +22,8 @@ import java.util.Map; import javax.inject.Inject; +import org.apache.log4j.Logger; + import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; @@ -32,7 +34,6 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreHelper; import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager; import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle; -import org.apache.log4j.Logger; import com.cloud.agent.api.StoragePoolInfo; import com.cloud.hypervisor.Hypervisor.HypervisorType; @@ -131,4 +132,13 @@ public class S3ImageStoreLifeCycleImpl implements ImageStoreLifeCycle { public boolean 
deleteDataStore(DataStore store) { return false; } + + /* (non-Javadoc) + * @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) + */ + @Override + public boolean migrateToObjectStore(DataStore store) { + return false; + } + } diff --git a/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java b/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java index e4df6f55f3a..b10b756d11f 100644 --- a/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SampleImageStoreLifeCycleImpl.java @@ -79,4 +79,13 @@ public class SampleImageStoreLifeCycleImpl implements ImageStoreLifeCycle { public boolean deleteDataStore(DataStore store) { return false; } + + /* (non-Javadoc) + * @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) + */ + @Override + public boolean migrateToObjectStore(DataStore store) { + return false; + } + } diff --git a/plugins/storage/image/swift/resources/META-INF/cloudstack/storage-image-swift/module.properties b/plugins/storage/image/swift/resources/META-INF/cloudstack/storage-image-swift/module.properties new file mode 100644 index 00000000000..1fa4be6f5e5 --- /dev/null +++ b/plugins/storage/image/swift/resources/META-INF/cloudstack/storage-image-swift/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=storage-image-swift +parent=storage \ No newline at end of file diff --git a/plugins/storage/image/swift/resources/META-INF/cloudstack/storage-image-swift/spring-storage-image-swift-context.xml b/plugins/storage/image/swift/resources/META-INF/cloudstack/storage-image-swift/spring-storage-image-swift-context.xml new file mode 100644 index 00000000000..5e986e88360 --- /dev/null +++ b/plugins/storage/image/swift/resources/META-INF/cloudstack/storage-image-swift/spring-storage-image-swift-context.xml @@ -0,0 +1,33 @@ + + + + + + diff --git a/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java b/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java index 4256cc2cc7b..0a2b72c3aa3 100644 --- a/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java +++ b/plugins/storage/image/swift/src/org/apache/cloudstack/storage/datastore/lifecycle/SwiftImageStoreLifeCycleImpl.java @@ -16,11 +16,13 @@ // under the License. 
package org.apache.cloudstack.storage.datastore.lifecycle; -import com.cloud.agent.api.StoragePoolInfo; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.resource.ResourceManager; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.ScopeType; +import java.util.HashMap; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.log4j.Logger; + import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; @@ -30,11 +32,12 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreHelper; import org.apache.cloudstack.storage.image.datastore.ImageStoreProviderManager; import org.apache.cloudstack.storage.image.store.lifecycle.ImageStoreLifeCycle; -import org.apache.log4j.Logger; -import javax.inject.Inject; -import java.util.HashMap; -import java.util.Map; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ScopeType; public class SwiftImageStoreLifeCycleImpl implements ImageStoreLifeCycle { @@ -113,4 +116,13 @@ public class SwiftImageStoreLifeCycleImpl implements ImageStoreLifeCycle { public boolean deleteDataStore(DataStore store) { return false; } + + /* (non-Javadoc) + * @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) + */ + @Override + public boolean migrateToObjectStore(DataStore store) { + return false; + } + } diff --git a/plugins/storage/volume/default/resources/META-INF/cloudstack/storage-volume-default/module.properties b/plugins/storage/volume/default/resources/META-INF/cloudstack/storage-volume-default/module.properties 
new file mode 100644 index 00000000000..61369884987 --- /dev/null +++ b/plugins/storage/volume/default/resources/META-INF/cloudstack/storage-volume-default/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=storage-volume-default +parent=storage \ No newline at end of file diff --git a/plugins/storage/volume/default/resources/META-INF/cloudstack/storage-volume-default/spring-storage-volume-default-context.xml b/plugins/storage/volume/default/resources/META-INF/cloudstack/storage-volume-default/spring-storage-volume-default-context.xml new file mode 100644 index 00000000000..8b50455315d --- /dev/null +++ b/plugins/storage/volume/default/resources/META-INF/cloudstack/storage-volume-default/spring-storage-volume-default-context.xml @@ -0,0 +1,35 @@ + + + + + + + + diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java index a854d2ef415..82dc34769b8 100644 --- a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java +++ 
b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/driver/CloudStackPrimaryDataStoreDriverImpl.java @@ -18,6 +18,35 @@ */ package org.apache.cloudstack.storage.datastore.driver; +import java.util.UUID; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.CreateObjectCommand; +import org.apache.cloudstack.storage.command.DeleteCommand; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.cloudstack.storage.to.TemplateObjectTO; +import org.apache.cloudstack.storage.volume.VolumeObject; +import org.apache.log4j.Logger; + import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.ResizeVolumeAnswer; 
import com.cloud.agent.api.storage.ResizeVolumeCommand; @@ -25,9 +54,12 @@ import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.configuration.Config; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.dao.HostDao; +import com.cloud.storage.DataStoreRole; import com.cloud.storage.ResizeVolumePayload; +import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.dao.DiskOfferingDao; @@ -35,21 +67,10 @@ import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.snapshot.SnapshotManager; +import com.cloud.template.TemplateManager; +import com.cloud.utils.NumbersUtil; import com.cloud.vm.dao.VMInstanceDao; -import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; -import org.apache.cloudstack.engine.subsystem.api.storage.*; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.command.CommandResult; -import org.apache.cloudstack.storage.command.CreateObjectCommand; -import org.apache.cloudstack.storage.command.DeleteCommand; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.volume.VolumeObject; - -import org.apache.log4j.Logger; - -import javax.inject.Inject; - public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { private static final Logger s_logger = Logger.getLogger(CloudStackPrimaryDataStoreDriverImpl.class); @Inject @@ -74,7 +95,12 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri SnapshotManager snapshotMgr; @Inject EndPointSelector epSelector; - + @Inject + ConfigurationDao configDao; + @Inject + TemplateManager templateManager; 
+ @Inject + TemplateDataFactory templateDataFactory; @Override public DataTO getTO(DataObject data) { return null; @@ -165,10 +191,49 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { + DataStore store = destData.getDataStore(); + if (store.getRole() == DataStoreRole.Primary) { + if ((srcdata.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.TEMPLATE)) { + //For CLVM, we need to copy template to primary storage at all, just fake the copy result. + TemplateObjectTO templateObjectTO = new TemplateObjectTO(); + templateObjectTO.setPath(UUID.randomUUID().toString()); + templateObjectTO.setSize(srcdata.getSize()); + templateObjectTO.setPhysicalSize(srcdata.getSize()); + templateObjectTO.setFormat(Storage.ImageFormat.RAW); + CopyCmdAnswer answer = new CopyCmdAnswer(templateObjectTO); + CopyCommandResult result = new CopyCommandResult("", answer); + callback.complete(result); + } else if (srcdata.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.VOLUME) { + //For CLVM, we need to pass template on secondary storage to hypervisor + String value = configDao.getValue(Config.PrimaryStorageDownloadWait.toString()); + int _primaryStorageDownloadWait = NumbersUtil.parseInt(value, + Integer.parseInt(Config.PrimaryStorageDownloadWait.getDefaultValue())); + StoragePoolVO storagePoolVO = primaryStoreDao.findById(store.getId()); + DataStore imageStore = templateManager.getImageStore(storagePoolVO.getDataCenterId(), srcdata.getId()); + DataObject srcData = templateDataFactory.getTemplate(srcdata.getId(), imageStore); + + CopyCommand cmd = new CopyCommand(srcData.getTO(), destData.getTO(), _primaryStorageDownloadWait, true); + EndPoint ep = epSelector.select(srcData, destData); + Answer answer = ep.sendMessage(cmd); + CopyCommandResult result = new CopyCommandResult("", answer); + 
callback.complete(result); + } + } } @Override public boolean canCopy(DataObject srcData, DataObject destData) { + //BUG fix for CLOUDSTACK-4618 + DataStore store = destData.getDataStore(); + if (store.getRole() == DataStoreRole.Primary) { + if ((srcData.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.TEMPLATE) || + (srcData.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.VOLUME)) { + StoragePoolVO storagePoolVO = primaryStoreDao.findById(store.getId()); + if (storagePoolVO != null && storagePoolVO.getPoolType() == Storage.StoragePoolType.CLVM) { + return true; + } + } + } return false; } @@ -241,5 +306,4 @@ public class CloudStackPrimaryDataStoreDriverImpl implements PrimaryDataStoreDri callback.complete(result); } - } diff --git a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java index 9a7012494e4..d916d454422 100644 --- a/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/default/src/org/apache/cloudstack/storage/datastore/lifecycle/CloudStackPrimaryDataStoreLifeCycleImpl.java @@ -433,7 +433,7 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore List poolHosts = new ArrayList(); for (HostVO host : hosts) { try { - this.storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); poolHosts.add(host); } catch (Exception e) { s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); @@ -444,20 +444,20 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore primaryDataStoreDao.expunge(dataStore.getId()); throw new 
CloudRuntimeException("Failed to create storage pool as it is not accessible to hosts."); } - this.dataStoreHelper.attachZone(dataStore, hypervisorType); + dataStoreHelper.attachZone(dataStore, hypervisorType); return true; } @Override public boolean maintain(DataStore dataStore) { storagePoolAutmation.maintain(dataStore); - this.dataStoreHelper.maintain(dataStore); + dataStoreHelper.maintain(dataStore); return true; } @Override public boolean cancelMaintain(DataStore store) { - this.dataStoreHelper.cancelMaintain(store); + dataStoreHelper.cancelMaintain(store); storagePoolAutmation.cancelMaintain(store); return true; } @@ -472,6 +472,8 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore HypervisorType hType = null; if(hostPoolRecords.size() > 0 ){ hType = getHypervisorType(hostPoolRecords.get(0).getHostId()); + } else { + return false; } // Remove the SR associated with the Xenserver @@ -511,4 +513,13 @@ public class CloudStackPrimaryDataStoreLifeCycleImpl implements PrimaryDataStore dataStoreHelper.attachHost(store, scope, existingInfo); return true; } + + /* (non-Javadoc) + * @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) + */ + @Override + public boolean migrateToObjectStore(DataStore store) { + return false; + } + } diff --git a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java index ece7b260cd4..75e8823a1b5 100644 --- a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java +++ b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/driver/SamplePrimaryDataStoreDriverImpl.java @@ -16,11 +16,18 @@ // under the License. 
package org.apache.cloudstack.storage.datastore.driver; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.to.DataStoreTO; -import com.cloud.agent.api.to.DataTO; -import com.cloud.storage.dao.StoragePoolHostDao; -import org.apache.cloudstack.engine.subsystem.api.storage.*; +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcContext; @@ -29,7 +36,10 @@ import org.apache.cloudstack.storage.command.CreateObjectCommand; import org.apache.cloudstack.storage.datastore.DataObjectManager; import org.apache.log4j.Logger; -import javax.inject.Inject; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.storage.dao.StoragePoolHostDao; public class SamplePrimaryDataStoreDriverImpl implements PrimaryDataStoreDriver { private static final Logger s_logger = Logger.getLogger(SamplePrimaryDataStoreDriverImpl.class); diff --git a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java 
b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java index 92538ad5f4b..6b5e43127b8 100644 --- a/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java +++ b/plugins/storage/volume/sample/src/org/apache/cloudstack/storage/datastore/lifecycle/SamplePrimaryDataStoreLifeCycleImpl.java @@ -18,12 +18,18 @@ */ package org.apache.cloudstack.storage.datastore.lifecycle; -import com.cloud.agent.api.StoragePoolInfo; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; -import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.StoragePoolStatus; -import org.apache.cloudstack.engine.subsystem.api.storage.*; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; +import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.command.AttachPrimaryDataStoreCmd; import org.apache.cloudstack.storage.command.CreatePrimaryDataStoreCmd; import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager; @@ -31,9 +37,11 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; -import javax.inject.Inject; -import java.util.List; -import java.util.Map; +import com.cloud.agent.api.StoragePoolInfo; +import com.cloud.host.HostVO; +import 
com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.StoragePoolStatus; public class SamplePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLifeCycle { @Inject @@ -119,4 +127,12 @@ public class SamplePrimaryDataStoreLifeCycleImpl implements PrimaryDataStoreLife return false; } + /* (non-Javadoc) + * @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) + */ + @Override + public boolean migrateToObjectStore(DataStore store) { + return false; + } + } diff --git a/plugins/storage/volume/solidfire/resources/META-INF/cloudstack/storage-volume-solidfire/module.properties b/plugins/storage/volume/solidfire/resources/META-INF/cloudstack/storage-volume-solidfire/module.properties new file mode 100644 index 00000000000..335a9d2b45e --- /dev/null +++ b/plugins/storage/volume/solidfire/resources/META-INF/cloudstack/storage-volume-solidfire/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=storage-volume-solidfire +parent=storage \ No newline at end of file diff --git a/plugins/storage/volume/solidfire/resources/META-INF/cloudstack/storage-volume-solidfire/spring-storage-volume-solidfire-context.xml b/plugins/storage/volume/solidfire/resources/META-INF/cloudstack/storage-volume-solidfire/spring-storage-volume-solidfire-context.xml new file mode 100644 index 00000000000..a83e3cab7e3 --- /dev/null +++ b/plugins/storage/volume/solidfire/resources/META-INF/cloudstack/storage-volume-solidfire/spring-storage-volume-solidfire-context.xml @@ -0,0 +1,33 @@ + + + + + + diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java index c73e409af6b..a02474d7371 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/driver/SolidfirePrimaryDataStoreDriver.java @@ -20,12 +20,19 @@ import java.util.List; import javax.inject.Inject; -import org.apache.cloudstack.engine.subsystem.api.storage.*; +import org.apache.cloudstack.engine.subsystem.api.storage.ChapInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.storage.command.CommandResult; import 
org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; import org.apache.commons.lang.StringUtils; @@ -39,9 +46,9 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.VolumeDao; import com.cloud.storage.dao.VolumeDetailsDao; -import com.cloud.user.AccountVO; -import com.cloud.user.AccountDetailsDao; import com.cloud.user.AccountDetailVO; +import com.cloud.user.AccountDetailsDao; +import com.cloud.user.AccountVO; import com.cloud.user.dao.AccountDao; public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { @@ -122,10 +129,10 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { String clusterAdminPassword = sfConnection.getClusterAdminPassword(); long accountNumber = SolidFireUtil.createSolidFireAccount(mVip, mPort, - clusterAdminUsername, clusterAdminPassword, sfAccountName); + clusterAdminUsername, clusterAdminPassword, sfAccountName); return SolidFireUtil.getSolidFireAccountById(mVip, mPort, - clusterAdminUsername, clusterAdminPassword, accountNumber); + clusterAdminUsername, clusterAdminPassword, accountNumber); } private void updateCsDbWithAccountInfo(long csAccountId, SolidFireUtil.SolidFireAccount sfAccount) { @@ -174,18 +181,22 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { _targetSecret = targetSecret; } + @Override public String getInitiatorUsername() { return _initiatorUsername; } + @Override public String getInitiatorSecret() { return _initiatorSecret; } + @Override public String getTargetUsername() { return _targetUsername; } + @Override public 
String getTargetSecret() { return _targetSecret; } @@ -268,7 +279,7 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { Long maxIops = volumeInfo.getMaxIops(); if (minIops == null || minIops <= 0 || - maxIops == null || maxIops <= 0) { + maxIops == null || maxIops <= 0) { long defaultMaxIops = getDefaultMaxIops(storagePoolId); iops = new Iops(getDefaultMinIops(storagePoolId), defaultMaxIops, getDefaultBurstIops(storagePoolId, defaultMaxIops)); @@ -277,8 +288,10 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { iops = new Iops(volumeInfo.getMinIops(), volumeInfo.getMaxIops(), getDefaultBurstIops(storagePoolId, volumeInfo.getMaxIops())); } + long volumeSize = volumeInfo.getSize() * 2; // in reality, use a multiplier that's at cluster-level scope + long sfVolumeId = SolidFireUtil.createSolidFireVolume(mVip, mPort, clusterAdminUsername, clusterAdminPassword, - getSolidFireVolumeName(volumeInfo.getName()), sfAccountId, volumeInfo.getSize(), true, + getSolidFireVolumeName(volumeInfo.getName()), sfAccountId, volumeSize, true, volumeInfo.getSize().toString(), iops.getMinIops(), iops.getMaxIops(), iops.getBurstIops()); return SolidFireUtil.getSolidFireVolume(mVip, mPort, clusterAdminUsername, clusterAdminPassword, sfVolumeId); @@ -326,22 +339,22 @@ public class SolidfirePrimaryDataStoreDriver implements PrimaryDataStoreDriver { _minIops = minIops; _maxIops = maxIops; _burstIops = burstIops; - } + } - public long getMinIops() - { - return _minIops; - } + public long getMinIops() + { + return _minIops; + } - public long getMaxIops() - { - return _maxIops; - } + public long getMaxIops() + { + return _maxIops; + } - public long getBurstIops() - { - return _burstIops; - } + public long getBurstIops() + { + return _burstIops; + } } private void deleteSolidFireVolume(VolumeInfo volumeInfo, SolidFireConnection sfConnection) @@ -499,14 +512,14 @@ public class SolidfirePrimaryDataStoreDriver implements 
PrimaryDataStoreDriver { _volumeDao.deleteVolumesByInstance(volumeInfo.getId()); -// if (!sfAccountHasVolume(sfAccountId, sfConnection)) { -// // delete the account from the SolidFire SAN -// deleteSolidFireAccount(sfAccountId, sfConnection); -// -// // delete the info in the account_details table -// // that's related to the SolidFire account -// _accountDetailsDao.deleteDetails(account.getAccountId()); -// } + // if (!sfAccountHasVolume(sfAccountId, sfConnection)) { + // // delete the account from the SolidFire SAN + // deleteSolidFireAccount(sfAccountId, sfConnection); + // + // // delete the info in the account_details table + // // that's related to the SolidFire account + // _accountDetailsDao.deleteDetails(account.getAccountId()); + // } StoragePoolVO storagePool = _storagePoolDao.findById(storagePoolId); diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java index f1ac3b3efc8..120a357b270 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/lifecycle/SolidFirePrimaryDataStoreLifeCycle.java @@ -18,36 +18,47 @@ */ package org.apache.cloudstack.storage.datastore.lifecycle; +import java.util.ArrayList; +import java.util.List; import java.util.Map; import java.util.StringTokenizer; import javax.inject.Inject; +import org.apache.log4j.Logger; + import org.apache.cloudstack.engine.subsystem.api.storage.ClusterScope; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.HostScope; -import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import 
org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreLifeCycle; import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreParameters; +import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.util.SolidFireUtil; import org.apache.cloudstack.storage.volume.datastore.PrimaryDataStoreHelper; +import com.cloud.agent.api.StoragePoolInfo; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.host.HostVO; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.agent.api.StoragePoolInfo; -import com.cloud.storage.StoragePoolAutomation; +import com.cloud.resource.ResourceManager; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StorageManager; +import com.cloud.storage.StoragePoolAutomation; import com.cloud.utils.exception.CloudRuntimeException; public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeCycle { - @Inject PrimaryDataStoreDao storagePoolDao; - @Inject PrimaryDataStoreHelper dataStoreHelper; - @Inject StoragePoolAutomation storagePoolAutomation; - @Inject StoragePoolDetailsDao storagePoolDetailsDao; - @Inject DataCenterDao zoneDao; + private static final Logger s_logger = Logger.getLogger(SolidFirePrimaryDataStoreLifeCycle.class); + + @Inject private DataCenterDao zoneDao; + @Inject private PrimaryDataStoreDao storagePoolDao; + @Inject private PrimaryDataStoreHelper dataStoreHelper; + @Inject private ResourceManager _resourceMgr; + @Inject StorageManager _storageMgr; + @Inject private StoragePoolAutomation storagePoolAutomation; + @Inject private StoragePoolDetailsDao storagePoolDetailsDao; private static final int DEFAULT_MANAGEMENT_PORT = 443; private static final int DEFAULT_STORAGE_PORT = 3260; @@ -305,9 +316,23 @@ public class 
SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC public boolean attachZone(DataStore dataStore, ZoneScope scope, HypervisorType hypervisorType) { dataStoreHelper.attachZone(dataStore); - return true; + List xenServerHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.XenServer, scope.getScopeId()); + List kvmHosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId()); + List hosts = new ArrayList(); + + hosts.addAll(xenServerHosts); + hosts.addAll(kvmHosts); + + for (HostVO host : hosts) { + try { + _storageMgr.connectHostToSharedPool(host.getId(), dataStore.getId()); + } catch (Exception e) { + s_logger.warn("Unable to establish a connection between " + host + " and " + dataStore, e); + } } + return true; + } @Override public boolean maintain(DataStore dataStore) { @@ -330,4 +355,13 @@ public class SolidFirePrimaryDataStoreLifeCycle implements PrimaryDataStoreLifeC public boolean deleteDataStore(DataStore store) { return dataStoreHelper.deletePrimaryDataStore(store); } + + /* (non-Javadoc) + * @see org.apache.cloudstack.engine.subsystem.api.storage.DataStoreLifeCycle#migrateToObjectStore(org.apache.cloudstack.engine.subsystem.api.storage.DataStore) + */ + @Override + public boolean migrateToObjectStore(DataStore store) { + return false; + } + } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java new file mode 100644 index 00000000000..43e98301c0f --- /dev/null +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidFireHostListener.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.cloudstack.storage.datastore.provider; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener; +import org.apache.log4j.Logger; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; +import com.cloud.agent.api.ModifyStoragePoolAnswer; +import com.cloud.agent.api.ModifyStoragePoolCommand; +import com.cloud.alert.AlertManager; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.StoragePool; +import com.cloud.storage.StoragePoolHostVO; +import com.cloud.storage.dao.StoragePoolHostDao; +import com.cloud.utils.exception.CloudRuntimeException; + +public class SolidFireHostListener implements HypervisorHostListener { + private static final Logger s_logger = Logger.getLogger(SolidFireHostListener.class); + + @Inject private AgentManager _agentMgr; + @Inject private AlertManager _alertMgr; + @Inject private DataStoreManager _dataStoreMgr; + @Inject private HostDao _hostDao; + @Inject private StoragePoolHostDao storagePoolHostDao; + + @Override + public boolean 
hostConnect(long hostId, long storagePoolId) { + HostVO host = _hostDao.findById(hostId); + + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); + + if (storagePoolHost == null) { + storagePoolHost = new StoragePoolHostVO(storagePoolId, hostId, ""); + + storagePoolHostDao.persist(storagePoolHost); + } + + // just want to send the ModifyStoragePoolCommand for KVM + if (host.getHypervisorType() != HypervisorType.KVM) { + return true; + } + + StoragePool storagePool = (StoragePool)_dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + ModifyStoragePoolCommand cmd = new ModifyStoragePoolCommand(true, storagePool); + + Answer answer = _agentMgr.easySend(hostId, cmd); + + if (answer == null) { + throw new CloudRuntimeException("Unable to get an answer to the modify storage pool command (" + storagePool.getId() + ")"); + } + + if (!answer.getResult()) { + String msg = "Unable to attach storage pool " + storagePoolId + " to host " + hostId; + + _alertMgr.sendAlert(AlertManager.ALERT_TYPE_HOST, storagePool.getDataCenterId(), storagePool.getPodId(), msg, msg); + + throw new CloudRuntimeException("Unable to establish a connection from agent to storage pool " + storagePool.getId() + + " due to " + answer.getDetails() + " (" + storagePool.getId() + ")"); + } + + assert (answer instanceof ModifyStoragePoolAnswer) : "ModifyStoragePoolAnswer expected ; Pool = " + storagePool.getId() + " Host = " + hostId; + + s_logger.info("Connection established between storage pool " + storagePool + " and host + " + hostId); + + return true; + } + + @Override + public boolean hostDisconnected(long hostId, long storagePoolId) { + StoragePoolHostVO storagePoolHost = storagePoolHostDao.findByPoolHost(storagePoolId, hostId); + + if (storagePoolHost != null) { + storagePoolHostDao.deleteStoragePoolHostDetails(hostId, storagePoolId); + } + + return true; + } +} diff --git 
a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java index 9c784ba023a..576d1a284db 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/provider/SolidfirePrimaryDataStoreProvider.java @@ -66,15 +66,7 @@ public class SolidfirePrimaryDataStoreProvider implements PrimaryDataStoreProvid public boolean configure(Map params) { lifecycle = ComponentContext.inject(SolidFirePrimaryDataStoreLifeCycle.class); driver = ComponentContext.inject(SolidfirePrimaryDataStoreDriver.class); - listener = ComponentContext.inject(new HypervisorHostListener() { - public boolean hostConnect(long hostId, long poolId) { - return true; - } - - public boolean hostDisconnected(long hostId, long poolId) { - return true; - } - }); + listener = ComponentContext.inject(SolidFireHostListener.class); return true; } diff --git a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java index ac11272a0c1..6659f98f15f 100644 --- a/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java +++ b/plugins/storage/volume/solidfire/src/org/apache/cloudstack/storage/datastore/util/SolidFireUtil.java @@ -78,13 +78,13 @@ public class SolidFireUtil public static final String USE_MUTUAL_CHAP_FOR_VMWARE = "useMutualChapForVMware"; public static long createSolidFireVolume(String strSfMvip, int iSfPort, String strSfAdmin, String strSfPassword, - String strSfVolumeName, long lSfAccountId, long lTotalSize, boolean bEnable512e, + String strSfVolumeName, long lSfAccountId, 
long lTotalSize, boolean bEnable512e, final String strCloudStackVolumeSize, long lMinIops, long lMaxIops, long lBurstIops) { final Gson gson = new GsonBuilder().create(); VolumeToCreate volumeToCreate = new VolumeToCreate(strSfVolumeName, lSfAccountId, lTotalSize, bEnable512e, - lMinIops, lMaxIops, lBurstIops); + strCloudStackVolumeSize, lMinIops, lMaxIops, lBurstIops); String strVolumeToCreateJson = gson.toJson(volumeToCreate); @@ -443,10 +443,10 @@ public class SolidFireUtil private final VolumeToCreateParams params; private VolumeToCreate(final String strVolumeName, final long lAccountId, final long lTotalSize, - final boolean bEnable512e, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) + final boolean bEnable512e, final String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { params = new VolumeToCreateParams(strVolumeName, lAccountId, lTotalSize, bEnable512e, - lMinIOPS, lMaxIOPS, lBurstIOPS); + strCloudStackVolumeSize, lMinIOPS, lMaxIOPS, lBurstIOPS); } private static final class VolumeToCreateParams @@ -456,18 +456,30 @@ public class SolidFireUtil private final long totalSize; private final boolean enable512e; private final VolumeToCreateParamsQoS qos; + private final VolumeToCreateParamsAttributes attributes; private VolumeToCreateParams(final String strVolumeName, final long lAccountId, final long lTotalSize, - final boolean bEnable512e, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) + final boolean bEnable512e, final String strCloudStackVolumeSize, final long lMinIOPS, final long lMaxIOPS, final long lBurstIOPS) { name = strVolumeName; accountID = lAccountId; totalSize = lTotalSize; enable512e = bEnable512e; + attributes = new VolumeToCreateParamsAttributes(strCloudStackVolumeSize); qos = new VolumeToCreateParamsQoS(lMinIOPS, lMaxIOPS, lBurstIOPS); } + private static final class VolumeToCreateParamsAttributes + { + private final String CloudStackVolumeSize; + + private 
VolumeToCreateParamsAttributes(final String strCloudStackVolumeSize) + { + CloudStackVolumeSize = strCloudStackVolumeSize; + } + } + private static final class VolumeToCreateParamsQoS { private final long minIOPS; diff --git a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java index 902f5953eb1..233b0461150 100644 --- a/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java +++ b/plugins/storage/volume/solidfire/test/org/apache/cloudstack/storage/test/AopTestAdvice.java @@ -18,11 +18,11 @@ package org.apache.cloudstack.storage.test; import org.aspectj.lang.ProceedingJoinPoint; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class AopTestAdvice { public Object AopTestMethod(ProceedingJoinPoint call) throws Throwable { - Transaction txn = Transaction.open(call.getSignature().getName()); + TransactionLegacy txn = TransactionLegacy.open(call.getSignature().getName()); Object ret = null; try { ret = call.proceed(); diff --git a/plugins/storage/volume/solidfire/test/resource/storageContext.xml b/plugins/storage/volume/solidfire/test/resource/storageContext.xml index e4ba9867803..8187f29e5e9 100644 --- a/plugins/storage/volume/solidfire/test/resource/storageContext.xml +++ b/plugins/storage/volume/solidfire/test/resource/storageContext.xml @@ -1,3 +1,4 @@ + - + + + + + + + + + + + + + diff --git a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/api/command/LdapImportUsersCmd.java b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/api/command/LdapImportUsersCmd.java new file mode 100644 index 00000000000..f872247f07e --- /dev/null +++ b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/api/command/LdapImportUsersCmd.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor 
license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command; + +import com.cloud.domain.Domain; +import com.cloud.exception.*; +import com.cloud.user.AccountService; +import com.cloud.user.DomainService; +import org.apache.cloudstack.api.*; +import org.apache.cloudstack.api.response.LdapUserResponse; +import org.apache.cloudstack.api.response.ListResponse; +import org.apache.cloudstack.ldap.LdapManager; +import org.apache.cloudstack.ldap.LdapUser; +import org.apache.cloudstack.ldap.NoLdapUserMatchingQueryException; +import org.apache.log4j.Logger; +import org.bouncycastle.util.encoders.Base64; + +import javax.inject.Inject; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +@APICommand(name = "importLdapUsers", description = "Import LDAP users", responseObject = LdapUserResponse.class, since = "4.3.0") +public class LdapImportUsersCmd extends BaseListCmd { + + public static final Logger s_logger = Logger.getLogger(LdapImportUsersCmd.class.getName()); + + private static final String s_name = "ldapuserresponse"; + + @Parameter(name = ApiConstants.TIMEZONE, type = CommandType.STRING, + description = "Specifies a timezone for this command. 
For more information on the timezone parameter, see Time Zone Format.") + private String timezone; + + @Parameter(name = ApiConstants.ACCOUNT_TYPE, type = CommandType.SHORT, required = true, + description = "Type of the account. Specify 0 for user, 1 for root admin, and 2 for domain admin") + private Short accountType; + + @Parameter(name = ApiConstants.ACCOUNT_DETAILS, type = CommandType.MAP, description = "details for account used to store specific parameters") + private Map details; + + @Inject + private LdapManager _ldapManager; + + public LdapImportUsersCmd() { + super(); + } + + public LdapImportUsersCmd(final LdapManager ldapManager, final DomainService domainService, final AccountService accountService) { + super(); + _ldapManager = ldapManager; + _domainService = domainService; + _accountService = accountService; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, + NetworkRuleConflictException { + List ldapResponses = null; + final ListResponse response = new ListResponse(); + try { + final List users = _ldapManager.getUsers(); + for (LdapUser user : users) { + Domain domain = _domainService.getDomainByName(user.getDomain(), Domain.ROOT_DOMAIN); + + if (domain == null) { + domain = _domainService.createDomain(user.getDomain(), Domain.ROOT_DOMAIN, user.getDomain(), UUID.randomUUID().toString()); + } + _accountService.createUserAccount(user.getUsername(), generatePassword(), user.getFirstname(), user.getLastname(), user.getEmail(), timezone, user.getUsername(), + accountType, domain.getId(), domain.getNetworkDomain(), details, UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + ldapResponses = createLdapUserResponse(users); + } catch (final NoLdapUserMatchingQueryException ex) { + ldapResponses = new ArrayList(); + } finally { + response.setResponses(ldapResponses); + 
response.setResponseName(getCommandName()); + setResponseObject(response); + } + } + + private List createLdapUserResponse(List users) { + final List ldapResponses = new ArrayList(); + for (final LdapUser user : users) { + final LdapUserResponse ldapResponse = _ldapManager.createLdapUserResponse(user); + ldapResponse.setObjectName("LdapUser"); + ldapResponses.add(ldapResponse); + } + return ldapResponses; + } + + @Override + public String getCommandName() { + return s_name; + } + + private String generatePassword() throws ServerApiException { + try { + final SecureRandom randomGen = SecureRandom.getInstance("SHA1PRNG"); + final byte bytes[] = new byte[20]; + randomGen.nextBytes(bytes); + return Base64.encode(bytes).toString(); + } catch (final NoSuchAlgorithmException e) { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate random password"); + } + } +} diff --git a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/api/response/LdapUserResponse.java b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/api/response/LdapUserResponse.java index 9b21c8f54e8..1672703fd89 100644 --- a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/api/response/LdapUserResponse.java +++ b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/api/response/LdapUserResponse.java @@ -16,84 +16,94 @@ // under the License. 
package org.apache.cloudstack.api.response; -import org.apache.cloudstack.api.BaseResponse; - import com.cloud.serializer.Param; import com.google.gson.annotations.SerializedName; +import org.apache.cloudstack.api.BaseResponse; public class LdapUserResponse extends BaseResponse { - @SerializedName("email") - @Param(description = "The user's email") - private String email; + @SerializedName("email") + @Param(description = "The user's email") + private String email; - @SerializedName("principal") - @Param(description = "The user's principle") - private String principal; + @SerializedName("principal") + @Param(description = "The user's principle") + private String principal; - @SerializedName("firstname") - @Param(description = "The user's firstname") - private String firstname; + @SerializedName("firstname") + @Param(description = "The user's firstname") + private String firstname; - @SerializedName("lastname") - @Param(description = "The user's lastname") - private String lastname; + @SerializedName("lastname") + @Param(description = "The user's lastname") + private String lastname; - @SerializedName("username") - @Param(description = "The user's username") - private String username; + @SerializedName("username") + @Param(description = "The user's username") + private String username; - public LdapUserResponse() { - super(); - } + @SerializedName("domain") + @Param(description = "The user's domain") + private String domain; - public LdapUserResponse(final String username, final String email, - final String firstname, final String lastname, - final String principal) { - super(); - this.username = username; - this.email = email; - this.firstname = firstname; - this.lastname = lastname; - this.principal = principal; - } + public LdapUserResponse() { + super(); + } - public String getEmail() { - return email; - } + public LdapUserResponse(final String username, final String email, final String firstname, final String lastname, final String principal, String domain) { + 
super(); + this.username = username; + this.email = email; + this.firstname = firstname; + this.lastname = lastname; + this.principal = principal; + this.domain = domain; + } - public String getFirstname() { - return firstname; - } + public String getEmail() { + return email; + } - public String getLastname() { - return lastname; - } + public String getFirstname() { + return firstname; + } - public String getPrincipal() { - return principal; - } + public String getLastname() { + return lastname; + } - public String getUsername() { - return username; - } + public String getPrincipal() { + return principal; + } - public void setEmail(final String email) { - this.email = email; - } + public String getUsername() { + return username; + } - public void setFirstname(final String firstname) { - this.firstname = firstname; - } + public String getDomain() { + return domain; + } - public void setLastname(final String lastname) { - this.lastname = lastname; - } + public void setEmail(final String email) { + this.email = email; + } - public void setPrincipal(final String principal) { - this.principal = principal; - } + public void setFirstname(final String firstname) { + this.firstname = firstname; + } - public void setUsername(final String username) { - this.username = username; - } + public void setLastname(final String lastname) { + this.lastname = lastname; + } + + public void setPrincipal(final String principal) { + this.principal = principal; + } + + public void setUsername(final String username) { + this.username = username; + } + + public void setDomain(String domain) { + this.domain = domain; + } } \ No newline at end of file diff --git a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapAuthenticator.java b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapAuthenticator.java index e62a3d8f1dc..559a9794b2d 100644 --- a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapAuthenticator.java +++ 
b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapAuthenticator.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.ldap; import java.util.Map; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.log4j.Logger; @@ -47,15 +46,6 @@ public class LdapAuthenticator extends DefaultUserAuthenticator { _userAccountDao = userAccountDao; } - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - if (name == null) { - name = "LDAP"; - } - super.configure(name, params); - return true; - } - @Override public boolean authenticate(final String username, final String password, final Long domainId, final Map requestParameters) { diff --git a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapConfiguration.java b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapConfiguration.java index 0cfb37c5d31..a08dccbd412 100644 --- a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapConfiguration.java +++ b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapConfiguration.java @@ -105,7 +105,7 @@ public class LdapConfiguration { public String[] getReturnAttributes() { return new String[] { getUsernameAttribute(), getEmailAttribute(), - getFirstnameAttribute(), getLastnameAttribute() }; + getFirstnameAttribute(), getLastnameAttribute(), getCommonNameAttribute() }; } public int getScope() { @@ -142,4 +142,8 @@ public class LdapConfiguration { final String userObject = _configDao.getValue("ldap.user.object"); return userObject == null ? 
"inetOrgPerson" : userObject; } + + public String getCommonNameAttribute() { + return "cn"; + } } \ No newline at end of file diff --git a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapManagerImpl.java b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapManagerImpl.java index 87406ad9c34..90a79b3f96a 100644 --- a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapManagerImpl.java +++ b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapManagerImpl.java @@ -25,12 +25,7 @@ import javax.naming.NamingException; import javax.naming.directory.DirContext; import org.apache.cloudstack.api.LdapValidator; -import org.apache.cloudstack.api.command.LdapAddConfigurationCmd; -import org.apache.cloudstack.api.command.LdapCreateAccountCmd; -import org.apache.cloudstack.api.command.LdapDeleteConfigurationCmd; -import org.apache.cloudstack.api.command.LdapListConfigurationCmd; -import org.apache.cloudstack.api.command.LdapListUsersCmd; -import org.apache.cloudstack.api.command.LdapUserSearchCmd; +import org.apache.cloudstack.api.command.*; import org.apache.cloudstack.api.response.LdapConfigurationResponse; import org.apache.cloudstack.api.response.LdapUserResponse; import org.apache.cloudstack.ldap.dao.LdapConfigurationDao; @@ -136,6 +131,7 @@ public class LdapManagerImpl implements LdapManager, LdapValidator { response.setLastname(user.getLastname()); response.setEmail(user.getEmail()); response.setPrincipal(user.getPrincipal()); + response.setDomain(user.getDomain()); return response; } @@ -164,6 +160,7 @@ public class LdapManagerImpl implements LdapManager, LdapValidator { cmdList.add(LdapDeleteConfigurationCmd.class); cmdList.add(LdapListConfigurationCmd.class); cmdList.add(LdapCreateAccountCmd.class); + cmdList.add(LdapImportUsersCmd.class); return cmdList; } diff --git a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapUser.java 
b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapUser.java index 18ad7d95119..592459eb815 100644 --- a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapUser.java +++ b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapUser.java @@ -22,15 +22,15 @@ public class LdapUser implements Comparable { private final String firstname; private final String lastname; private final String username; + private final String domain; - public LdapUser(final String username, final String email, - final String firstname, final String lastname, - final String principal) { + public LdapUser(final String username, final String email, final String firstname, final String lastname, final String principal, String domain) { this.username = username; this.email = email; this.firstname = firstname; this.lastname = lastname; this.principal = principal; + this.domain = domain; } @Override @@ -70,7 +70,11 @@ public class LdapUser implements Comparable { return username; } - @Override + public String getDomain() { + return domain; + } + + @Override public int hashCode() { return getUsername().hashCode(); } diff --git a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapUserManager.java b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapUserManager.java index 7494346856a..47697c9127e 100644 --- a/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapUserManager.java +++ b/plugins/user-authenticators/ldap/src/org/apache/cloudstack/ldap/LdapUserManager.java @@ -30,111 +30,100 @@ import javax.naming.directory.SearchResult; public class LdapUserManager { - @Inject - private LdapConfiguration _ldapConfiguration; + @Inject + private LdapConfiguration _ldapConfiguration; - public LdapUserManager() { + public LdapUserManager() { + } + + public LdapUserManager(final LdapConfiguration ldapConfiguration) { + _ldapConfiguration = ldapConfiguration; + } + + private LdapUser createUser(final SearchResult 
result) throws NamingException { + final Attributes attributes = result.getAttributes(); + + final String username = LdapUtils.getAttributeValue(attributes, _ldapConfiguration.getUsernameAttribute()); + final String email = LdapUtils.getAttributeValue(attributes, _ldapConfiguration.getEmailAttribute()); + final String firstname = LdapUtils.getAttributeValue(attributes, _ldapConfiguration.getFirstnameAttribute()); + final String lastname = LdapUtils.getAttributeValue(attributes, _ldapConfiguration.getLastnameAttribute()); + final String principal = result.getNameInNamespace(); + + String domain = principal.replace("cn="+LdapUtils.getAttributeValue(attributes,_ldapConfiguration.getCommonNameAttribute())+",", ""); + domain = domain.replace(","+_ldapConfiguration.getBaseDn(), ""); + domain = domain.replace("ou=",""); + + return new LdapUser(username, email, firstname, lastname, principal, domain); + } + + private String generateSearchFilter(final String username) { + final StringBuilder userObjectFilter = new StringBuilder(); + userObjectFilter.append("(objectClass="); + userObjectFilter.append(_ldapConfiguration.getUserObject()); + userObjectFilter.append(")"); + + final StringBuilder usernameFilter = new StringBuilder(); + usernameFilter.append("("); + usernameFilter.append(_ldapConfiguration.getUsernameAttribute()); + usernameFilter.append("="); + usernameFilter.append((username == null ? 
"*" : username)); + usernameFilter.append(")"); + + final StringBuilder memberOfFilter = new StringBuilder(); + if (_ldapConfiguration.getSearchGroupPrinciple() != null) { + memberOfFilter.append("(memberof="); + memberOfFilter.append(_ldapConfiguration.getSearchGroupPrinciple()); + memberOfFilter.append(")"); } - public LdapUserManager(final LdapConfiguration ldapConfiguration) { - _ldapConfiguration = ldapConfiguration; + final StringBuilder result = new StringBuilder(); + result.append("(&"); + result.append(userObjectFilter); + result.append(usernameFilter); + result.append(memberOfFilter); + result.append(")"); + + return result.toString(); + } + + public LdapUser getUser(final String username, final DirContext context) throws NamingException { + final NamingEnumeration result = searchUsers(username, context); + if (result.hasMoreElements()) { + return createUser(result.nextElement()); + } else { + throw new NamingException("No user found for username " + username); + } + } + + public List getUsers(final DirContext context) throws NamingException { + return getUsers(null, context); + } + + public List getUsers(final String username, final DirContext context) throws NamingException { + final NamingEnumeration results = searchUsers(username, context); + + final List users = new ArrayList(); + + while (results.hasMoreElements()) { + final SearchResult result = results.nextElement(); + users.add(createUser(result)); } - private LdapUser createUser(final SearchResult result) - throws NamingException { - final Attributes attributes = result.getAttributes(); + Collections.sort(users); - final String username = LdapUtils.getAttributeValue(attributes, - _ldapConfiguration.getUsernameAttribute()); - final String email = LdapUtils.getAttributeValue(attributes, - _ldapConfiguration.getEmailAttribute()); - final String firstname = LdapUtils.getAttributeValue(attributes, - _ldapConfiguration.getFirstnameAttribute()); - final String lastname = 
LdapUtils.getAttributeValue(attributes, - _ldapConfiguration.getLastnameAttribute()); - final String principal = result.getName() + "," - + _ldapConfiguration.getBaseDn(); + return users; + } - return new LdapUser(username, email, firstname, lastname, principal); - } + public NamingEnumeration searchUsers(final DirContext context) throws NamingException { + return searchUsers(null, context); + } - private String generateSearchFilter(final String username) { - final StringBuilder userObjectFilter = new StringBuilder(); - userObjectFilter.append("(objectClass="); - userObjectFilter.append(_ldapConfiguration.getUserObject()); - userObjectFilter.append(")"); + public NamingEnumeration searchUsers(final String username, final DirContext context) throws NamingException { + final SearchControls controls = new SearchControls(); - final StringBuilder usernameFilter = new StringBuilder(); - usernameFilter.append("("); - usernameFilter.append(_ldapConfiguration.getUsernameAttribute()); - usernameFilter.append("="); - usernameFilter.append((username == null ? 
"*" : username)); - usernameFilter.append(")"); + controls.setSearchScope(_ldapConfiguration.getScope()); + controls.setReturningAttributes(_ldapConfiguration.getReturnAttributes()); - final StringBuilder memberOfFilter = new StringBuilder(); - if (_ldapConfiguration.getSearchGroupPrinciple() != null) { - memberOfFilter.append("(memberof="); - memberOfFilter.append(_ldapConfiguration.getSearchGroupPrinciple()); - memberOfFilter.append(")"); - } - - final StringBuilder result = new StringBuilder(); - result.append("(&"); - result.append(userObjectFilter); - result.append(usernameFilter); - result.append(memberOfFilter); - result.append(")"); - - return result.toString(); - } - - public LdapUser getUser(final String username, final DirContext context) - throws NamingException { - final NamingEnumeration result = searchUsers(username, - context); - if (result.hasMoreElements()) { - return createUser(result.nextElement()); - } else { - throw new NamingException("No user found for username " + username); - } - } - - public List getUsers(final DirContext context) - throws NamingException { - return getUsers(null, context); - } - - public List getUsers(final String username, - final DirContext context) throws NamingException { - final NamingEnumeration results = searchUsers(username, - context); - - final List users = new ArrayList(); - - while (results.hasMoreElements()) { - final SearchResult result = results.nextElement(); - users.add(createUser(result)); - } - - Collections.sort(users); - - return users; - } - - public NamingEnumeration searchUsers(final DirContext context) - throws NamingException { - return searchUsers(null, context); - } - - public NamingEnumeration searchUsers(final String username, - final DirContext context) throws NamingException { - final SearchControls controls = new SearchControls(); - - controls.setSearchScope(_ldapConfiguration.getScope()); - controls.setReturningAttributes(_ldapConfiguration - .getReturnAttributes()); - - return 
context.search(_ldapConfiguration.getBaseDn(), - generateSearchFilter(username), controls); - } + return context.search(_ldapConfiguration.getBaseDn(), generateSearchFilter(username), controls); + } } \ No newline at end of file diff --git a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapConfigurationSpec.groovy b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapConfigurationSpec.groovy index c5939593059..66b4673b258 100644 --- a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapConfigurationSpec.groovy +++ b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapConfigurationSpec.groovy @@ -120,8 +120,8 @@ class LdapConfigurationSpec extends spock.lang.Specification { def ldapConfiguration = new LdapConfiguration(configDao, ldapManager) when: "Get return attributes is called" String[] returnAttributes = ldapConfiguration.getReturnAttributes() - then: "An array containing uid, mail, givenname and sn is returned" - returnAttributes == ["uid", "mail", "givenname", "sn"] + then: "An array containing uid, mail, givenname, sn and cn is returned" + returnAttributes == ["uid", "mail", "givenname", "sn", "cn"] } def "Test that getScope returns SearchControls.SUBTREE_SCOPE"() { diff --git a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapImportUsersCmdSpec.groovy b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapImportUsersCmdSpec.groovy new file mode 100644 index 00000000000..d04b0940c2b --- /dev/null +++ b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapImportUsersCmdSpec.groovy @@ -0,0 +1,71 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. 
The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package groovy.org.apache.cloudstack.ldap + +import com.cloud.domain.Domain +import com.cloud.domain.DomainVO +import com.cloud.user.AccountService +import com.cloud.user.DomainService +import com.cloud.user.UserAccount +import com.cloud.user.UserAccountVO +import org.apache.cloudstack.api.command.LdapImportUsersCmd +import org.apache.cloudstack.api.response.LdapUserResponse +import org.apache.cloudstack.ldap.LdapManager +import org.apache.cloudstack.ldap.LdapUser + +class LdapImportUsersCmdSpec extends spock.lang.Specification { + + + def "Test successful return of getCommandName"() { + given: "We have an LdapManager, DomainService and a LdapImportUsersCmd" + def ldapManager = Mock(LdapManager) + def domainService = Mock(DomainService) + def ldapImportUsersCmd = new LdapImportUsersCmd(ldapManager, domainService) + when: "Get command name is called" + String commandName = ldapImportUsersCmd.getCommandName() + then: "ldapuserresponse is returned" + commandName == "ldapuserresponse" + } + + def "Test successful response from execute"() { + given: "We have an LdapManager, DomainService, one user and a LdapImportUsersCmd" + def ldapManager = Mock(LdapManager) + def domainService = Mock(DomainService) + def accountService = Mock(AccountService) + + List users = new ArrayList() + users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", 
"cn=rmurphy,ou=engineering,dc=cloudstack,dc=org", "engineering")) + users.add(new LdapUser("bob", "bob@test.com", "Robert", "Young", "cn=bob,ou=engineering,dc=cloudstack,dc=org", "engineering")) + ldapManager.getUsers() >> users + LdapUserResponse response1 = new LdapUserResponse("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,ou=engineering,dc=cloudstack,dc=org", "engineering") + LdapUserResponse response2 = new LdapUserResponse("bob", "bob@test.com", "Robert", "Young", "cn=bob,ou=engineering,dc=cloudstack,dc=org", "engineering") + ldapManager.createLdapUserResponse(_) >>>[response1, response2] + + + Domain domain = new DomainVO("engineering", 1L, 1L, "engineering", UUID.randomUUID().toString()) + domainService.getDomainByName("engineering", 1L) >>> [null, domain] + 1 * domainService.createDomain("engineering", 1L, "engineering", _) >> domain + + def ldapImportUsersCmd = new LdapImportUsersCmd(ldapManager, domainService, accountService) + ldapImportUsersCmd.accountType = 2; + + when: "LdapListUsersCmd is executed" + ldapImportUsersCmd.execute() + then: "a list of size 2 is returned" + ldapImportUsersCmd.responseObject.getResponses().size() == 2 + } +} diff --git a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapListUsersCmdSpec.groovy b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapListUsersCmdSpec.groovy index 5039443c6ef..4b32eb1ecd6 100644 --- a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapListUsersCmdSpec.groovy +++ b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapListUsersCmdSpec.groovy @@ -17,8 +17,6 @@ package groovy.org.apache.cloudstack.ldap import org.apache.cloudstack.api.command.LdapListUsersCmd -import org.apache.cloudstack.api.ServerApiException -import org.apache.cloudstack.api.command.admin.user.ListUsersCmd import org.apache.cloudstack.api.response.LdapUserResponse import org.apache.cloudstack.api.response.ListResponse 
import org.apache.cloudstack.api.response.UserResponse @@ -55,9 +53,9 @@ class LdapListUsersCmdSpec extends spock.lang.Specification { given: "We have an LdapManager, one user, QueryService and a LdapListUsersCmd" def ldapManager = Mock(LdapManager) List users = new ArrayList() - users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org")) + users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null)) ldapManager.getUsers() >> users - LdapUserResponse response = new LdapUserResponse("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org") + LdapUserResponse response = new LdapUserResponse("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null) ldapManager.createLdapUserResponse(_) >> response def queryService = Mock(QueryService) def ldapListUsersCmd = new LdapListUsersCmd(ldapManager, queryService) @@ -94,7 +92,7 @@ class LdapListUsersCmdSpec extends spock.lang.Specification { queryService.searchForUsers(_) >> queryServiceResponse - def ldapUser = new LdapUser("rmurphy", "rmurphy@cloudstack.org", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org") + def ldapUser = new LdapUser("rmurphy", "rmurphy@cloudstack.org", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null) def ldapListUsersCmd = new LdapListUsersCmd(ldapManager,queryService) when: "isACloudstackUser is executed" @@ -111,7 +109,7 @@ class LdapListUsersCmdSpec extends spock.lang.Specification { queryService.searchForUsers(_) >> new ListResponse() - def ldapUser = new LdapUser("rmurphy", "rmurphy@cloudstack.org", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org") + def ldapUser = new LdapUser("rmurphy", "rmurphy@cloudstack.org", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null) def ldapListUsersCmd = new LdapListUsersCmd(ldapManager,queryService) when: "isACloudstackUser is executed" diff --git 
a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapManagerImplSpec.groovy b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapManagerImplSpec.groovy index d681eace90a..321e1af2ab4 100644 --- a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapManagerImplSpec.groovy +++ b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapManagerImplSpec.groovy @@ -16,6 +16,13 @@ // under the License. package groovy.org.apache.cloudstack.ldap +import org.apache.cloudstack.api.command.LdapAddConfigurationCmd +import org.apache.cloudstack.api.command.LdapCreateAccountCmd +import org.apache.cloudstack.api.command.LdapDeleteConfigurationCmd +import org.apache.cloudstack.api.command.LdapImportUsersCmd +import org.apache.cloudstack.api.command.LdapListUsersCmd +import org.apache.cloudstack.api.command.LdapUserSearchCmd + import javax.naming.NamingException import javax.naming.ldap.InitialLdapContext @@ -86,13 +93,15 @@ class LdapManagerImplSpec extends spock.lang.Specification { def ldapUserManager = Mock(LdapUserManager) def ldapManager = new LdapManagerImpl(ldapConfigurationDao, ldapContextFactory, ldapUserManager) when: "A ldap user response is generated" - def result = ldapManager.createLdapUserResponse(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org")) + def result = ldapManager.createLdapUserResponse(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,ou=engineering,dc=cloudstack,dc=org", + "engineering")) then: "The result of the response should match the given ldap user" result.username == "rmurphy" result.email == "rmurphy@test.com" result.firstname == "Ryan" result.lastname == "Murphy" - result.principal == "cn=rmurphy,dc=cloudstack,dc=org" + result.principal == "cn=rmurphy,ou=engineering,dc=cloudstack,dc=org" + result.domain == "engineering" } def "Test success getUsers"() { @@ -102,7 +111,7 @@ class 
LdapManagerImplSpec extends spock.lang.Specification { def ldapUserManager = Mock(LdapUserManager) ldapContextFactory.createBindContext() >> null List users = new ArrayList<>(); - users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org")) + users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null)) ldapUserManager.getUsers(_) >> users; def ldapManager = new LdapManagerImpl(ldapConfigurationDao, ldapContextFactory, ldapUserManager) when: "We search for a group of users" @@ -117,7 +126,7 @@ class LdapManagerImplSpec extends spock.lang.Specification { def ldapContextFactory = Mock(LdapContextFactory) def ldapUserManager = Mock(LdapUserManager) ldapContextFactory.createBindContext() >> null - ldapUserManager.getUser(_, _) >> new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org") + ldapUserManager.getUser(_, _) >> new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null) def ldapManager = new LdapManagerImpl(ldapConfigurationDao, ldapContextFactory, ldapUserManager) when: "We search for a user" def result = ldapManager.getUser("rmurphy") @@ -149,7 +158,7 @@ class LdapManagerImplSpec extends spock.lang.Specification { ldapContextFactory.createUserContext(_, _) >> { throw new NamingException() } def ldapUserManager = Mock(LdapUserManager) def ldapManager = Spy(LdapManagerImpl, constructorArgs: [ldapConfigurationDao, ldapContextFactory, ldapUserManager]) - ldapManager.getUser(_) >> { new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org") } + ldapManager.getUser(_) >> { new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null) } when: "The user attempts to authenticate with a bad password" def result = ldapManager.canAuthenticate("rmurphy", "password") then: "The authentication fails" @@ -203,7 +212,7 
@@ class LdapManagerImplSpec extends spock.lang.Specification { ldapContextFactory.createUserContext(_, _) >> null def ldapUserManager = Mock(LdapUserManager) def ldapManager = Spy(LdapManagerImpl, constructorArgs: [ldapConfigurationDao, ldapContextFactory, ldapUserManager]) - ldapManager.getUser(_) >> { new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org") } + ldapManager.getUser(_) >> { new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null) } when: "A user authenticates" def result = ldapManager.canAuthenticate("rmurphy", "password") then: "The result is true" @@ -237,7 +246,7 @@ class LdapManagerImplSpec extends spock.lang.Specification { ldapContextFactory.createBindContext() >> null; List users = new ArrayList(); - users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org")) + users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,ou=engineering,dc=cloudstack,dc=org", "engineering")) ldapUserManager.getUsers(_, _) >> users; def ldapManager = new LdapManagerImpl(ldapConfigurationDao, ldapContextFactory, ldapUserManager) @@ -288,16 +297,30 @@ class LdapManagerImplSpec extends spock.lang.Specification { thrown InvalidParameterValueException } + def supportedLdapCommands() { + List> cmdList = new ArrayList>(); + cmdList.add(LdapUserSearchCmd.class); + cmdList.add(LdapListUsersCmd.class); + cmdList.add(LdapAddConfigurationCmd.class); + cmdList.add(LdapDeleteConfigurationCmd.class); + cmdList.add(LdapListConfigurationCmd.class); + cmdList.add(LdapCreateAccountCmd.class); + cmdList.add(LdapImportUsersCmd.class); + return cmdList + } + def "Test that getCommands isn't empty"() { given: "We have an LdapConfigurationDao, LdapContextFactory, LdapUserManager and LdapManager" def ldapConfigurationDao = Mock(LdapConfigurationDaoImpl) def ldapContextFactory = Mock(LdapContextFactory) def ldapUserManager = 
Mock(LdapUserManager) + final List> cmdList = supportedLdapCommands() def ldapManager = new LdapManagerImpl(ldapConfigurationDao, ldapContextFactory, ldapUserManager) when: "Get commands is called" def result = ldapManager.getCommands() - then: "it must have atleast 1 command" + then: "it must return all the commands" result.size() > 0 + result == cmdList } def "Testing of listConfigurations"() { diff --git a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapSearchUserCmdSpec.groovy b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapSearchUserCmdSpec.groovy index fce299d933d..1411c29f7b4 100644 --- a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapSearchUserCmdSpec.groovy +++ b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapSearchUserCmdSpec.groovy @@ -16,7 +16,6 @@ // under the License. package groovy.org.apache.cloudstack.ldap -import org.apache.cloudstack.api.ServerApiException import org.apache.cloudstack.api.command.LdapUserSearchCmd import org.apache.cloudstack.api.response.LdapUserResponse import org.apache.cloudstack.ldap.LdapManager @@ -49,9 +48,9 @@ class LdapSearchUserCmdSpec extends spock.lang.Specification { given: "We have an Ldap manager and ldap user search cmd" def ldapManager = Mock(LdapManager) List users = new ArrayList() - users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org")) + users.add(new LdapUser("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null)) ldapManager.searchUsers(_) >> users - LdapUserResponse response = new LdapUserResponse("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org") + LdapUserResponse response = new LdapUserResponse("rmurphy", "rmurphy@test.com", "Ryan", "Murphy", "cn=rmurphy,dc=cloudstack,dc=org", null) ldapManager.createLdapUserResponse(_) >> response def ldapUserSearchCmd = new 
LdapUserSearchCmd(ldapManager) when: "The command is executed" diff --git a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapUserResponseSpec.groovy b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapUserResponseSpec.groovy index f1978fa60d2..9a64539eec5 100644 --- a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapUserResponseSpec.groovy +++ b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapUserResponseSpec.groovy @@ -64,4 +64,22 @@ class LdapUserResponseSpec extends spock.lang.Specification { then: "Get username returns the set value." response.getUsername() == "rmurphy" } + + def "Testing successful setting of LdapUserResponse domain"() { + given: "We have an LdapUserResponse" + LdapUserResponse response = new LdapUserResponse() + when: "A domain is set" + response.setDomain("engineering") + then: "Get domain returns the set value." + response.getDomain() == "engineering" + } + + def "Testing setting of LdapUserResponse domain to null"() { + given: "We have an LdapUserResponse" + LdapUserResponse response = new LdapUserResponse() + when: "A domain is set" + response.setDomain(null) + then: "Get domain returns the set value." 
+ response.getDomain() == null + } } diff --git a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapUserSpec.groovy b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapUserSpec.groovy index 8fd1ccc680e..6df947be22a 100644 --- a/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapUserSpec.groovy +++ b/plugins/user-authenticators/ldap/test/groovy/org/apache/cloudstack/ldap/LdapUserSpec.groovy @@ -22,7 +22,7 @@ class LdapUserSpec extends spock.lang.Specification { def "Testing LdapUsers hashCode generation"() { given: - def userA = new LdapUser(usernameA, "", "", "", "") + def userA = new LdapUser(usernameA, "", "", "", "", "") expect: userA.hashCode() == usernameA.hashCode() where: @@ -31,8 +31,8 @@ class LdapUserSpec extends spock.lang.Specification { def "Testing that LdapUser successfully gives the correct result for a compare to"() { given: "You have created two LDAP user objects" - def userA = new LdapUser(usernameA, "", "", "", "") - def userB = new LdapUser(usernameB, "", "", "", "") + def userA = new LdapUser(usernameA, "", "", "", "", "") + def userB = new LdapUser(usernameB, "", "", "", "", "") expect: "That when compared the result is less than or equal to 0" userA.compareTo(userB) <= 0 where: "The following values are used" @@ -43,8 +43,8 @@ class LdapUserSpec extends spock.lang.Specification { def "Testing that LdapUsers equality"() { given: - def userA = new LdapUser(usernameA, "", "", "", "") - def userB = new LdapUser(usernameB, "", "", "", "") + def userA = new LdapUser(usernameA, "", "", "", "", "") + def userB = new LdapUser(usernameB, "", "", "", "", "") expect: userA.equals(userA) == true userA.equals(new Object()) == false @@ -56,7 +56,7 @@ class LdapUserSpec extends spock.lang.Specification { def "Testing that the username is correctly set with the ldap object"() { given: "You have created a LDAP user object with a username" - def user = new LdapUser(username, "", "", 
"","") + def user = new LdapUser(username, "", "", "", "", "") expect: "The username is equal to the given data source" user.getUsername() == username where: "The username is set to " @@ -65,7 +65,7 @@ class LdapUserSpec extends spock.lang.Specification { def "Testing the email is correctly set with the ldap object"() { given: "You have created a LDAP user object with a email" - def user = new LdapUser("", email, "", "","") + def user = new LdapUser("", email, "", "", "", "") expect: "The email is equal to the given data source" user.getEmail() == email where: "The email is set to " @@ -74,7 +74,7 @@ class LdapUserSpec extends spock.lang.Specification { def "Testing the firstname is correctly set with the ldap object"() { given: "You have created a LDAP user object with a firstname" - def user = new LdapUser("", "", firstname, "", "") + def user = new LdapUser("", "", firstname, "", "", "") expect: "The firstname is equal to the given data source" user.getFirstname() == firstname where: "The firstname is set to " @@ -83,7 +83,7 @@ class LdapUserSpec extends spock.lang.Specification { def "Testing the lastname is correctly set with the ldap object"() { given: "You have created a LDAP user object with a lastname" - def user = new LdapUser("", "", "", lastname, "") + def user = new LdapUser("", "", "", lastname, "", "") expect: "The lastname is equal to the given data source" user.getLastname() == lastname where: "The lastname is set to " @@ -92,10 +92,19 @@ class LdapUserSpec extends spock.lang.Specification { def "Testing the principal is correctly set with the ldap object"() { given: "You have created a LDAP user object with a principal" - def user = new LdapUser("", "", "", "", principal) + def user = new LdapUser("", "", "", "", principal, "") expect: "The principal is equal to the given data source" user.getPrincipal() == principal - where: "The username is set to " + where: "The principal is set to " principal << ["", null, "cn=rmurphy,dc=cloudstack,dc=org"] } 
+ + def "Testing the domain is correctly set with the ldap object"() { + given: "You have created a LDAP user object with a principal" + def user = new LdapUser("", "", "", "", "", domain) + expect: "The principal is equal to the given data source" + user.getDomain() == domain + where: "The username is set to " + domain << ["", null, "engineering"] + } } \ No newline at end of file diff --git a/plugins/user-authenticators/md5/resources/META-INF/cloudstack/md5/module.properties b/plugins/user-authenticators/md5/resources/META-INF/cloudstack/md5/module.properties new file mode 100644 index 00000000000..03ba7397117 --- /dev/null +++ b/plugins/user-authenticators/md5/resources/META-INF/cloudstack/md5/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=md5 +parent=api \ No newline at end of file diff --git a/plugins/user-authenticators/md5/resources/META-INF/cloudstack/md5/spring-md5-context.xml b/plugins/user-authenticators/md5/resources/META-INF/cloudstack/md5/spring-md5-context.xml new file mode 100644 index 00000000000..4a8e64dd864 --- /dev/null +++ b/plugins/user-authenticators/md5/resources/META-INF/cloudstack/md5/spring-md5-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java b/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java index e5b169fc456..63583af4ad6 100644 --- a/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java +++ b/plugins/user-authenticators/md5/src/com/cloud/server/auth/MD5UserAuthenticator.java @@ -22,12 +22,11 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.log4j.Logger; + import com.cloud.user.UserAccount; import com.cloud.user.dao.UserAccountDao; - import com.cloud.utils.exception.CloudRuntimeException; /** @@ -59,17 +58,6 @@ public class MD5UserAuthenticator extends DefaultUserAuthenticator { return true; } - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - if(name == null) { - name = "MD5"; - } - super.configure(name, params); - return true; - } - - @Override public String encode(String password) { MessageDigest md5 = null; try { diff --git a/plugins/user-authenticators/plain-text/resources/META-INF/cloudstack/plaintext/module.properties b/plugins/user-authenticators/plain-text/resources/META-INF/cloudstack/plaintext/module.properties new file mode 100644 index 00000000000..5a295638f0f --- /dev/null +++ b/plugins/user-authenticators/plain-text/resources/META-INF/cloudstack/plaintext/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more 
contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=plaintext +parent=api \ No newline at end of file diff --git a/plugins/user-authenticators/plain-text/resources/META-INF/cloudstack/plaintext/spring-plaintext-context.xml b/plugins/user-authenticators/plain-text/resources/META-INF/cloudstack/plaintext/spring-plaintext-context.xml new file mode 100644 index 00000000000..639411a1086 --- /dev/null +++ b/plugins/user-authenticators/plain-text/resources/META-INF/cloudstack/plaintext/spring-plaintext-context.xml @@ -0,0 +1,35 @@ + + + + + + + + + diff --git a/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java b/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java index f102275905f..849e82e093d 100644 --- a/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java +++ b/plugins/user-authenticators/plain-text/src/com/cloud/server/auth/PlainTextUserAuthenticator.java @@ -15,20 +15,15 @@ package com.cloud.server.auth; -import java.math.BigInteger; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.log4j.Logger; 
import com.cloud.user.UserAccount; import com.cloud.user.dao.UserAccountDao; -import com.cloud.utils.exception.CloudRuntimeException; @Local(value={UserAuthenticator.class}) @@ -56,16 +51,6 @@ public class PlainTextUserAuthenticator extends DefaultUserAuthenticator { return true; } - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - if (name == null) { - name = "PLAINTEXT"; - } - super.configure(name, params); - return true; - } - @Override public String encode(String password) { // Plaintext so no encoding at all diff --git a/plugins/user-authenticators/sha256salted/resources/META-INF/cloudstack/sha256salted/module.properties b/plugins/user-authenticators/sha256salted/resources/META-INF/cloudstack/sha256salted/module.properties new file mode 100644 index 00000000000..c70a2f516ec --- /dev/null +++ b/plugins/user-authenticators/sha256salted/resources/META-INF/cloudstack/sha256salted/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=sha256salted +parent=api \ No newline at end of file diff --git a/plugins/user-authenticators/sha256salted/resources/META-INF/cloudstack/sha256salted/spring-sha256salted-context.xml b/plugins/user-authenticators/sha256salted/resources/META-INF/cloudstack/sha256salted/spring-sha256salted-context.xml new file mode 100644 index 00000000000..e379718549e --- /dev/null +++ b/plugins/user-authenticators/sha256salted/resources/META-INF/cloudstack/sha256salted/spring-sha256salted-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java b/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java index 91be922c9a9..3592ddc8169 100644 --- a/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java +++ b/plugins/user-authenticators/sha256salted/src/com/cloud/server/auth/SHA256SaltedUserAuthenticator.java @@ -24,7 +24,6 @@ import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.bouncycastle.util.encoders.Base64; @@ -42,16 +41,6 @@ public class SHA256SaltedUserAuthenticator extends DefaultUserAuthenticator { private UserAccountDao _userAccountDao; private static final int s_saltlen = 32; - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - if (name == null) { - name = "SHA256SALT"; - } - super.configure(name, params); - return true; - } - /* (non-Javadoc) * @see com.cloud.server.auth.UserAuthenticator#authenticate(java.lang.String, java.lang.String, java.lang.Long, java.util.Map) */ diff --git a/pom.xml b/pom.xml index 6d9a30f101b..679acedac80 100644 --- a/pom.xml +++ b/pom.xml @@ -73,7 +73,7 @@ 1.2 1.0-20081010.060147 5.1 - 3.1.2.RELEASE + 3.2.4.RELEASE 1.9.5 1.3.22 2.6 @@ -81,7 +81,7 @@ 0.9.8 0.10 build/replace.properties - 0.5.0 + 
0.5.1 0.1.3 target 1.0.10 @@ -168,11 +168,12 @@ deps/XenServerJava engine plugins - patches framework test client services + maven-standard + quickcloud @@ -359,7 +360,12 @@ org.aspectj aspectjtools - 1.6.2 + 1.7.0 + + + org.aspectj + aspectjweaver + 1.7.0 org.apache.axis @@ -381,6 +387,16 @@ wsdl4j 1.4 + + org.slf4j + slf4j-api + 1.7.5 + + + org.slf4j + slf4j-log4j12 + 1.7.5 + @@ -414,6 +430,11 @@ install src test + + + ${basedir}/resources + + test/resources @@ -434,6 +455,19 @@ **/* + + dist + + **/* + + + + ${basedir} + + ${cs.target.dir} + dist + + @@ -540,7 +574,7 @@ tools/devcloud/basebuild/puppet-devcloudinitial/files/network.conf tools/appliance/definitions/devcloud/* tools/appliance/definitions/systemvmtemplate/* - tools/appliance/definitions/systemvmtemplate64/* + tools/appliance/definitions/systemvm64template/* tools/appliance/definitions/builtin/* tools/cli/cloudmonkey.egg-info/* tools/devcloud/src/deps/boxes/basebox-build/definition.rb @@ -571,34 +605,37 @@ ui/lib/reset.css ui/lib/require.js waf - patches/systemvm/debian/systemvm.vmx - patches/systemvm/debian/config/root/.ssh/authorized_keys - patches/systemvm/debian/config/etc/apache2/httpd.conf - patches/systemvm/debian/config/etc/apache2/ports.conf - patches/systemvm/debian/config/etc/apache2/sites-available/default - patches/systemvm/debian/config/etc/apache2/sites-available/default-ssl - patches/systemvm/debian/config/etc/apache2/vhostexample.conf - patches/systemvm/debian/config/etc/dnsmasq.conf.tmpl - patches/systemvm/debian/config/etc/vpcdnsmasq.conf - patches/systemvm/debian/config/etc/ssh/sshd_config - patches/systemvm/debian/config/etc/rsyslog.conf - patches/systemvm/debian/config/etc/logrotate.conf - patches/systemvm/debian/config/etc/logrotate.d/* - patches/systemvm/debian/config/etc/sysctl.conf - patches/systemvm/debian/config/root/redundant_router/keepalived.conf.templ - patches/systemvm/debian/config/root/redundant_router/arping_gateways.sh.templ - 
patches/systemvm/debian/config/root/redundant_router/conntrackd.conf.templ - patches/systemvm/debian/vpn/etc/ipsec.conf - patches/systemvm/debian/vpn/etc/ppp/options.xl2tpd - patches/systemvm/debian/vpn/etc/xl2tpd/xl2tpd.conf - patches/systemvm/debian/vpn/etc/ipsec.secrets - patches/systemvm/debian/config/etc/haproxy/haproxy.cfg - patches/systemvm/debian/config/etc/cloud-nic.rules - patches/systemvm/debian/config/etc/modprobe.d/aesni_intel - patches/systemvm/debian/config/etc/rc.local - patches/systemvm/debian/config/var/www/html/userdata/.htaccess - patches/systemvm/debian/config/var/www/html/latest/.htaccess - patches/systemvm/debian/vpn/etc/ipsec.d/l2tp.conf + systemvm/conf/agent.properties + systemvm/conf/environment.properties + systemvm/js/jquery.js + systemvm/patches/debian/systemvm.vmx + systemvm/patches/debian/config/root/.ssh/authorized_keys + systemvm/patches/debian/config/etc/apache2/httpd.conf + systemvm/patches/debian/config/etc/apache2/ports.conf + systemvm/patches/debian/config/etc/apache2/sites-available/default + systemvm/patches/debian/config/etc/apache2/sites-available/default-ssl + systemvm/patches/debian/config/etc/apache2/vhostexample.conf + systemvm/patches/debian/config/etc/dnsmasq.conf.tmpl + systemvm/patches/debian/config/etc/vpcdnsmasq.conf + systemvm/patches/debian/config/etc/ssh/sshd_config + systemvm/patches/debian/config/etc/rsyslog.conf + systemvm/patches/debian/config/etc/logrotate.conf + systemvm/patches/debian/config/etc/logrotate.d/* + systemvm/patches/debian/config/etc/sysctl.conf + systemvm/patches/debian/config/root/redundant_router/keepalived.conf.templ + systemvm/patches/debian/config/root/redundant_router/arping_gateways.sh.templ + systemvm/patches/debian/config/root/redundant_router/conntrackd.conf.templ + systemvm/patches/debian/vpn/etc/ipsec.conf + systemvm/patches/debian/vpn/etc/ppp/options.xl2tpd + systemvm/patches/debian/vpn/etc/xl2tpd/xl2tpd.conf + systemvm/patches/debian/vpn/etc/ipsec.secrets + 
systemvm/patches/debian/config/etc/haproxy/haproxy.cfg + systemvm/patches/debian/config/etc/cloud-nic.rules + systemvm/patches/debian/config/etc/modprobe.d/aesni_intel + systemvm/patches/debian/config/etc/rc.local + systemvm/patches/debian/config/var/www/html/userdata/.htaccess + systemvm/patches/debian/config/var/www/html/latest/.htaccess + systemvm/patches/debian/vpn/etc/ipsec.d/l2tp.conf tools/transifex/.tx/config tools/marvin/marvin/sandbox/advanced/sandbox.cfg tools/ngui/static/bootstrap/* @@ -674,6 +711,17 @@ awsapi + + systemvm + + + systemvm + + + + systemvm + + eclipse @@ -704,7 +752,7 @@ vmware - nonoss + noredist diff --git a/python/lib/cloudutils/networkConfig.py b/python/lib/cloudutils/networkConfig.py index 405a3be519c..41ef9d93ed2 100644 --- a/python/lib/cloudutils/networkConfig.py +++ b/python/lib/cloudutils/networkConfig.py @@ -35,6 +35,11 @@ class networkConfig: self.method = None @staticmethod + def listNetworks(): + devs = os.listdir("/sys/class/net/") + devs = filter(networkConfig.isBridge, devs) + return devs + @staticmethod def getDefaultNetwork(): cmd = bash("route -n|awk \'/^0.0.0.0/ {print $2,$8}\'") if not cmd.isSuccess(): diff --git a/python/lib/cloudutils/serviceConfig.py b/python/lib/cloudutils/serviceConfig.py index d129e00c45b..4ed9a57079c 100755 --- a/python/lib/cloudutils/serviceConfig.py +++ b/python/lib/cloudutils/serviceConfig.py @@ -388,7 +388,8 @@ class nfsConfig(serviceCfgBase): return True cfo = configFileOps("/etc/nfsmount.conf") - cfo.addEntry("AC", "False") + cfo.addEntry("Ac", "False") + cfo.addEntry("actimeo", "0") cfo.save() self.syscfg.svo.enableService("rpcbind") @@ -726,7 +727,7 @@ class sudoersConfig(serviceCfgBase): def config(self): try: cfo = configFileOps("/etc/sudoers", self) - cfo.addEntry("cloud ALL ", "NOPASSWD : ALL") + cfo.addEntry("cloud ALL ", "NOPASSWD : /bin/chmod, /bin/cp, /bin/mkdir, /bin/mount, /bin/umount") cfo.rmEntry("Defaults", "requiretty", " ") cfo.save() return True diff --git 
a/quickcloud/pom.xml b/quickcloud/pom.xml new file mode 100644 index 00000000000..1b9975e8e42 --- /dev/null +++ b/quickcloud/pom.xml @@ -0,0 +1,30 @@ + + + 4.0.0 + cloud-quickcloud + Apache CloudStack Framework - QuickCloud + + org.apache.cloudstack + cloud-maven-standard + 4.3.0-SNAPSHOT + ../maven-standard/pom.xml + + diff --git a/quickcloud/src/main/resources/META-INF/cloudstack/core/spring-quickcloud-core-context-override.xml b/quickcloud/src/main/resources/META-INF/cloudstack/core/spring-quickcloud-core-context-override.xml new file mode 100644 index 00000000000..35e4cbe54e1 --- /dev/null +++ b/quickcloud/src/main/resources/META-INF/cloudstack/core/spring-quickcloud-core-context-override.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/scripts/storage/qcow2/managesnapshot.sh b/scripts/storage/qcow2/managesnapshot.sh index 368ff549ee6..42bd1eb2613 100755 --- a/scripts/storage/qcow2/managesnapshot.sh +++ b/scripts/storage/qcow2/managesnapshot.sh @@ -42,11 +42,11 @@ fi is_lv() { # Must be a block device - if [ -b "${1}" ]; then + if [ -b "${1}" -o -L "{1}" ]; then # But not a volume group or physical volume lvm vgs "${1}" > /dev/null 2>&1 && return 1 # And a logical volume - lvm lvs "${1}" > /dev/null 2>&1 && return 0 + lvm lvs "${1}" > /dev/null 2>&1 && return 1 fi return 0 } diff --git a/scripts/vm/hypervisor/xenserver/s3xen b/scripts/vm/hypervisor/xenserver/s3xen index 372a6daaddc..bf81bbd34a6 100644 --- a/scripts/vm/hypervisor/xenserver/s3xen +++ b/scripts/vm/hypervisor/xenserver/s3xen @@ -34,6 +34,7 @@ import base64 import hmac import traceback import urllib2 +from xml.dom.minidom import parseString import XenAPIPlugin sys.path.extend(["/opt/xensource/sm/"]) @@ -260,15 +261,73 @@ class S3Client(object): sha).digest())[:-1] return signature, request_date + + def getText(self, nodelist): + rc = [] + for node in nodelist: + if node.nodeType == node.TEXT_NODE: + rc.append(node.data) + return ''.join(rc) - def put(self, bucket, key, src_filename): + def 
multiUpload(self, bucket, key, src_fileName, chunkSize=5 * 1024 * 1024): + uploadId={} + def readInitalMultipart(response): + data = response.read() + xmlResult = parseString(data) + result = xmlResult.getElementsByTagName("InitiateMultipartUploadResult")[0] + upload = result.getElementsByTagName("UploadId")[0] + uploadId["0"] = upload.childNodes[0].data + + self.do_operation('POST', bucket, key + "?uploads", fn_read=readInitalMultipart) + + fileSize = os.path.getsize(src_fileName) + parts = fileSize / chunkSize + ((fileSize % chunkSize) and 1) + part = 1 + srcFile = open(src_fileName, 'rb') + etags = [] + while part <= parts: + offset = part - 1 + size = min(fileSize - offset * chunkSize, chunkSize) + headers = { + self.HEADER_CONTENT_LENGTH: size + } + def send_body(connection): + srcFile.seek(offset * chunkSize) + block = srcFile.read(size) + connection.send(block) + def read_multiPart(response): + etag = response.getheader('ETag') + etags.append((part, etag)) + self.do_operation("PUT", bucket, "%s?partNumber=%s&uploadId=%s"%(key, part, uploadId["0"]), headers, send_body, read_multiPart) + part = part + 1 + srcFile.close() + + data = [] + partXml = "%i%s" + for etag in etags: + data.append(partXml%etag) + msg = "%s"%("".join(data)) + size = len(msg) + headers = { + self.HEADER_CONTENT_LENGTH: size + } + def send_complete_multipart(connection): + connection.send(msg) + self.do_operation("POST", bucket, "%s?uploadId=%s"%(key, uploadId["0"]), headers, send_complete_multipart) + + def put(self, bucket, key, src_filename, maxSingleUpload): if not os.path.isfile(src_filename): raise Exception( "Attempt to put " + src_filename + " that does not exist.") + size = os.path.getsize(src_filename) + if size > maxSingleUpload or maxSingleUpload == 0: + return self.multiUpload(bucket, key, src_filename) + headers = { self.HEADER_CONTENT_MD5: compute_md5(src_filename), + self.HEADER_CONTENT_TYPE: 'application/octet-stream', self.HEADER_CONTENT_LENGTH: 
str(os.stat(src_filename).st_size), } @@ -323,6 +382,7 @@ def parseArguments(args): bucket = args['bucket'] key = args['key'] filename = args['filename'] + maxSingleUploadBytes = int(args["maxSingleUploadSizeInBytes"]) if is_blank(operation): raise ValueError('An operation must be specified.') @@ -336,18 +396,18 @@ def parseArguments(args): if is_blank(filename): raise ValueError('A filename must be specified.') - return client, operation, bucket, key, filename + return client, operation, bucket, key, filename, maxSingleUploadBytes @echo def s3(session, args): - client, operation, bucket, key, filename = parseArguments(args) + client, operation, bucket, key, filename, maxSingleUploadBytes = parseArguments(args) try: if operation == 'put': - client.put(bucket, key, filename) + client.put(bucket, key, filename, maxSingleUploadBytes) elif operation == 'get': client.get(bucket, key, filename) elif operation == 'delete': diff --git a/scripts/vm/hypervisor/xenserver/setupxenserver.sh b/scripts/vm/hypervisor/xenserver/setupxenserver.sh index e4b6f3e3372..311f2738bb3 100755 --- a/scripts/vm/hypervisor/xenserver/setupxenserver.sh +++ b/scripts/vm/hypervisor/xenserver/setupxenserver.sh @@ -49,7 +49,7 @@ sed -i 's/0\.0\.0\.0/127\.0\.0\.1/' /opt/xensource/libexec/qemu-dm-wrapper 2>&1 sed -i /NOZEROCONF/d /etc/sysconfig/network echo "NOZEROCONF=yes" >> /etc/sysconfig/network -[ -f /etc/cron.hourly/logrotate ] || mv /etc/cron.daily/logrotate /etc/cron.hourly 2>&1 +mv -n /etc/cron.daily/logrotate /etc/cron.hourly 2>&1 # more aio thread echo 1048576 >/proc/sys/fs/aio-max-nr diff --git a/scripts/vm/hypervisor/xenserver/vmops b/scripts/vm/hypervisor/xenserver/vmops index 83efc67b002..3f11960aab1 100755 --- a/scripts/vm/hypervisor/xenserver/vmops +++ b/scripts/vm/hypervisor/xenserver/vmops @@ -495,12 +495,8 @@ def allow_egress_traffic(session): devs = [] for pif in session.xenapi.PIF.get_all(): pif_rec = session.xenapi.PIF.get_record(pif) - vlan = pif_rec.get('VLAN') dev = 
pif_rec.get('device') - if vlan == '-1': - devs.append(dev) - else: - devs.append(dev + "." + vlan) + devs.append(dev + "+") for d in devs: try: util.pread2(['/bin/bash', '-c', "iptables -n -L FORWARD | grep '%s '" % d]) @@ -804,8 +800,6 @@ def default_network_rules_systemvm(session, args): except: util.pread2(['iptables', '-F', vmchain]) - allow_egress_traffic(session) - for vif in vifs: try: util.pread2(['iptables', '-A', 'BRIDGE-FIREWALL', '-m', 'physdev', '--physdev-is-bridged', '--physdev-out', vif, '-j', vmchain]) @@ -1464,7 +1458,7 @@ def network_rules(session, args): vm_mac = args.get('vmMAC') signature = args.pop('signature') seqno = args.pop('seqno') - sec_ips = args.pop("secIps") + sec_ips = args.get("secIps") deflated = 'false' if 'deflated' in args: deflated = args.pop('deflated') diff --git a/scripts/vm/hypervisor/xenserver/xcposs/vmops b/scripts/vm/hypervisor/xenserver/xcposs/vmops index c5a9f943b51..20725e4ec85 100644 --- a/scripts/vm/hypervisor/xenserver/xcposs/vmops +++ b/scripts/vm/hypervisor/xenserver/xcposs/vmops @@ -1389,6 +1389,9 @@ def setDNATRule(session, args): @echo def createISOVHD(session, args): + # Should not create the VDI if the systemvm.iso does not exist + if not os.path.exists('/usr/share/xcp/packages/iso/systemvm.iso'): + return "Failed" #hack for XCP on ubuntu 12.04, as can't attach iso to a vm vdis = session.xenapi.VDI.get_by_name_label("systemvm-vdi"); util.SMlog(vdis) diff --git a/scripts/vm/hypervisor/xenserver/xcposs/vmopsSnapshot b/scripts/vm/hypervisor/xenserver/xcposs/vmopsSnapshot index 31f26ad3c3e..53f31a99eed 100644 --- a/scripts/vm/hypervisor/xenserver/xcposs/vmopsSnapshot +++ b/scripts/vm/hypervisor/xenserver/xcposs/vmopsSnapshot @@ -372,8 +372,16 @@ def unmountSnapshotsDir(session, args): return "1" -def getPrimarySRPath(primaryStorageSRUuid, isISCSI): - if isISCSI: +def getPrimarySRPath(session, primaryStorageSRUuid, isISCSI): + sr = session.xenapi.SR.get_by_uuid(primaryStorageSRUuid) + srrec = 
session.xenapi.SR.get_record(sr) + srtype = srrec["type"] + if srtype == "file": + pbd = session.xenapi.SR.get_PBDs(sr)[0] + pbdrec = session.xenapi.PBD.get_record(pbd) + primarySRPath = pbdrec["device_config"]["location"] + return primarySRPath + elif isISCSI: primarySRDir = lvhdutil.VG_PREFIX + primaryStorageSRUuid return os.path.join(lvhdutil.VG_LOCATION, primarySRDir) else: @@ -472,7 +480,7 @@ def getVhdParent(session, args): snapshotUuid = args['snapshotUuid'] isISCSI = getIsTrueString(args['isISCSI']) - primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI) + primarySRPath = getPrimarySRPath(session, primaryStorageSRUuid, isISCSI) util.SMlog("primarySRPath: " + primarySRPath) baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI) @@ -490,7 +498,7 @@ def backupSnapshot(session, args): isISCSI = getIsTrueString(args['isISCSI']) path = args['path'] localMountPoint = args['localMountPoint'] - primarySRPath = getPrimarySRPath(primaryStorageSRUuid, isISCSI) + primarySRPath = getPrimarySRPath(session, primaryStorageSRUuid, isISCSI) util.SMlog("primarySRPath: " + primarySRPath) baseCopyUuid = getParentOfSnapshot(snapshotUuid, primarySRPath, isISCSI) diff --git a/scripts/vm/network/security_group.py b/scripts/vm/network/security_group.py index 0e0fafb5c9e..a7c64b0984e 100755 --- a/scripts/vm/network/security_group.py +++ b/scripts/vm/network/security_group.py @@ -322,8 +322,8 @@ def default_network_rules_systemvm(vm_name, localbrname): for bridge in bridges: if bridge != localbrname: if not addFWFramework(bridge): - return False - brfw = "BF-" + bridge + return False + brfw = getBrfw(bridge) vifs = getVifsForBridge(vm_name, bridge) for vif in vifs: try: @@ -429,7 +429,7 @@ def default_network_rules(vm_name, vm_id, vm_ip, vm_mac, vif, brname, sec_ips): return False vmName = vm_name - brfw = "BF-" + brname + brfw = getBrfw(brname) domID = getvmId(vm_name) delete_rules_for_vm_in_bridge_firewall_chain(vmName) vmchain = vm_name @@ -619,7 +619,7 
@@ def network_rules_for_rebooted_vm(vmName): if brName is None or brName is "": brName = "cloudbr0" else: - brName = re.sub("^BF-", "", brName) + brName = execute("iptables-save |grep physdev-is-bridged |grep FORWARD |grep BF |grep '\-o' |awk '{print $4}' | head -1").strip() if 1 in [ vm_name.startswith(c) for c in ['r-', 's-', 'v-'] ]: @@ -632,8 +632,8 @@ def network_rules_for_rebooted_vm(vmName): vifs = getVifs(vmName) logging.debug(vifs, brName) for v in vifs: - execute("iptables -A " + "BF-" + brName + "-IN " + " -m physdev --physdev-is-bridged --physdev-in " + v + " -j "+ vmchain_default) - execute("iptables -A " + "BF-" + brName + "-OUT " + " -m physdev --physdev-is-bridged --physdev-out " + v + " -j "+ vmchain_default) + execute("iptables -A " + getBrfw(brName) + "-IN " + " -m physdev --physdev-is-bridged --physdev-in " + v + " -j "+ vmchain_default) + execute("iptables -A " + getBrfw(brName) + "-OUT " + " -m physdev --physdev-is-bridged --physdev-out " + v + " -j "+ vmchain_default) #change antispoof rule in vmchain try: @@ -939,6 +939,13 @@ def getvmId(vmName): return dom.ID() +def getBrfw(brname): + cmd = "iptables-save |grep physdev-is-bridged |grep FORWARD |grep BF |grep '\-o' | grep -w " + brname + "|awk '{print $9}' | head -1" + brfwname = bash("-c", cmd).stdout.strip() + if brfwname == "": + brfwname = "BF-" + brname + return brfwname + def addFWFramework(brname): try: cfo = configFileOps("/etc/sysctl.conf") @@ -952,7 +959,7 @@ def addFWFramework(brname): logging.debug("failed to turn on bridge netfilter") return False - brfw = "BF-" + brname + brfw = getBrfw(brname) try: execute("iptables -L " + brfw) except: diff --git a/scripts/vm/network/vnet/modifyvxlan.sh b/scripts/vm/network/vnet/modifyvxlan.sh new file mode 100755 index 00000000000..f7d08f1d8c4 --- /dev/null +++ b/scripts/vm/network/vnet/modifyvxlan.sh @@ -0,0 +1,239 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license 
agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# modifyvxlan.sh -- adds and deletes VXLANs from a Routing Server +# set -x + +## TODO(VXLAN): MTU, IPv6 underlying + +usage() { + printf "Usage: %s: -o (add | delete) -v -p -b \n" +} + +addVxlan() { + local vxlanId=$1 + local pif=$2 + local vxlanDev=vxlan$vxlanId + local vxlanBr=$3 + local mcastGrp="239.$(( ($vxlanId >> 16) % 256 )).$(( ($vxlanId >> 8) % 256 )).$(( $vxlanId % 256 ))" + + ## TODO(VXLAN): $brif (trafficlabel) should be passed from caller because we cannot assume 1:1 mapping between pif and brif. + # lookup bridge interface + local sysfs_dir=/sys/devices/virtual/net/ + local brif=`find ${sysfs_dir}*/brif/ -name $pif | sed -e "s,$sysfs_dir,," | sed -e 's,/brif/.*$,,'` + + if [ "$brif " == " " ] + then + if [ -d "/sys/class/net/${pif}" ] + then + # if bridge is not found, but matches a pif, use it + brif=$pif + else + printf "Failed to lookup bridge interface which includes pif: $pif." + return 1 + fi + else + # confirm ip address of $brif + ip addr show $brif | grep -w inet + if [ $? -gt 0 ] + then + printf "Failed to find vxlan multicast source ip address on brif: $brif." + return 1 + fi + fi + + # mcast route + ## TODO(VXLAN): Can we assume there're only one IP address which can be multicast src IP on the IF? 
+ ip route get $mcastGrp | grep -w "dev $brif" + if [ $? -gt 0 ] + then + ip route add $mcastGrp/32 dev $brif + if [ $? -gt 0 ] + then + printf "Failed to add vxlan multicast route on brif: $brif." + return 1 + fi + fi + + if [ ! -d /sys/class/net/$vxlanDev ] + then + ip link add $vxlanDev type vxlan id $vxlanId group $mcastGrp ttl 10 dev $brif + + if [ $? -gt 0 ] + then + # race condition that someone already creates the vxlan + if [ ! -d /sys/class/net/$vxlanDev ] + then + printf "Failed to create vxlan $vxlanId on brif: $brif." + return 1 + fi + fi + fi + + # is up? + ip link show $vxlanDev | grep -w UP > /dev/null + if [ $? -gt 0 ] + then + ip link set $vxlanDev up > /dev/null + fi + + if [ ! -d /sys/class/net/$vxlanBr ] + then + brctl addbr $vxlanBr > /dev/null + + if [ $? -gt 0 ] + then + if [ ! -d /sys/class/net/$vxlanBr ] + then + printf "Failed to create br: $vxlanBr" + return 2 + fi + fi + + brctl setfd $vxlanBr 0 + fi + + #pif is eslaved into vxlanBr? + ls /sys/class/net/$vxlanBr/brif/ | grep -w "$vxlanDev" > /dev/null + if [ $? -gt 0 ] + then + brctl addif $vxlanBr $vxlanDev > /dev/null + if [ $? -gt 0 ] + then + ls /sys/class/net/$vxlanBr/brif/ | grep -w "$vxlanDev" > /dev/null + if [ $? -gt 0 ] + then + printf "Failed to add vxlan: $vxlanDev to $vxlanBr" + return 3 + fi + fi + fi + + # is vxlanBr up? + ip link show $vxlanBr | grep -w UP > /dev/null + if [ $? -gt 0 ] + then + ip link set $vxlanBr up + fi + + return 0 +} + +deleteVxlan() { + local vxlanId=$1 + local pif=$2 + local vxlanDev=vxlan$vxlanId + local vxlanBr=$3 + local mcastGrp="239.$(( ($vxlanId >> 16) % 256 )).$(( ($vxlanId >> 8) % 256 )).$(( $vxlanId % 256 ))" + + local sysfs_dir=/sys/devices/virtual/net/ + local brif=`find ${sysfs_dir}*/brif/ -name $pif | sed -e "s,$sysfs_dir,," | sed -e 's,/brif/.*$,,'` + + ip route del $mcastGrp/32 dev $brif + + ip link delete $vxlanDev + + if [ $? -gt 0 ] + then + printf "Failed to del vxlan: $vxlanId" + printf "Continue..." 
+ fi + + ip link set $vxlanBr down + + if [ $? -gt 0 ] + then + return 1 + fi + + brctl delbr $vxlanBr + + if [ $? -gt 0 ] + then + printf "Failed to del bridge $vxlanBr" + return 1 + fi + + return 0 +} + +op= +vxlanId= +option=$@ + +while getopts 'o:v:p:b:' OPTION +do + case $OPTION in + o) oflag=1 + op="$OPTARG" + ;; + v) vflag=1 + vxlanId="$OPTARG" + ;; + p) pflag=1 + pif="$OPTARG" + ;; + b) bflag=1 + brName="$OPTARG" + ;; + ?) usage + exit 2 + ;; + esac +done + +# Check that all arguments were passed in +if [ "$oflag$vflag$pflag$bflag" != "1111" ] +then + usage + exit 2 +fi + +# Do we support Vxlan? +lsmod|grep ^vxlan >& /dev/null +if [ $? -gt 0 ] +then + modprobe=`modprobe vxlan 2>&1` + if [ $? -gt 0 ] + then + printf "Failed to load vxlan kernel module: $modprobe" + exit 1 + fi +fi + +if [ "$op" == "add" ] +then + # Add the vxlan + addVxlan $vxlanId $pif $brName + + # If the add fails then return failure + if [ $? -gt 0 ] + then + exit 1 + fi +else + if [ "$op" == "delete" ] + then + # Delete the vxlan + deleteVxlan $vxlanId $pif $brName + + # Always exit with success + exit 0 + fi +fi + diff --git a/server/conf/cloudstack-sudoers.in b/server/conf/cloudstack-sudoers.in index dcfb17b3ddf..069016434c7 100644 --- a/server/conf/cloudstack-sudoers.in +++ b/server/conf/cloudstack-sudoers.in @@ -18,5 +18,5 @@ # The CloudStack management server needs sudo permissions # without a password. 
-@MSUSER@ ALL =NOPASSWD : ALL +@MSUSER@ ALL =NOPASSWD : /bin/chmod, /bin/cp, /bin/mkdir, /bin/mount, /bin/umount diff --git a/server/pom.xml b/server/pom.xml index 6446b7eca28..c7978e2dd38 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -81,6 +81,12 @@ org.apache.cloudstack cloud-utils ${project.version} + + + xml-apis + xml-apis + + org.apache.cloudstack @@ -92,6 +98,12 @@ org.reflections reflections + + + xml-apis + xml-apis + + org.apache.cloudstack @@ -128,9 +140,6 @@ resources - - **/*.xml - diff --git a/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml new file mode 100644 index 00000000000..9553340ea87 --- /dev/null +++ b/server/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -0,0 +1,228 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/server/resources/META-INF/cloudstack/core/spring-server-core-misc-context.xml b/server/resources/META-INF/cloudstack/core/spring-server-core-misc-context.xml new file mode 100644 index 00000000000..fd2f5fbb3fd --- /dev/null +++ b/server/resources/META-INF/cloudstack/core/spring-server-core-misc-context.xml @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-alert-adapter-backend/module.properties b/server/resources/META-INF/cloudstack/server-alert-adapter-backend/module.properties new file mode 100644 index 00000000000..120c91d3f41 --- /dev/null +++ 
b/server/resources/META-INF/cloudstack/server-alert-adapter-backend/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=server-alert-adapter-backend +parent=backend \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-alert-adapter-backend/spring-server-alert-adapter-backend-context.xml b/server/resources/META-INF/cloudstack/server-alert-adapter-backend/spring-server-alert-adapter-backend-context.xml new file mode 100644 index 00000000000..f7670e5810b --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-alert-adapter-backend/spring-server-alert-adapter-backend-context.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-alert-adapter-compute/module.properties b/server/resources/META-INF/cloudstack/server-alert-adapter-compute/module.properties new file mode 100644 index 00000000000..12213f44910 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-alert-adapter-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=server-alert-adapter-compute +parent=compute \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-alert-adapter-compute/spring-server-alert-adapter-compute-context.xml b/server/resources/META-INF/cloudstack/server-alert-adapter-compute/spring-server-alert-adapter-compute-context.xml new file mode 100644 index 00000000000..e918ce08e40 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-alert-adapter-compute/spring-server-alert-adapter-compute-context.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-alert-adapter-storage/module.properties b/server/resources/META-INF/cloudstack/server-alert-adapter-storage/module.properties new file mode 100644 index 00000000000..c156009acbd --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-alert-adapter-storage/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=server-alert-adapter-storage +parent=storage \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-alert-adapter-storage/spring-server-alert-adapter-storage-context.xml b/server/resources/META-INF/cloudstack/server-alert-adapter-storage/spring-server-alert-adapter-storage-context.xml new file mode 100644 index 00000000000..713d9e363cc --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-alert-adapter-storage/spring-server-alert-adapter-storage-context.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-allocator/module.properties b/server/resources/META-INF/cloudstack/server-allocator/module.properties new file mode 100644 index 00000000000..f69c483deb6 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-allocator/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +name=server-allocator +parent=allocator \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-allocator/spring-server-allocator-context.xml b/server/resources/META-INF/cloudstack/server-allocator/spring-server-allocator-context.xml new file mode 100644 index 00000000000..cc2924e5e02 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-allocator/spring-server-allocator-context.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-api/module.properties b/server/resources/META-INF/cloudstack/server-api/module.properties new file mode 100644 index 00000000000..74a9c504207 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-api/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-api +parent=api \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-api/spring-server-api-context.xml b/server/resources/META-INF/cloudstack/server-api/spring-server-api-context.xml new file mode 100644 index 00000000000..20e2f460933 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-api/spring-server-api-context.xml @@ -0,0 +1,33 @@ + + + + + + + \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-compute/module.properties b/server/resources/META-INF/cloudstack/server-compute/module.properties new file mode 100644 index 00000000000..7b42a910871 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-compute/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-compute +parent=compute \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-compute/spring-server-compute-context.xml b/server/resources/META-INF/cloudstack/server-compute/spring-server-compute-context.xml new file mode 100644 index 00000000000..1311902ff5d --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-compute/spring-server-compute-context.xml @@ -0,0 +1,38 @@ + + + + + + + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-discoverer/module.properties b/server/resources/META-INF/cloudstack/server-discoverer/module.properties new file mode 100644 index 00000000000..0c4f5e1c3e5 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-discoverer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-discoverer +parent=discoverer \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-discoverer/spring-server-discoverer-context.xml b/server/resources/META-INF/cloudstack/server-discoverer/spring-server-discoverer-context.xml new file mode 100644 index 00000000000..90666d13143 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-discoverer/spring-server-discoverer-context.xml @@ -0,0 +1,45 @@ + + + + + + + + + + + + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-fencer/module.properties b/server/resources/META-INF/cloudstack/server-fencer/module.properties new file mode 100644 index 00000000000..b4a868498bf --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-fencer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-fencer +parent=compute \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-fencer/spring-server-fencer-context.xml b/server/resources/META-INF/cloudstack/server-fencer/spring-server-fencer-context.xml new file mode 100644 index 00000000000..3312d5fb288 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-fencer/spring-server-fencer-context.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-investigator/module.properties b/server/resources/META-INF/cloudstack/server-investigator/module.properties new file mode 100644 index 00000000000..85e68824cf3 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-investigator/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-investigator +parent=compute \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-investigator/spring-server-investigator-context.xml b/server/resources/META-INF/cloudstack/server-investigator/spring-server-investigator-context.xml new file mode 100644 index 00000000000..24e6e33c456 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-investigator/spring-server-investigator-context.xml @@ -0,0 +1,46 @@ + + + + + + + + + + + + + + + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-network/module.properties b/server/resources/META-INF/cloudstack/server-network/module.properties new file mode 100644 index 00000000000..95a7d1bacfb --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-network/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-network +parent=network \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-network/spring-server-network-context.xml b/server/resources/META-INF/cloudstack/server-network/spring-server-network-context.xml new file mode 100644 index 00000000000..553ae44eacb --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-network/spring-server-network-context.xml @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-planner/module.properties b/server/resources/META-INF/cloudstack/server-planner/module.properties new file mode 100644 index 00000000000..541b7693be0 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-planner/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-planner +parent=planner \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-planner/spring-server-planner-context.xml b/server/resources/META-INF/cloudstack/server-planner/spring-server-planner-context.xml new file mode 100644 index 00000000000..36f3ed02a3f --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-planner/spring-server-planner-context.xml @@ -0,0 +1,34 @@ + + + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-storage/module.properties b/server/resources/META-INF/cloudstack/server-storage/module.properties new file mode 100644 index 00000000000..9eaf2d0a684 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-storage/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-storage +parent=storage \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-storage/spring-server-storage-context.xml b/server/resources/META-INF/cloudstack/server-storage/spring-server-storage-context.xml new file mode 100644 index 00000000000..8b90200a97e --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-storage/spring-server-storage-context.xml @@ -0,0 +1,34 @@ + + + + + + + diff --git a/server/resources/META-INF/cloudstack/server-template-adapter/module.properties b/server/resources/META-INF/cloudstack/server-template-adapter/module.properties new file mode 100644 index 00000000000..85ba3176b80 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-template-adapter/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+name=server-template-adapter +parent=storage \ No newline at end of file diff --git a/server/resources/META-INF/cloudstack/server-template-adapter/spring-server-template-adapter-context.xml b/server/resources/META-INF/cloudstack/server-template-adapter/spring-server-template-adapter-context.xml new file mode 100644 index 00000000000..1eeb27c7099 --- /dev/null +++ b/server/resources/META-INF/cloudstack/server-template-adapter/spring-server-template-adapter-context.xml @@ -0,0 +1,32 @@ + + + + + + diff --git a/server/resources/META-INF/cloudstack/system/spring-server-system-context.xml b/server/resources/META-INF/cloudstack/system/spring-server-system-context.xml new file mode 100644 index 00000000000..a14a0738413 --- /dev/null +++ b/server/resources/META-INF/cloudstack/system/spring-server-system-context.xml @@ -0,0 +1,36 @@ + + + + + + + + + + \ No newline at end of file diff --git a/server/src/com/cloud/alert/AlertManagerImpl.java b/server/src/com/cloud/alert/AlertManagerImpl.java index 6f5d25a61c1..ed0c9fdcb46 100755 --- a/server/src/com/cloud/alert/AlertManagerImpl.java +++ b/server/src/com/cloud/alert/AlertManagerImpl.java @@ -48,6 +48,7 @@ import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; @@ -455,9 +456,9 @@ public class AlertManagerImpl extends ManagerBase implements AlertManager, Confi } - class CapacityChecker extends TimerTask { + class CapacityChecker extends ManagedContextTimerTask { @Override - public void run() { + protected void runInContext() { try { s_logger.debug("Running Capacity Checker ... 
"); checkForAlerts(); diff --git a/server/src/com/cloud/api/ApiAsyncJobDispatcher.java b/server/src/com/cloud/api/ApiAsyncJobDispatcher.java index 7092ef3779f..22ccb891e62 100644 --- a/server/src/com/cloud/api/ApiAsyncJobDispatcher.java +++ b/server/src/com/cloud/api/ApiAsyncJobDispatcher.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobDispatcher; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.managed.context.ManagedContext; import com.cloud.user.Account; import com.cloud.user.User; @@ -51,12 +52,23 @@ public class ApiAsyncJobDispatcher extends AdapterBase implements AsyncJobDispat @Inject private AsyncJobManager _asyncJobMgr; @Inject private EntityManager _entityMgr; + @Inject + ManagedContext _managedContext; public ApiAsyncJobDispatcher() { } - @Override - public void runJob(AsyncJob job) { + @Override + public void runJob(final AsyncJob job) { + _managedContext.runWithContext(new Runnable() { + @Override + public void run() { + runJobInContext(job); + } + }); + } + + protected void runJobInContext(AsyncJob job) { BaseAsyncCmd cmdObj = null; try { Class cmdClass = Class.forName(job.getCmd()); diff --git a/server/src/com/cloud/api/ApiDBUtils.java b/server/src/com/cloud/api/ApiDBUtils.java index 96db8b7de91..013aa5789cd 100755 --- a/server/src/com/cloud/api/ApiDBUtils.java +++ b/server/src/com/cloud/api/ApiDBUtils.java @@ -57,6 +57,7 @@ import org.apache.cloudstack.api.response.UserResponse; import org.apache.cloudstack.api.response.UserVmResponse; import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import 
org.apache.cloudstack.framework.config.dao.ConfigurationDao; @@ -132,6 +133,7 @@ import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DataCenterDetailsDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.VlanDao; import com.cloud.domain.DomainVO; @@ -225,8 +227,9 @@ import com.cloud.region.ha.GlobalLoadBalancingRulesService; import com.cloud.resource.ResourceManager; import com.cloud.server.Criteria; import com.cloud.server.ManagementServer; +import com.cloud.server.ResourceMetaDataService; import com.cloud.server.ResourceTag; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.server.StatsCollector; import com.cloud.server.TaggedResourceService; import com.cloud.service.ServiceOfferingVO; @@ -407,11 +410,10 @@ public class ApiDBUtils { static AffinityGroupJoinDao _affinityGroupJoinDao; static GlobalLoadBalancingRulesService _gslbService; static NetworkACLDao _networkACLDao; - static ServiceOfferingDetailsDao _serviceOfferingDetailsDao; static AccountService _accountService; static AclRoleJoinDao _aclRoleJoinDao; static AclGroupJoinDao _aclGroupJoinDao; - + static ResourceMetaDataService _resourceDetailsService; @Inject private ManagementServer ms; @@ -525,11 +527,12 @@ public class ApiDBUtils { @Inject private ServiceOfferingDetailsDao serviceOfferingDetailsDao; @Inject private AccountService accountService; @Inject - private ConfigurationManager configMgr; - @Inject private AclRoleJoinDao aclRoleJoinDao; @Inject private AclGroupJoinDao aclGroupJoinDao; + @Inject private ConfigurationManager configMgr; + @Inject private DataCenterDetailsDao zoneDetailsDao; + @Inject private ResourceMetaDataService resourceDetailsService; @PostConstruct void init() { @@ -639,10 +642,10 @@ public class ApiDBUtils { // Note: stats collector should already have been initialized 
by this time, otherwise a null instance is returned _statsCollector = StatsCollector.getInstance(); _networkACLDao = networkACLDao; - _serviceOfferingDetailsDao = serviceOfferingDetailsDao; _accountService = accountService; _aclRoleJoinDao = aclRoleJoinDao; _aclGroupJoinDao = aclGroupJoinDao; + _resourceDetailsService = resourceDetailsService; } // /////////////////////////////////////////////////////////// @@ -899,7 +902,7 @@ public class ApiDBUtils { public static VMTemplateVO findTemplateById(Long templateId) { VMTemplateVO template = _templateDao.findByIdIncludingRemoved(templateId); if(template != null) { - Map details = _templateDetailsDao.findDetails(templateId); + Map details = _templateDetailsDao.listDetailsKeyPairs(templateId); if(details != null && !details.isEmpty()) { template.setDetails(details); } @@ -1166,11 +1169,11 @@ public class ApiDBUtils { return vmSnapshot; } - public static String getUuid(String resourceId, TaggedResourceType resourceType) { + public static String getUuid(String resourceId, ResourceObjectType resourceType) { return _taggedResourceService.getUuid(resourceId, resourceType); } - public static List listByResourceTypeAndId(TaggedResourceType type, long resourceId) { + public static List listByResourceTypeAndId(ResourceObjectType type, long resourceId) { return _taggedResourceService.listByResourceTypeAndId(type, resourceId); } public static List getAutoScalePolicyConditions(long policyId) @@ -1720,12 +1723,21 @@ public class ApiDBUtils { return providerDnsName; } - public static Map getServiceOfferingDetails(long serviceOfferingId) { - Map details = _serviceOfferingDetailsDao.findDetails(serviceOfferingId); + public static Map getResourceDetails(long resourceId, ResourceObjectType resourceType) { + Map details = null; + if (isAdmin(CallContext.current().getCallingAccount())) { + details = _resourceDetailsService.getDetailsMap(resourceId, resourceType, null); + } else { + details = _resourceDetailsService.getDetailsMap(resourceId, 
resourceType, true); + } return details.isEmpty() ? null : details; } public static boolean isAdmin(Account account) { return _accountService.isAdmin(account.getType()); } + + public static List listResourceTagViewByResourceUUID(String resourceUUID, ResourceObjectType resourceType){ + return _tagJoinDao.listBy(resourceUUID, resourceType); + } } diff --git a/server/src/com/cloud/api/ApiResponseHelper.java b/server/src/com/cloud/api/ApiResponseHelper.java index be574a0fd30..abb30152127 100755 --- a/server/src/com/cloud/api/ApiResponseHelper.java +++ b/server/src/com/cloud/api/ApiResponseHelper.java @@ -139,6 +139,8 @@ import org.apache.cloudstack.api.response.VpnUsersResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.config.Configuration; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.network.lb.ApplicationLoadBalancerRule; @@ -262,8 +264,9 @@ import com.cloud.projects.ProjectInvitation; import com.cloud.region.ha.GlobalLoadBalancerRule; import com.cloud.server.Criteria; import com.cloud.server.ResourceTag; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.service.ServiceOfferingVO; +import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOS; import com.cloud.storage.GuestOSCategoryVO; @@ -288,6 +291,7 @@ import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; import com.cloud.utils.db.EntityManager; +import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; import 
com.cloud.vm.ConsoleProxyVO; @@ -319,6 +323,8 @@ public class ApiResponseHelper implements ResponseGenerator { protected AsyncJobManager _jobMgr; @Inject ConfigurationManager _configMgr; + @Inject + SnapshotDataFactory snapshotfactory; @Override public UserResponse createUserResponse(User user) { @@ -454,8 +460,21 @@ public class ApiResponseHelper implements ResponseGenerator { snapshotResponse.setIntervalType(ApiDBUtils.getSnapshotIntervalTypes(snapshot.getId())); snapshotResponse.setState(snapshot.getState()); + SnapshotInfo snapshotInfo = null; + if (!(snapshot instanceof SnapshotInfo)) { + snapshotInfo = snapshotfactory.getSnapshot(snapshot.getId(), DataStoreRole.Image); + } else { + snapshotInfo = (SnapshotInfo)snapshot; + } + + if (snapshotInfo == null) { + throw new CloudRuntimeException("Unable to find info for image store snapshot with uuid '"+snapshot.getUuid()+"'"); + } + + snapshotResponse.setRevertable(snapshotInfo.isRevertable()); + // set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.Snapshot, snapshot.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.Snapshot, snapshot.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -708,7 +727,7 @@ public class ApiResponseHelper implements ResponseGenerator { ipResponse.setPortable(ipAddr.isPortable()); //set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.PublicIpAddress, ipAddr.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.PublicIpAddress, ipAddr.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -748,7 +767,7 @@ public class ApiResponseHelper implements ResponseGenerator { } //set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.LoadBalancer, 
loadBalancer.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.LoadBalancer, loadBalancer.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -816,7 +835,7 @@ public class ApiResponseHelper implements ResponseGenerator { for (SummedCapacity capacity : capacities) { CapacityResponse capacityResponse = new CapacityResponse(); capacityResponse.setCapacityType(capacity.getCapacityType()); - capacityResponse.setCapacityUsed(capacity.getUsedCapacity()); + capacityResponse.setCapacityUsed(capacity.getUsedCapacity() + capacity.getReservedCapacity()); if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { List c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, pod.getId(), null); capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity()); @@ -853,7 +872,7 @@ public class ApiResponseHelper implements ResponseGenerator { for (SummedCapacity capacity : capacities) { CapacityResponse capacityResponse = new CapacityResponse(); capacityResponse.setCapacityType(capacity.getCapacityType()); - capacityResponse.setCapacityUsed(capacity.getUsedCapacity()); + capacityResponse.setCapacityUsed(capacity.getUsedCapacity() + capacity.getReservedCapacity()); if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { List c = ApiDBUtils.findNonSharedStorageForClusterPodZone(zoneId, null, null); capacityResponse.setCapacityTotal(capacity.getTotalCapacity() - c.get(0).getTotalCapacity()); @@ -974,7 +993,7 @@ public class ApiResponseHelper implements ResponseGenerator { for (SummedCapacity capacity : capacities) { CapacityResponse capacityResponse = new CapacityResponse(); capacityResponse.setCapacityType(capacity.getCapacityType()); - capacityResponse.setCapacityUsed(capacity.getUsedCapacity()); + capacityResponse.setCapacityUsed(capacity.getUsedCapacity() + 
capacity.getReservedCapacity()); if (capacity.getCapacityType() == Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED) { List c = ApiDBUtils.findNonSharedStorageForClusterPodZone(null, null, cluster.getId()); @@ -1036,7 +1055,7 @@ public class ApiResponseHelper implements ResponseGenerator { } // set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.PortForwardingRule, fwRule.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.PortForwardingRule, fwRule.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -2198,8 +2217,11 @@ public class ApiResponseHelper implements ResponseGenerator { String broadcastUri = network.getBroadcastUri().toString(); response.setBroadcastUri(broadcastUri); String vlan = "N/A"; - if (BroadcastDomainType.Vlan.scheme().equals(BroadcastDomainType.getSchemeValue(network.getBroadcastUri()))) { + switch (BroadcastDomainType.getSchemeValue(network.getBroadcastUri())){ + case Vlan: + case Vxlan: vlan = BroadcastDomainType.getValue(network.getBroadcastUri()); + break; } // return vlan information only to Root admin response.setVlan(vlan); @@ -2308,7 +2330,7 @@ public class ApiResponseHelper implements ResponseGenerator { response.setCanUseForDeploy(ApiDBUtils.canUseForDeploy(network)); // set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.Network, network.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.Network, network.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -2382,7 +2404,7 @@ public class ApiResponseHelper implements ResponseGenerator { response.setIcmpType(fwRule.getIcmpType()); // set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.FirewallRule, fwRule.getId()); + List tags = 
ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.FirewallRule, fwRule.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -2432,7 +2454,7 @@ public class ApiResponseHelper implements ResponseGenerator { } //set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.NetworkACL, aclItem.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.NetworkACL, aclItem.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -2699,8 +2721,8 @@ public class ApiResponseHelper implements ResponseGenerator { @Override public VirtualRouterProviderResponse createVirtualRouterProviderResponse(VirtualRouterProvider result) { //generate only response of the VR/VPCVR provider type - if (!(result.getType() == VirtualRouterProvider.VirtualRouterProviderType.VirtualRouter - || result.getType() == VirtualRouterProvider.VirtualRouterProviderType.VPCVirtualRouter)) { + if (!(result.getType() == VirtualRouterProvider.Type.VirtualRouter + || result.getType() == VirtualRouterProvider.Type.VPCVirtualRouter)) { return null; } VirtualRouterProviderResponse response = new VirtualRouterProviderResponse(); @@ -2945,7 +2967,7 @@ public class ApiResponseHelper implements ResponseGenerator { populateOwner(response, vpc); // set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.Vpc, vpc.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.Vpc, vpc.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -3136,7 +3158,7 @@ public class ApiResponseHelper implements ResponseGenerator { populateDomain(response, result.getDomainId()); // set tag information - List tags = 
ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.StaticRoute, result.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.StaticRoute, result.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -3642,7 +3664,7 @@ public class ApiResponseHelper implements ResponseGenerator { lbResponse.setLbInstances(instanceResponses); //set tag information - List tags = ApiDBUtils.listByResourceTypeAndId(TaggedResourceType.LoadBalancer, lb.getId()); + List tags = ApiDBUtils.listByResourceTypeAndId(ResourceObjectType.LoadBalancer, lb.getId()); List tagResponses = new ArrayList(); for (ResourceTag tag : tags) { ResourceTagResponse tagResponse = createResourceTagResponse(tag, true); @@ -3769,7 +3791,7 @@ public class ApiResponseHelper implements ResponseGenerator { @Override public InternalLoadBalancerElementResponse createInternalLbElementResponse(VirtualRouterProvider result) { - if (result.getType() != VirtualRouterProvider.VirtualRouterProviderType.InternalLbVm) { + if (result.getType() != VirtualRouterProvider.Type.InternalLbVm) { return null; } InternalLoadBalancerElementResponse response = new InternalLoadBalancerElementResponse(); diff --git a/server/src/com/cloud/api/ApiServer.java b/server/src/com/cloud/api/ApiServer.java index 846cd530560..45a2cba344a 100755 --- a/server/src/com/cloud/api/ApiServer.java +++ b/server/src/com/cloud/api/ApiServer.java @@ -44,7 +44,6 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import javax.annotation.PostConstruct; import javax.crypto.Mac; import javax.crypto.spec.SecretKeySpec; import javax.inject.Inject; @@ -52,37 +51,6 @@ import javax.naming.ConfigurationException; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; -import org.apache.commons.codec.binary.Base64; -import 
org.apache.http.ConnectionClosedException; -import org.apache.http.HttpException; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.HttpServerConnection; -import org.apache.http.HttpStatus; -import org.apache.http.NameValuePair; -import org.apache.http.client.utils.URLEncodedUtils; -import org.apache.http.entity.BasicHttpEntity; -import org.apache.http.impl.DefaultHttpResponseFactory; -import org.apache.http.impl.DefaultHttpServerConnection; -import org.apache.http.impl.NoConnectionReuseStrategy; -import org.apache.http.impl.SocketHttpServerConnection; -import org.apache.http.params.BasicHttpParams; -import org.apache.http.params.CoreConnectionPNames; -import org.apache.http.params.CoreProtocolPNames; -import org.apache.http.params.HttpParams; -import org.apache.http.protocol.BasicHttpContext; -import org.apache.http.protocol.BasicHttpProcessor; -import org.apache.http.protocol.HttpContext; -import org.apache.http.protocol.HttpRequestHandler; -import org.apache.http.protocol.HttpRequestHandlerRegistry; -import org.apache.http.protocol.HttpService; -import org.apache.http.protocol.ResponseConnControl; -import org.apache.http.protocol.ResponseContent; -import org.apache.http.protocol.ResponseDate; -import org.apache.http.protocol.ResponseServer; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.acl.APIChecker; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiErrorCode; @@ -120,6 +88,37 @@ import org.apache.cloudstack.framework.config.impl.ConfigurationVO; import org.apache.cloudstack.framework.jobs.AsyncJob; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.commons.codec.binary.Base64; +import org.apache.http.ConnectionClosedException; +import 
org.apache.http.HttpException; +import org.apache.http.HttpRequest; +import org.apache.http.HttpResponse; +import org.apache.http.HttpServerConnection; +import org.apache.http.HttpStatus; +import org.apache.http.NameValuePair; +import org.apache.http.client.utils.URLEncodedUtils; +import org.apache.http.entity.BasicHttpEntity; +import org.apache.http.impl.DefaultHttpResponseFactory; +import org.apache.http.impl.DefaultHttpServerConnection; +import org.apache.http.impl.NoConnectionReuseStrategy; +import org.apache.http.impl.SocketHttpServerConnection; +import org.apache.http.params.BasicHttpParams; +import org.apache.http.params.CoreConnectionPNames; +import org.apache.http.params.CoreProtocolPNames; +import org.apache.http.params.HttpParams; +import org.apache.http.protocol.BasicHttpContext; +import org.apache.http.protocol.BasicHttpProcessor; +import org.apache.http.protocol.HttpContext; +import org.apache.http.protocol.HttpRequestHandler; +import org.apache.http.protocol.HttpRequestHandlerRegistry; +import org.apache.http.protocol.HttpService; +import org.apache.http.protocol.ResponseConnControl; +import org.apache.http.protocol.ResponseContent; +import org.apache.http.protocol.ResponseDate; +import org.apache.http.protocol.ResponseServer; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.response.ApiResponseSerializer; import com.cloud.configuration.Config; @@ -149,7 +148,7 @@ import com.cloud.utils.component.PluggableService; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExceptionProxyObject; @@ -170,8 +169,8 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer @Inject private EntityManager _entityMgr; - @Inject 
List _pluggableServices; - @Inject List _apiAccessCheckers; + List _pluggableServices; + List _apiAccessCheckers; @Inject protected ApiAsyncJobDispatcher _asyncDispatcher; @@ -184,18 +183,13 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer public ApiServer() { } - @PostConstruct - void initComponent() { - CallContext.init(_entityMgr); - } - @Override public boolean configure(String name, Map params) throws ConfigurationException { - init(); return true; } - public void init() { + @Override + public boolean start() { Integer apiPort = null; // api port, null by default SearchCriteria sc = _configDao.createSearchCriteria(); sc.addAnd("name", SearchCriteria.Op.EQ, Config.IntegrationAPIPort.key()); @@ -251,6 +245,8 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer ListenerThread listenerThread = new ListenerThread(this, apiPort); listenerThread.start(); } + + return true; } // NOTE: handle() only handles over the wire (OTW) requests from integration.api.port 8096 @@ -714,7 +710,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } } - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); User user = null; // verify there is a user with this api key @@ -1002,7 +998,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } } - static class WorkerTask implements Runnable { + static class WorkerTask extends ManagedContextRunnable { private final HttpService _httpService; private final HttpServerConnection _conn; @@ -1012,7 +1008,7 @@ public class ApiServer extends ManagerBase implements HttpRequestHandler, ApiSer } @Override - public void run() { + protected void runInContext() { HttpContext context = new BasicHttpContext(null); try { while (!Thread.interrupted() && _conn.isOpen()) { @@ -1122,4 +1118,22 @@ public class ApiServer extends ManagerBase 
implements HttpRequestHandler, ApiSer } return responseText; } + + public List getPluggableServices() { + return _pluggableServices; + } + + @Inject + public void setPluggableServices(List _pluggableServices) { + this._pluggableServices = _pluggableServices; + } + + public List getApiAccessCheckers() { + return _apiAccessCheckers; + } + + @Inject + public void setApiAccessCheckers(List _apiAccessCheckers) { + this._apiAccessCheckers = _apiAccessCheckers; + } } diff --git a/server/src/com/cloud/api/ApiServlet.java b/server/src/com/cloud/api/ApiServlet.java index 552327c83d1..def18d0919d 100755 --- a/server/src/com/cloud/api/ApiServlet.java +++ b/server/src/com/cloud/api/ApiServlet.java @@ -34,11 +34,11 @@ import javax.servlet.http.HttpSession; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import org.springframework.web.context.support.SpringBeanAutowiringSupport; - import org.apache.cloudstack.api.ApiErrorCode; import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.managed.context.ManagedContext; import com.cloud.exception.CloudAuthenticationException; import com.cloud.user.Account; @@ -57,6 +57,8 @@ public class ApiServlet extends HttpServlet { @Inject AccountService _accountMgr; @Inject EntityManager _entityMgr; + @Inject + ManagedContext _managedContext; public ApiServlet() { } @@ -105,8 +107,16 @@ public class ApiServlet extends HttpServlet { } } - @SuppressWarnings("unchecked") - private void processRequest(HttpServletRequest req, HttpServletResponse resp) { + private void processRequest(final HttpServletRequest req, final HttpServletResponse resp) { + _managedContext.runWithContext(new Runnable() { + @Override + public void run() { + processRequestInContext(req, resp); + } + }); + } + + private void processRequestInContext(HttpServletRequest req, HttpServletResponse resp) { StringBuffer auditTrailSb = new 
StringBuffer(); auditTrailSb.append(" " + req.getRemoteAddr()); auditTrailSb.append(" -- " + req.getMethod() + " "); diff --git a/server/src/com/cloud/api/query/QueryManagerImpl.java b/server/src/com/cloud/api/query/QueryManagerImpl.java index 80a2227b983..7ab22984f8e 100644 --- a/server/src/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/com/cloud/api/query/QueryManagerImpl.java @@ -19,7 +19,6 @@ package com.cloud.api.query; import java.util.ArrayList; import java.util.Date; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -44,6 +43,7 @@ import org.apache.cloudstack.affinity.AffinityGroupVMMapVO; import org.apache.cloudstack.affinity.dao.AffinityGroupDomainMapDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.api.BaseListProjectAndAccountResourcesCmd; +import org.apache.cloudstack.api.ResourceDetail; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.command.admin.host.ListHostsCmd; import org.apache.cloudstack.api.command.admin.internallb.ListInternalLBVMsCmd; @@ -145,6 +145,7 @@ import com.cloud.api.query.vo.UserAccountJoinVO; import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.api.query.vo.VolumeJoinVO; import com.cloud.dc.DedicatedResourceVO; +import com.cloud.dc.dao.DataCenterDetailsDao; import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.domain.Domain; import com.cloud.domain.DomainVO; @@ -155,6 +156,7 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.ha.HighAvailabilityManager; import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.network.dao.NetworkDetailsDao; import com.cloud.network.security.SecurityGroupVMMapVO; import com.cloud.network.security.dao.SecurityGroupVMMapDao; import com.cloud.org.Grouping; @@ -167,7 +169,7 @@ import 
com.cloud.projects.dao.ProjectDao; import com.cloud.resource.ResourceManager; import com.cloud.server.ResourceMetaDataService; import com.cloud.server.ResourceTag; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.server.TaggedResourceService; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; @@ -178,9 +180,10 @@ import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.Volume; -import com.cloud.storage.VolumeDetailVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDetailsDao; +import com.cloud.tags.ResourceTagVO; +import com.cloud.tags.dao.ResourceTagDao; import com.cloud.template.VirtualMachineTemplate.TemplateFilter; import com.cloud.user.Account; import com.cloud.user.AccountManager; @@ -192,17 +195,18 @@ import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.Filter; +import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.vm.DomainRouterVO; -import com.cloud.vm.NicDetailVO; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.dao.DomainRouterDao; -import com.cloud.vm.dao.NicDetailDao; +import com.cloud.vm.dao.NicDetailsDao; import com.cloud.vm.dao.UserVmDao; +import com.cloud.vm.dao.UserVmDetailsDao; @Component @Local(value = { QueryService.class }) @@ -306,7 +310,10 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { private VolumeDetailsDao _volumeDetailDao; @Inject - private NicDetailDao _nicDetailDao; + private NicDetailsDao _nicDetailDao; + + @Inject + UserVmDetailsDao _userVmDetailDao; @Inject 
private HighAvailabilityManager _haMgr; @@ -334,6 +341,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { @Inject private DedicatedResourceDao _dedicatedDao; + @Inject + DataCenterDetailsDao _dcDetailsDao; + @Inject DomainManager _domainMgr; @@ -354,6 +364,11 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { @Inject AclService _aclService; + + @Inject NetworkDetailsDao _networkDetailsDao; + + @Inject + ResourceTagDao _resourceTagDao; /* * (non-Javadoc) @@ -769,9 +784,14 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { Object zoneId = cmd.getZoneId(); Object keyword = cmd.getKeyword(); boolean isAdmin = false; + boolean isRootAdmin = false; if (_accountMgr.isAdmin(caller.getType())) { isAdmin = true; } + if (_accountMgr.isRootAdmin(caller.getId())) { + isRootAdmin = true; + } + Object groupId = cmd.getGroupId(); Object networkId = cmd.getNetworkId(); if (HypervisorType.getType(hypervisor) == HypervisorType.None && hypervisor != null) { @@ -792,6 +812,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { storageId = adCmd.getStorageId(); } + sb.and("displayName", sb.entity().getDisplayName(), SearchCriteria.Op.LIKE); sb.and("id", sb.entity().getId(), SearchCriteria.Op.EQ); sb.and("name", sb.entity().getName(), SearchCriteria.Op.LIKE); @@ -827,6 +848,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } + if (!isRootAdmin) { + sb.and("displayVm", sb.entity().isDisplayVm(), SearchCriteria.Op.EQ); + } // populate the search criteria with the values passed in SearchCriteria sc = sb.create(); @@ -927,6 +951,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } } + if(!isRootAdmin){ + sc.setParameters("displayVm", 1); + } // search vm details by ids Pair, Integer> uniqueVmPair = _userVmJoinDao.searchAndCount(sc, searchFilter); Integer count = uniqueVmPair.second(); @@ -1142,6 +1169,7 @@ public class 
QueryManagerImpl extends ManagerBase implements QueryService { ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); ssc.addOr("instanceName", SearchCriteria.Op.LIKE, "%" + keyword + "%"); ssc.addOr("state", SearchCriteria.Op.LIKE, "%" + keyword + "%"); + ssc.addOr("networkName", SearchCriteria.Op.LIKE, "%" + keyword + "%"); sc.addAnd("instanceName", SearchCriteria.Op.SC, ssc); } @@ -1569,7 +1597,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } if (haHosts != null && haTag != null && !haTag.isEmpty()) { - sc.setJoinParameters("hostTagSearch", "tag", haTag); + sc.setParameters("tag", haTag); } // search host details by ids @@ -1612,6 +1640,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { String keyword = cmd.getKeyword(); String type = cmd.getType(); Map tags = cmd.getTags(); + boolean isRootAdmin = _accountMgr.isRootAdmin(caller.getType()); Long zoneId = cmd.getZoneId(); Long podId = null; @@ -1653,6 +1682,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // display UserVM volumes only sb.and().op("type", sb.entity().getVmType(), SearchCriteria.Op.NIN); sb.or("nulltype", sb.entity().getVmType(), SearchCriteria.Op.NULL); + if(!isRootAdmin){ + sb.and("displayVolume", sb.entity().isDisplayVolume(), SearchCriteria.Op.EQ); + } sb.cp(); @@ -1703,6 +1735,10 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { sc.setParameters("podId", podId); } + if(!isRootAdmin){ + sc.setParameters("displayVolume", 1); + } + // Don't return DomR and ConsoleProxy volumes sc.setParameters("type", VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm, VirtualMachine.Type.DomainRouter); @@ -2226,6 +2262,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { Object id = cmd.getId(); Object keyword = cmd.getKeyword(); Long domainId = cmd.getDomainId(); + Boolean isRootAdmin = _accountMgr.isRootAdmin(account.getType()); 
// Keeping this logic consistent with domain specific zones // if a domainId is provided, we just return the disk offering // associated with this domain @@ -2234,6 +2271,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { // check if the user's domain == do's domain || user's domain is // a child of so's domain for non-root users sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); + if(!isRootAdmin){ + sc.addAnd("displayOffering", SearchCriteria.Op.EQ, 1); + } return _diskOfferingJoinDao.searchAndCount(sc, searchFilter); } else { throw new PermissionDeniedException("The account:" + account.getAccountName() @@ -2266,6 +2306,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { spc.addOr("domainId", SearchCriteria.Op.NULL); // include public // offering as where sc.addAnd("domainId", SearchCriteria.Op.SC, spc); + sc.addAnd("displayOffering", SearchCriteria.Op.EQ, 1); sc.addAnd("systemUse", SearchCriteria.Op.EQ, false); // non-root // users should // not see @@ -2340,7 +2381,6 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { isAscending = (isAscending == null ? 
true : isAscending); Filter searchFilter = new Filter(ServiceOfferingJoinVO.class, "sortKey", isAscending, cmd.getStartIndex(), cmd.getPageSizeVal()); - SearchCriteria sc = _srvOfferingJoinDao.createSearchCriteria(); Account caller = CallContext.current().getCallingAccount(); Object name = cmd.getServiceOfferingName(); @@ -2351,6 +2391,7 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { Boolean isSystem = cmd.getIsSystem(); String vmTypeStr = cmd.getSystemVmType(); + SearchCriteria sc = _srvOfferingJoinDao.createSearchCriteria(); if (!_accountMgr.isRootAdmin(caller.getId()) && isSystem) { throw new InvalidParameterValueException("Only ROOT admins can access system's offering"); } @@ -2367,6 +2408,27 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } } + if (vmId != null) { + UserVmVO vmInstance = _userVmDao.findById(vmId); + if ((vmInstance == null) || (vmInstance.getRemoved() != null)) { + InvalidParameterValueException ex = new InvalidParameterValueException( + "unable to find a virtual machine with specified id"); + ex.addProxyObject(vmId.toString(), "vmId"); + throw ex; + } + + _accountMgr.checkAccess(caller, null, true, vmInstance); + + ServiceOfferingVO offering = _srvOfferingDao.findByIdIncludingRemoved(vmInstance.getServiceOfferingId()); + sc.addAnd("id", SearchCriteria.Op.NEQ, offering.getId()); + + // Only return offerings with the same Guest IP type and storage + // pool preference + // sc.addAnd("guestIpType", SearchCriteria.Op.EQ, + // offering.getGuestIpType()); + sc.addAnd("useLocalStorage", SearchCriteria.Op.EQ, offering.getUseLocalStorage()); + } + // boolean includePublicOfferings = false; if ((caller.getType() == Account.ACCOUNT_TYPE_NORMAL || caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN) || caller.getType() == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { @@ -2376,11 +2438,20 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } // find all domain Id 
up to root domain for this account List domainIds = new ArrayList(); - DomainVO domainRecord = _domainDao.findById(caller.getDomainId()); - if (domainRecord == null) { + DomainVO domainRecord; + if (vmId != null) { + UserVmVO vmInstance = _userVmDao.findById(vmId); + domainRecord = _domainDao.findById(vmInstance.getDomainId()); + if ( domainRecord == null ){ + s_logger.error("Could not find the domainId for vmId:" + vmId); + throw new CloudAuthenticationException("Could not find the domainId for vmId:" + vmId); + } + } else { + domainRecord = _domainDao.findById(caller.getDomainId()); + if ( domainRecord == null ){ s_logger.error("Could not find the domainId for account:" + caller.getAccountName()); - throw new CloudAuthenticationException("Could not find the domainId for account:" - + caller.getAccountName()); + throw new CloudAuthenticationException("Could not find the domainId for account:" + caller.getAccountName()); + } } domainIds.add(domainRecord.getId()); while (domainRecord.getParent() != null) { @@ -2410,25 +2481,6 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { ssc.addOr("name", SearchCriteria.Op.LIKE, "%" + keyword + "%"); sc.addAnd("name", SearchCriteria.Op.SC, ssc); - } else if (vmId != null) { - UserVmVO vmInstance = _userVmDao.findById(vmId); - if ((vmInstance == null) || (vmInstance.getRemoved() != null)) { - InvalidParameterValueException ex = new InvalidParameterValueException( - "unable to find a virtual machine with specified id"); - ex.addProxyObject(vmId.toString(), "vmId"); - throw ex; - } - - _accountMgr.checkAccess(caller, null, true, vmInstance); - - ServiceOfferingVO offering = _srvOfferingDao.findByIdIncludingRemoved(vmInstance.getServiceOfferingId()); - sc.addAnd("id", SearchCriteria.Op.NEQ, offering.getId()); - - // Only return offerings with the same Guest IP type and storage - // pool preference - // sc.addAnd("guestIpType", SearchCriteria.Op.EQ, - // offering.getGuestIpType()); - 
sc.addAnd("useLocalStorage", SearchCriteria.Op.EQ, offering.getUseLocalStorage()); } if (id != null) { @@ -2450,7 +2502,6 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } return _srvOfferingJoinDao.searchAndCount(sc, searchFilter); - } @Override @@ -2470,9 +2521,23 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { String keyword = cmd.getKeyword(); String name = cmd.getName(); String networkType = cmd.getNetworkType(); + Map resourceTags = cmd.getTags(); + + SearchBuilder sb = _dcJoinDao.createSearchBuilder(); + if (resourceTags != null && !resourceTags.isEmpty()) { + SearchBuilder tagSearch = _resourceTagDao.createSearchBuilder(); + for (int count=0; count < resourceTags.size(); count++) { + tagSearch.or().op("key" + String.valueOf(count), tagSearch.entity().getKey(), SearchCriteria.Op.EQ); + tagSearch.and("value" + String.valueOf(count), tagSearch.entity().getValue(), SearchCriteria.Op.EQ); + tagSearch.cp(); + } + tagSearch.and("resourceType", tagSearch.entity().getResourceType(), SearchCriteria.Op.EQ); + sb.groupBy(sb.entity().getId()); + sb.join("tagSearch", tagSearch, sb.entity().getId(), tagSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER); + } Filter searchFilter = new Filter(DataCenterJoinVO.class, null, false, cmd.getStartIndex(), cmd.getPageSizeVal()); - SearchCriteria sc = _dcJoinDao.createSearchCriteria(); + SearchCriteria sc = sb.create(); if (networkType != null) { sc.addAnd("networkType", SearchCriteria.Op.EQ, networkType); @@ -2622,6 +2687,16 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } } + if (resourceTags != null && !resourceTags.isEmpty()) { + int count = 0; + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.Zone.toString()); + for (String key : resourceTags.keySet()) { + sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); + sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), 
resourceTags.get(key)); + count++; + } + } + return _dcJoinDao.searchAndCount(sc, searchFilter); } @@ -2808,7 +2883,8 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } // get all child domain ID's - if (_accountMgr.isAdmin(account.getType())) { + if (_accountMgr.isAdmin(account.getType()) + || (templateFilter == TemplateFilter.featured || templateFilter == TemplateFilter.community)) { List allChildDomains = _domainDao.findAllChildren(accountDomain.getPath(), accountDomain.getId()); for (DomainVO childDomain : allChildDomains) { @@ -2870,9 +2946,9 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { scTag.addAnd("tagKey", SearchCriteria.Op.EQ, key); scTag.addAnd("tagValue", SearchCriteria.Op.EQ, tags.get(key)); if (isIso) { - scTag.addAnd("tagResourceType", SearchCriteria.Op.EQ, TaggedResourceType.ISO); + scTag.addAnd("tagResourceType", SearchCriteria.Op.EQ, ResourceObjectType.ISO); } else { - scTag.addAnd("tagResourceType", SearchCriteria.Op.EQ, TaggedResourceType.Template); + scTag.addAnd("tagResourceType", SearchCriteria.Op.EQ, ResourceObjectType.Template); } scc.addOr("tagKey", SearchCriteria.Op.SC, scTag); count++; @@ -3185,63 +3261,48 @@ public class QueryManagerImpl extends ManagerBase implements QueryService { } @Override - public List listResource(ListResourceDetailsCmd cmd) { - + public List listResourceDetails(ListResourceDetailsCmd cmd) { String key = cmd.getKey(); - ResourceTag.TaggedResourceType resourceType = cmd.getResourceType(); - String resourceId = cmd.getResourceId(); - Long id = _taggedResourceMgr.getResourceId(resourceId, resourceType); + Boolean forDisplay = cmd.forDisplay(); + ResourceTag.ResourceObjectType resourceType = cmd.getResourceType(); + String resourceIdStr = cmd.getResourceId(); + long resourceId = _taggedResourceMgr.getResourceId(resourceIdStr, resourceType); + List detailList = new ArrayList(); + ResourceDetail requestedDetail = null; - if (resourceType == 
ResourceTag.TaggedResourceType.Volume) { - - List volumeDetailList; if (key == null) { - volumeDetailList = _volumeDetailDao.findDetails(id); + detailList = _resourceMetaDataMgr.getDetailsList(resourceId, resourceType, forDisplay); } else { - VolumeDetailVO volumeDetail = _volumeDetailDao.findDetail(id, key); - volumeDetailList = new LinkedList(); - volumeDetailList.add(volumeDetail); + requestedDetail = _resourceMetaDataMgr.getDetail(resourceId, resourceType, key); + if (forDisplay != null && requestedDetail.isDisplay() != forDisplay) { + requestedDetail = null; + } } - List volumeDetailResponseList = new ArrayList(); - for (VolumeDetailVO volumeDetail : volumeDetailList) { - ResourceDetailResponse volumeDetailResponse = new ResourceDetailResponse(); - volumeDetailResponse.setResourceId(id.toString()); - volumeDetailResponse.setName(volumeDetail.getName()); - volumeDetailResponse.setValue(volumeDetail.getValue()); - volumeDetailResponse.setResourceType(ResourceTag.TaggedResourceType.Volume.toString()); - volumeDetailResponse.setObjectName("volumedetail"); - volumeDetailResponseList.add(volumeDetailResponse); - } - - return volumeDetailResponseList; - + List responseList = new ArrayList(); + if (requestedDetail != null) { + ResourceDetailResponse detailResponse = createResourceDetailsResponse(requestedDetail, resourceType); + responseList.add(detailResponse); } else { - - List nicDetailList; - if (key == null) { - nicDetailList = _nicDetailDao.findDetails(id); - } else { - NicDetailVO nicDetail = _nicDetailDao.findDetail(id, key); - nicDetailList = new LinkedList(); - nicDetailList.add(nicDetail); + for (ResourceDetail detail : detailList) { + ResourceDetailResponse detailResponse = createResourceDetailsResponse(detail, resourceType); + responseList.add(detailResponse); + } } - List nicDetailResponseList = new ArrayList(); - for (NicDetailVO nicDetail : nicDetailList) { - ResourceDetailResponse nicDetailResponse = new ResourceDetailResponse(); - // String uuid = 
ApiDBUtils.findN - nicDetailResponse.setName(nicDetail.getName()); - nicDetailResponse.setValue(nicDetail.getValue()); - nicDetailResponse.setResourceType(ResourceTag.TaggedResourceType.Nic.toString()); - nicDetailResponse.setObjectName("nicdetail"); - nicDetailResponseList.add(nicDetailResponse); - } - - return nicDetailResponseList; - + return responseList; } + + protected ResourceDetailResponse createResourceDetailsResponse(ResourceDetail requestedDetail, ResourceTag.ResourceObjectType resourceType) { + ResourceDetailResponse resourceDetailResponse = new ResourceDetailResponse(); + resourceDetailResponse.setResourceId(String.valueOf(requestedDetail.getResourceId())); + resourceDetailResponse.setName(requestedDetail.getName()); + resourceDetailResponse.setValue(requestedDetail.getValue()); + resourceDetailResponse.setForDisplay(requestedDetail.isDisplay()); + resourceDetailResponse.setResourceType(resourceType.toString().toString()); + resourceDetailResponse.setObjectName("resourcedetail"); + return resourceDetailResponse; } @Override diff --git a/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java index 63bf563bec6..b28ace05b1f 100644 --- a/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DataCenterJoinDaoImpl.java @@ -21,24 +21,24 @@ import java.util.List; import javax.ejb.Local; import javax.inject.Inject; +import org.apache.cloudstack.api.response.ResourceTagResponse; +import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.DataCenterJoinVO; +import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.dc.DataCenter; - -import org.apache.cloudstack.api.response.ZoneResponse; -import 
org.apache.cloudstack.context.CallContext; - +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import org.springframework.stereotype.Component; - @Component @Local(value={DataCenterJoinDao.class}) public class DataCenterJoinDaoImpl extends GenericDaoBase implements DataCenterJoinDao { @@ -101,6 +101,16 @@ public class DataCenterJoinDaoImpl extends GenericDaoBase resourceTags = ApiDBUtils.listResourceTagViewByResourceUUID(dataCenter.getUuid(), ResourceObjectType.Zone); + for (ResourceTagJoinVO resourceTag : resourceTags) { + ResourceTagResponse tagResponse = ApiDBUtils.newResourceTagResponse(resourceTag, false); + zoneResponse.addTag(tagResponse); + } + + zoneResponse.setResourceDetails(ApiDBUtils.getResourceDetails(dataCenter.getId(), ResourceObjectType.Zone)); + zoneResponse.setObjectName("zone"); return zoneResponse; } diff --git a/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java index 385ca3625dc..e29bb2bc04a 100644 --- a/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/DiskOfferingJoinDaoImpl.java @@ -21,20 +21,17 @@ import java.util.List; import javax.ejb.Local; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + +import org.apache.cloudstack.api.response.DiskOfferingResponse; import com.cloud.api.query.vo.DiskOfferingJoinVO; -import org.apache.cloudstack.api.response.DiskOfferingResponse; import com.cloud.offering.DiskOffering; import com.cloud.offering.ServiceOffering; -import com.cloud.storage.DiskOfferingVO; -import com.cloud.storage.DiskOfferingVO.Type; import com.cloud.utils.db.Attribute; -import com.cloud.utils.db.Filter; import com.cloud.utils.db.GenericDaoBase; import 
com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.SearchCriteria.Op; -import org.springframework.stereotype.Component; @Component @Local(value={DiskOfferingJoinDao.class}) @@ -42,7 +39,7 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase dofIdSearch; + private final SearchBuilder dofIdSearch; private final Attribute _typeAttr; protected DiskOfferingJoinDaoImpl() { @@ -53,7 +50,7 @@ public class DiskOfferingJoinDaoImpl extends GenericDaoBase searchIncludingRemoved(SearchCriteria sc, final Filter filter, final Boolean lock, final boolean cache) { - sc.addAnd(_typeAttr, Op.EQ, Type.Disk); - return super.searchIncludingRemoved(sc, filter, lock, cache); - } - - @Override - public List customSearchIncludingRemoved(SearchCriteria sc, final Filter filter) { - sc.addAnd(_typeAttr, Op.EQ, Type.Disk); - return super.customSearchIncludingRemoved(sc, filter); - } } diff --git a/server/src/com/cloud/api/query/dao/ResourceTagJoinDao.java b/server/src/com/cloud/api/query/dao/ResourceTagJoinDao.java index 57fc130de55..769bc9b1e6e 100644 --- a/server/src/com/cloud/api/query/dao/ResourceTagJoinDao.java +++ b/server/src/com/cloud/api/query/dao/ResourceTagJoinDao.java @@ -22,6 +22,7 @@ import org.apache.cloudstack.api.response.ResourceTagResponse; import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.server.ResourceTag; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.utils.db.GenericDao; public interface ResourceTagJoinDao extends GenericDao { @@ -31,4 +32,6 @@ public interface ResourceTagJoinDao extends GenericDao ResourceTagJoinVO newResourceTagView(ResourceTag vr); List searchByIds(Long... 
ids); + + List listBy(String resourceUUID, ResourceObjectType resourceType); } diff --git a/server/src/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java index 06821db99cd..27a76af01c5 100644 --- a/server/src/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ResourceTagJoinDaoImpl.java @@ -24,16 +24,17 @@ import javax.inject.Inject; import org.apache.cloudstack.api.response.ResourceTagResponse; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.api.ApiResponseHelper; import com.cloud.api.query.vo.ResourceTagJoinVO; import com.cloud.server.ResourceTag; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.SearchCriteria.Op; @Component @Local(value={ResourceTagJoinDao.class}) @@ -46,6 +47,9 @@ public class ResourceTagJoinDaoImpl extends GenericDaoBase tagSearch; private final SearchBuilder tagIdSearch; + + private final SearchBuilder AllFieldsSearch; + protected ResourceTagJoinDaoImpl() { @@ -58,12 +62,14 @@ public class ResourceTagJoinDaoImpl extends GenericDaoBase listBy(String resourceUUID, ResourceObjectType resourceType) { + SearchCriteria sc = AllFieldsSearch.create(); + sc.setParameters("uuid", resourceUUID); + sc.setParameters("resourceType", resourceType); + return listBy(sc); + } @Override diff --git a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java index 945e67b406b..3bc6c78c812 100644 --- a/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/ServiceOfferingJoinDaoImpl.java @@ -17,22 +17,20 @@ package com.cloud.api.query.dao; import 
java.util.List; -import java.util.Map; import javax.ejb.Local; +import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.vo.ServiceOfferingJoinVO; -import org.apache.cloudstack.api.response.ServiceOfferingResponse; - import com.cloud.offering.ServiceOffering; -import com.cloud.offering.NetworkOffering.Detail; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import org.springframework.stereotype.Component; @Component @Local(value={ServiceOfferingJoinDao.class}) @@ -48,7 +46,7 @@ public class ServiceOfferingJoinDaoImpl extends GenericDaoBase implem } } + // set resource details map + // only hypervisortoolsversion can be returned to the end user } + if (userVm.getDetailName() != null && userVm.getDetailName().equalsIgnoreCase(VmDetailConstants.HYPERVISOR_TOOLS_VERSION)){ + Map resourceDetails = new HashMap(); + resourceDetails.put(userVm.getDetailName(), userVm.getDetailValue()); + userVmResponse.setDetails(resourceDetails); + } + userVmResponse.setObjectName(objectName); if (userVm.isDynamicallyScalable() == null) { userVmResponse.setDynamicallyScalable(false); diff --git a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java index 5ca168c92b4..fdee69006c6 100644 --- a/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java +++ b/server/src/com/cloud/api/query/dao/VolumeJoinDaoImpl.java @@ -147,6 +147,10 @@ public class VolumeJoinDaoImpl extends GenericDaoBase implem } } + if (caller.getType() == Account.ACCOUNT_TYPE_ADMIN){ + volResponse.setPath(volume.getPath()); + } + // populate owner. 
ApiResponseHelper.populateOwner(volResponse, volume); @@ -183,6 +187,7 @@ public class VolumeJoinDaoImpl extends GenericDaoBase implem Long poolId = volume.getPoolId(); String poolName = (poolId == null) ? "none" : volume.getPoolName(); volResponse.setStoragePoolName(poolName); + volResponse.setStoragePoolId(volume.getPoolUuid()); } volResponse.setAttached(volume.getAttached()); diff --git a/server/src/com/cloud/api/query/vo/AccountJoinVO.java b/server/src/com/cloud/api/query/vo/AccountJoinVO.java index fbcc9342b22..2ec45d3f7e0 100644 --- a/server/src/com/cloud/api/query/vo/AccountJoinVO.java +++ b/server/src/com/cloud/api/query/vo/AccountJoinVO.java @@ -17,17 +17,20 @@ package com.cloud.api.query.vo; import java.util.Date; + import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.EnumType; import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.user.Account.State; -import com.cloud.utils.db.GenericDao; + import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.user.Account.State; +import com.cloud.utils.db.GenericDao; + @Entity @Table(name="account_view") public class AccountJoinVO extends BaseViewVO implements InternalIdentity, Identity { @@ -45,7 +48,6 @@ public class AccountJoinVO extends BaseViewVO implements InternalIdentity, Ident @Column(name="type") private short type; - @Column(name="state") @Enumerated(value=EnumType.STRING) private State state; @@ -59,7 +61,6 @@ public class AccountJoinVO extends BaseViewVO implements InternalIdentity, Ident @Column(name="network_domain") private String networkDomain; - @Column(name="domain_id") private long domainId; @@ -72,7 +73,6 @@ public class AccountJoinVO extends BaseViewVO implements InternalIdentity, Ident @Column(name="domain_path") private String domainPath = null; - @Column(name="data_center_id") private long dataCenterId; @@ -94,7 +94,6 @@ public class 
AccountJoinVO extends BaseViewVO implements InternalIdentity, Ident @Column(name="vmTotal") private Long vmTotal; - @Column(name="ipLimit") private Long ipLimit; @@ -134,42 +133,36 @@ public class AccountJoinVO extends BaseViewVO implements InternalIdentity, Ident @Column(name="projectTotal") private Long projectTotal; - @Column(name="networkLimit") private Long networkLimit; @Column(name="networkTotal") private Long networkTotal; - @Column(name="vpcLimit") private Long vpcLimit; @Column(name="vpcTotal") private Long vpcTotal; - @Column(name="cpuLimit") private Long cpuLimit; @Column(name="cpuTotal") private Long cpuTotal; - @Column(name="memoryLimit") private Long memoryLimit; @Column(name="memoryTotal") private Long memoryTotal; - @Column(name="primaryStorageLimit") private Long primaryStorageLimit; @Column(name="primaryStorageTotal") private Long primaryStorageTotal; - @Column(name="secondaryStorageLimit") private Long secondaryStorageLimit; @@ -184,504 +177,204 @@ public class AccountJoinVO extends BaseViewVO implements InternalIdentity, Ident @Column(name="job_status") private int jobStatus; - + @Column(name = "default") boolean isDefault; public AccountJoinVO() { } - @Override public long getId() { return id; } - - @Override - public void setId(long id) { - this.id = id; - } - - @Override public String getUuid() { return uuid; } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public String getAccountName() { return accountName; } - - public void setAccountName(String accountName) { - this.accountName = accountName; - } - - public short getType() { return type; } - - public void setType(short type) { - this.type = type; - } - - public State getState() { return state; } - - public void setState(State state) { - this.state = state; - } - - public Date getRemoved() { return removed; } - - public void setRemoved(Date removed) { - this.removed = removed; - } - - public boolean isNeedsCleanup() { return needsCleanup; } - - public void 
setNeedsCleanup(boolean needsCleanup) { - this.needsCleanup = needsCleanup; - } - - public String getNetworkDomain() { return networkDomain; } - - public void setNetworkDomain(String networkDomain) { - this.networkDomain = networkDomain; - } - - public long getDomainId() { return domainId; } - - public void setDomainId(long domainId) { - this.domainId = domainId; - } - - public String getDomainUuid() { return domainUuid; } - - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - - public String getDomainName() { return domainName; } - - public void setDomainName(String domainName) { - this.domainName = domainName; - } - - public String getDomainPath() { return domainPath; } - - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - - public long getDataCenterId() { return dataCenterId; } - - public void setDataCenterId(long dataCenterId) { - this.dataCenterId = dataCenterId; - } - - public String getDataCenterUuid() { return dataCenterUuid; } - - public void setDataCenterUuid(String dataCenterUuid) { - this.dataCenterUuid = dataCenterUuid; - } - - public String getDataCenterName() { return dataCenterName; } - - public void setDataCenterName(String dataCenterName) { - this.dataCenterName = dataCenterName; - } - - public Long getBytesReceived() { return bytesReceived; } - - public void setBytesReceived(Long bytesReceived) { - this.bytesReceived = bytesReceived; - } - - public Long getBytesSent() { return bytesSent; } - - public void setBytesSent(Long bytesSent) { - this.bytesSent = bytesSent; - } - - - - public Long getVmTotal() { return vmTotal; } - - public void setVmTotal(Long vmTotal) { - this.vmTotal = vmTotal; - } - - - - - public Long getIpTotal() { return ipTotal; } - - public void setIpTotal(Long ipTotal) { - this.ipTotal = ipTotal; - } - - public Long getIpFree() { return ipFree; } - - public void setIpFree(Long ipFree) { - this.ipFree = ipFree; - } - - - public Long getVolumeTotal() { return 
volumeTotal; } - - public void setVolumeTotal(Long volumeTotal) { - this.volumeTotal = volumeTotal; - } - - - public Long getSnapshotTotal() { return snapshotTotal; } - - public void setSnapshotTotal(Long snapshotTotal) { - this.snapshotTotal = snapshotTotal; - } - - - - public Long getTemplateTotal() { return templateTotal; } - - public void setTemplateTotal(Long templateTotal) { - this.templateTotal = templateTotal; - } - - public Integer getVmStopped() { return vmStopped; } - - public void setVmStopped(Integer vmStopped) { - this.vmStopped = vmStopped; - } - - public Integer getVmRunning() { return vmRunning; } - - public void setVmRunning(Integer vmRunning) { - this.vmRunning = vmRunning; - } - - - public Long getProjectTotal() { return projectTotal; } - - public void setProjectTotal(Long projectTotal) { - this.projectTotal = projectTotal; - } - - - public Long getNetworkTotal() { return networkTotal; } - - public void setNetworkTotal(Long networkTotal) { - this.networkTotal = networkTotal; - } - - public Long getVpcTotal() { return vpcTotal; } - - public void setVpcTotal(Long vpcTotal) { - this.vpcTotal = vpcTotal; - } - - public Long getCpuTotal() { return cpuTotal; } - - public void setCpuTotal(Long cpuTotal) { - this.cpuTotal = cpuTotal; - } - - public Long getMemoryTotal() { return memoryTotal; } - - public void setMemoryTotal(Long memoryTotal) { - this.memoryTotal = memoryTotal; - } - - public Long getPrimaryStorageTotal() { return primaryStorageTotal; } - - public void setPrimaryStorageTotal(Long primaryStorageTotal) { - this.primaryStorageTotal = primaryStorageTotal; - } - public Long getSecondaryStorageTotal() { return secondaryStorageTotal; } - - public void setSecondaryStorageTotal(Long secondaryStorageTotal) { - this.secondaryStorageTotal = secondaryStorageTotal; - } - - public Long getVmLimit() { return vmLimit; } - - public void setVmLimit(Long vmLimit) { - this.vmLimit = vmLimit; - } - - public Long getIpLimit() { return ipLimit; } - - public 
void setIpLimit(Long ipLimit) { - this.ipLimit = ipLimit; - } - - public Long getVolumeLimit() { return volumeLimit; } - - public void setVolumeLimit(Long volumeLimit) { - this.volumeLimit = volumeLimit; - } - - public Long getSnapshotLimit() { return snapshotLimit; } - - public void setSnapshotLimit(Long snapshotLimit) { - this.snapshotLimit = snapshotLimit; - } - - public Long getTemplateLimit() { return templateLimit; } - - public void setTemplateLimit(Long templateLimit) { - this.templateLimit = templateLimit; - } - - public Long getProjectLimit() { return projectLimit; } - - public void setProjectLimit(Long projectLimit) { - this.projectLimit = projectLimit; - } - - public Long getNetworkLimit() { return networkLimit; } - - public void setNetworkLimit(Long networkLimit) { - this.networkLimit = networkLimit; - } - - public Long getVpcLimit() { return vpcLimit; } - - public void setVpcLimit(Long vpcLimit) { - this.vpcLimit = vpcLimit; - } - - public Long getCpuLimit() { return cpuLimit; } - - public void setCpuLimit(Long cpuLimit) { - this.cpuLimit = cpuLimit; - } - - public Long getMemoryLimit() { return memoryLimit; } - - public void setMemoryLimit(Long memoryLimit) { - this.memoryLimit = memoryLimit; - } - - public Long getPrimaryStorageLimit() { return primaryStorageLimit; } - - public void setPrimaryStorageLimit(Long primaryStorageLimit) { - this.primaryStorageLimit = primaryStorageLimit; - } - - public Long getSecondaryStorageLimit() { return secondaryStorageLimit; } - - public void setSecondaryStorageLimit(Long secondaryStorageLimit) { - this.secondaryStorageLimit = secondaryStorageLimit; - } - - public Long getJobId() { return jobId; } - - public void setJobId(Long jobId) { - this.jobId = jobId; - } - - public String getJobUuid() { return jobUuid; } - - public void setJobUuid(String jobUuid) { - this.jobUuid = jobUuid; - } - - public int getJobStatus() { return jobStatus; } - - public void setJobStatus(int jobStatus) { - this.jobStatus = jobStatus; - } - 
- public boolean isDefault() { return isDefault; } - - - public void setDefault(boolean isDefault) { - this.isDefault = isDefault; - } - } diff --git a/server/src/com/cloud/api/query/vo/AclGroupJoinVO.java b/server/src/com/cloud/api/query/vo/AclGroupJoinVO.java index 0a32f4d16ba..5d67e348c21 100644 --- a/server/src/com/cloud/api/query/vo/AclGroupJoinVO.java +++ b/server/src/com/cloud/api/query/vo/AclGroupJoinVO.java @@ -105,10 +105,6 @@ public class AclGroupJoinVO extends BaseViewVO { return id; } - @Override - public void setId(long id) { - this.id = id; - } public String getName() { return name; diff --git a/server/src/com/cloud/api/query/vo/AclRoleJoinVO.java b/server/src/com/cloud/api/query/vo/AclRoleJoinVO.java index 97809ef3537..a18f0342f03 100644 --- a/server/src/com/cloud/api/query/vo/AclRoleJoinVO.java +++ b/server/src/com/cloud/api/query/vo/AclRoleJoinVO.java @@ -73,10 +73,6 @@ public class AclRoleJoinVO extends BaseViewVO { return id; } - @Override - public void setId(long id) { - this.id = id; - } public String getName() { return name; diff --git a/server/src/com/cloud/api/query/vo/AffinityGroupJoinVO.java b/server/src/com/cloud/api/query/vo/AffinityGroupJoinVO.java index ae63a8a6627..3710957cbec 100644 --- a/server/src/com/cloud/api/query/vo/AffinityGroupJoinVO.java +++ b/server/src/com/cloud/api/query/vo/AffinityGroupJoinVO.java @@ -91,7 +91,6 @@ public class AffinityGroupJoinVO extends BaseViewVO implements ControlledViewEnt @Enumerated(value = EnumType.STRING) ControlledEntity.ACLType aclType; - public AffinityGroupJoinVO() { } @@ -100,157 +99,83 @@ public class AffinityGroupJoinVO extends BaseViewVO implements ControlledViewEnt return id; } - @Override - public void setId(long id) { - this.id = id; - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - public String getType() { 
return type; } - public void setType(String type) { - this.type = type; - } - @Override public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - @Override public String getAccountUuid() { return accountUuid; } - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - @Override public String getAccountName() { return accountName; } - public void setAccountName(String accountName) { - this.accountName = accountName; - } - @Override public short getAccountType() { return accountType; } - public void setAccountType(short accountType) { - this.accountType = accountType; - } - @Override public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - @Override public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - @Override public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - @Override public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public String getDescription() { return description; } - public void setDescription(String description) { - this.description = description; - } - public long getVmId() { return vmId; } - public void setVmId(long vmId) { - this.vmId = vmId; - } - public String getVmUuid() { return vmUuid; } - public void setVmUuid(String vmUuid) { - this.vmUuid = vmUuid; - } - public String getVmName() { return vmName; } - public void setVmName(String vmName) { - this.vmName = vmName; - } - public String getVmDisplayName() { return vmDisplayName; } - public void setVmDisplayName(String vmDisplayName) { - this.vmDisplayName = vmDisplayName; - } - public VirtualMachine.State getVmState() { return vmState; } - public void 
setVmState(VirtualMachine.State vmState) { - this.vmState = vmState; - } - @Override public String getProjectUuid() { // TODO Auto-generated method stub @@ -267,9 +192,5 @@ public class AffinityGroupJoinVO extends BaseViewVO implements ControlledViewEnt return aclType; } - public void setAclType(ControlledEntity.ACLType aclType) { - this.aclType = aclType; - } - } diff --git a/server/src/com/cloud/api/query/vo/AsyncJobJoinVO.java b/server/src/com/cloud/api/query/vo/AsyncJobJoinVO.java index c45be1cc642..a30e253f4d5 100644 --- a/server/src/com/cloud/api/query/vo/AsyncJobJoinVO.java +++ b/server/src/com/cloud/api/query/vo/AsyncJobJoinVO.java @@ -17,6 +17,7 @@ package com.cloud.api.query.vo; import java.util.Date; + import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.EnumType; @@ -24,12 +25,12 @@ import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.utils.db.GenericDao; - import org.apache.cloudstack.api.ApiCommandJobType; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.utils.db.GenericDao; + @Entity @Table(name="async_job_view") public class AsyncJobJoinVO extends BaseViewVO implements InternalIdentity, Identity { @@ -53,7 +54,6 @@ public class AsyncJobJoinVO extends BaseViewVO implements InternalIdentity, Iden @Column(name="account_type") private short accountType; - @Column(name="domain_id") private long domainId; @@ -66,7 +66,6 @@ public class AsyncJobJoinVO extends BaseViewVO implements InternalIdentity, Iden @Column(name="domain_path") private String domainPath = null; - @Column(name="user_id") private long userId; @@ -104,231 +103,96 @@ public class AsyncJobJoinVO extends BaseViewVO implements InternalIdentity, Iden @Column(name="instance_uuid") private String instanceUuid; - public AsyncJobJoinVO() { } - @Override public long getId() { return id; } - - @Override - public void setId(long id) { - 
this.id = id; - } - - @Override public String getUuid() { return uuid; } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public long getAccountId() { return accountId; } - - public void setAccountId(long accountId) { - this.accountId = accountId; - } - - public String getAccountUuid() { return accountUuid; } - - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - - public String getAccountName() { return accountName; } - - public void setAccountName(String accountName) { - this.accountName = accountName; - } - - public short getAccountType() { return accountType; } - - public void setAccountType(short accountType) { - this.accountType = accountType; - } - - public long getDomainId() { return domainId; } - - public void setDomainId(long domainId) { - this.domainId = domainId; - } - - public String getDomainUuid() { return domainUuid; } - - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - - public String getDomainName() { return domainName; } - - public void setDomainName(String domainName) { - this.domainName = domainName; - } - - public String getDomainPath() { return domainPath; } - - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - - public long getUserId() { return userId; } - - public void setUserId(long userId) { - this.userId = userId; - } - - public String getUserUuid() { return userUuid; } - - public void setUserUuid(String userUuid) { - this.userUuid = userUuid; - } - - public String getCmd() { return cmd; } - - public void setCmd(String cmd) { - this.cmd = cmd; - } - - public int getStatus() { return status; } - - public void setStatus(int status) { - this.status = status; - } - - public int getProcessStatus() { return processStatus; } - - public void setProcessStatus(int processStatus) { - this.processStatus = processStatus; - } - - public int getResultCode() { return resultCode; } - - public void setResultCode(int resultCode) { 
- this.resultCode = resultCode; - } - - public String getResult() { return result; } - - public void setResult(String result) { - this.result = result; - } - - public Date getCreated() { return created; } - - public void setCreated(Date created) { - this.created = created; - } - - public Date getRemoved() { return removed; } - - public void setRemoved(Date removed) { - this.removed = removed; - } - - public ApiCommandJobType getInstanceType() { return instanceType; } - - public void setInstanceType(ApiCommandJobType instanceType) { - this.instanceType = instanceType; - } - - public Long getInstanceId() { return instanceId; } - - public void setInstanceId(Long instanceId) { - this.instanceId = instanceId; - } - - public String getInstanceUuid() { return instanceUuid; } - - - public void setInstanceUuid(String instanceUuid) { - this.instanceUuid = instanceUuid; - } - } diff --git a/server/src/com/cloud/api/query/vo/BaseViewVO.java b/server/src/com/cloud/api/query/vo/BaseViewVO.java index 6b1ddd6561a..923bfd95dcf 100644 --- a/server/src/com/cloud/api/query/vo/BaseViewVO.java +++ b/server/src/com/cloud/api/query/vo/BaseViewVO.java @@ -20,9 +20,6 @@ public abstract class BaseViewVO { public abstract long getId(); - public abstract void setId(long id); - - @Override public int hashCode() { final int prime = 31; diff --git a/server/src/com/cloud/api/query/vo/DataCenterJoinVO.java b/server/src/com/cloud/api/query/vo/DataCenterJoinVO.java index c6a80e7d5b2..70fc35db837 100644 --- a/server/src/com/cloud/api/query/vo/DataCenterJoinVO.java +++ b/server/src/com/cloud/api/query/vo/DataCenterJoinVO.java @@ -25,13 +25,13 @@ import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + import com.cloud.dc.DataCenter.NetworkType; import com.cloud.org.Grouping.AllocationState; import com.cloud.utils.db.GenericDao; -import 
org.apache.cloudstack.api.Identity; -import org.apache.cloudstack.api.InternalIdentity; - @Entity @Table(name="data_center_view") public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Identity { @@ -117,7 +117,6 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id @Column(name = "account_id") private long accountId; - public DataCenterJoinVO() { } @@ -126,191 +125,94 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id return id; } - @Override - public void setId(long id) { - this.id = id; - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - - - public String getDescription() { return description; } - public void setDescription(String description) { - this.description = description; - } - public String getDns1() { return dns1; } - public void setDns1(String dns1) { - this.dns1 = dns1; - } - public String getDns2() { return dns2; } - public void setDns2(String dns2) { - this.dns2 = dns2; - } - public String getInternalDns1() { return internalDns1; } - public void setInternalDns1(String internalDns1) { - this.internalDns1 = internalDns1; - } - public String getInternalDns2() { return internalDns2; } - public void setInternalDns2(String internalDns2) { - this.internalDns2 = internalDns2; - } - public String getGuestNetworkCidr() { return guestNetworkCidr; } - public void setGuestNetworkCidr(String guestNetworkCidr) { - this.guestNetworkCidr = guestNetworkCidr; - } - public String getDomain() { return domain; } - public void setDomain(String domain) { - this.domain = domain; - } - public NetworkType getNetworkType() { return networkType; } - public void setNetworkType(NetworkType networkType) { - this.networkType = networkType; - } - public String getDhcpProvider() { return dhcpProvider; } - public void 
setDhcpProvider(String dhcpProvider) { - this.dhcpProvider = dhcpProvider; - } - public String getZoneToken() { return zoneToken; } - public void setZoneToken(String zoneToken) { - this.zoneToken = zoneToken; - } - public AllocationState getAllocationState() { return allocationState; } - public void setAllocationState(AllocationState allocationState) { - this.allocationState = allocationState; - } - public boolean isSecurityGroupEnabled() { return securityGroupEnabled; } - public void setSecurityGroupEnabled(boolean securityGroupEnabled) { - this.securityGroupEnabled = securityGroupEnabled; - } - - public boolean isLocalStorageEnabled() { return localStorageEnabled; } - public void setLocalStorageEnabled(boolean localStorageEnabled) { - this.localStorageEnabled = localStorageEnabled; - } - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; + public String getIp6Dns1() { + return ip6Dns1; } - public String getIp6Dns1() { - return ip6Dns1; - } - - public void setIp6Dns1(String ip6Dns1) { - this.ip6Dns1 = ip6Dns1; - } - - public String getIp6Dns2() { - return ip6Dns2; - } - - public void setIp6Dns2(String ip6Dns2) { - this.ip6Dns2 = ip6Dns2; - } - + public String getIp6Dns2() { + return ip6Dns2; + } public String getAffinityGroupUuid() { return affinityGroupUuid; @@ -319,8 +221,4 @@ public class DataCenterJoinVO extends BaseViewVO implements InternalIdentity, Id public long 
getAccountId() { return accountId; } - - public void setAccountId(long accountId) { - this.accountId = accountId; - } } diff --git a/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java b/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java index 58e8370644c..9a679de1f25 100644 --- a/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java +++ b/server/src/com/cloud/api/query/vo/DiskOfferingJoinVO.java @@ -23,12 +23,12 @@ import javax.persistence.Entity; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.storage.DiskOfferingVO.Type; -import com.cloud.utils.db.GenericDao; - import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.storage.DiskOfferingVO.Type; +import com.cloud.utils.db.GenericDao; + @Entity @Table(name="disk_offering_view") public class DiskOfferingJoinVO extends BaseViewVO implements InternalIdentity, Identity { @@ -109,7 +109,6 @@ public class DiskOfferingJoinVO extends BaseViewVO implements InternalIdentity, @Column(name="display_offering") boolean displayOffering; - public DiskOfferingJoinVO() { } @@ -118,202 +117,100 @@ public class DiskOfferingJoinVO extends BaseViewVO implements InternalIdentity, return id; } - @Override - public void setId(long id) { - this.id = id; - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - public String getDisplayText() { return displayText; } - public void setDisplayText(String displayText) { - this.displayText = displayText; - } - public long getDiskSize() { return diskSize; } - public void setDiskSize(long diskSize) { - this.diskSize = diskSize; - } - public String getTags() { return tags; } - public void setTags(String tags) { - this.tags = tags; - } - public boolean isUseLocalStorage() { return useLocalStorage; } - public void 
setUseLocalStorage(boolean useLocalStorage) { - this.useLocalStorage = useLocalStorage; - } - public boolean isSystemUse() { return systemUse; } - public void setSystemUse(boolean systemUse) { - this.systemUse = systemUse; - } - public boolean isCustomized() { return customized; } - public void setCustomized(boolean customized) { - this.customized = customized; - } - public Boolean isCustomizedIops() { return customizedIops; } - public void setCustomizedIops(Boolean customizedIops) { - this.customizedIops = customizedIops; - } - public Long getMinIops() { return minIops; } - public void setMinIops(Long minIops) { - this.minIops = minIops; - } - public Long getMaxIops() { return maxIops; } - public void setMaxIops(Long maxIops) { - this.maxIops = maxIops; - } - public boolean isDisplayOffering() { return displayOffering; } - public void setDisplayOffering(boolean displayOffering) { - this.displayOffering = displayOffering; - } - public Date getCreated() { return created; } - public void setCreated(Date created) { - this.created = created; - } - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public int getSortKey() { return sortKey; } - public void setSortKey(int sortKey) { - this.sortKey = sortKey; - } - public Type getType() { return type; } - public void setType(Type type) { - this.type = type; - } - - public void setBytesReadRate(Long bytesReadRate) { - 
this.bytesReadRate = bytesReadRate; - } - public Long getBytesReadRate() { return bytesReadRate; } - public void setBytesWriteRate(Long bytesWriteRate) { - this.bytesWriteRate = bytesWriteRate; - } - public Long getBytesWriteRate() { return bytesWriteRate; } - public void setIopsReadRate(Long iopsReadRate) { - this.iopsReadRate = iopsReadRate; - } - public Long getIopsReadRate() { return iopsReadRate; } - public void setIopsWriteRate(Long iopsWriteRate) { - this.iopsWriteRate = iopsWriteRate; - } - public Long getIopsWriteRate() { return iopsWriteRate; } - } diff --git a/server/src/com/cloud/api/query/vo/DomainRouterJoinVO.java b/server/src/com/cloud/api/query/vo/DomainRouterJoinVO.java index bfe44869b2a..33865e258b9 100644 --- a/server/src/com/cloud/api/query/vo/DomainRouterJoinVO.java +++ b/server/src/com/cloud/api/query/vo/DomainRouterJoinVO.java @@ -44,7 +44,6 @@ public class DomainRouterJoinVO extends BaseViewVO implements ControlledViewEnti @Column(name="name", updatable=false, nullable=false, length=255) private String name = null; - @Column(name="account_id") private long accountId; @@ -138,7 +137,6 @@ public class DomainRouterJoinVO extends BaseViewVO implements ControlledViewEnti @Column(name="service_offering_name") private String serviceOfferingName; - @Column(name = "vpc_id") private long vpcId; @@ -197,7 +195,6 @@ public class DomainRouterJoinVO extends BaseViewVO implements ControlledViewEnti @Enumerated(value=EnumType.STRING) private TrafficType trafficType; - @Column(name="project_id") private long projectId; @@ -216,7 +213,6 @@ public class DomainRouterJoinVO extends BaseViewVO implements ControlledViewEnti @Column(name="job_status") private int jobStatus; - @Column(name="uuid") private String uuid; @@ -236,772 +232,275 @@ public class DomainRouterJoinVO extends BaseViewVO implements ControlledViewEnti @Column(name="guest_type") @Enumerated(value=EnumType.STRING) private GuestType guestType; - + @Column(name="role") 
@Enumerated(value=EnumType.STRING) private VirtualRouter.Role role; - public DomainRouterJoinVO() { } - @Override public long getId() { return id; } - - @Override - public void setId(long id) { - this.id = id; - - } - - - - @Override public String getUuid() { return uuid; } - - - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - - public String getName() { return name; } - - public void setName(String name) { - this.name = name; - } - - - - @Override public long getAccountId() { return accountId; } - - public void setAccountId(long accountId) { - this.accountId = accountId; - } - - @Override public String getAccountUuid() { return accountUuid; } - - - - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - - - - @Override public String getAccountName() { return accountName; } - - public void setAccountName(String accountName) { - this.accountName = accountName; - } - - @Override public short getAccountType() { return accountType; } - - public void setAccountType(short accountType) { - this.accountType = accountType; - } - - @Override public long getDomainId() { return domainId; } - - public void setDomainId(long domainId) { - this.domainId = domainId; - } - @Override public String getDomainUuid() { return domainUuid; } - - - - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - - - - @Override public String getDomainName() { return domainName; } - - public void setDomainName(String domainName) { - this.domainName = domainName; - } - @Override public String getDomainPath() { return domainPath; } - - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - - public State getState() { return state; } - - public void setState(State state) { - this.state = state; - } - - public Date getCreated() { return created; } - - public void setCreated(Date created) { - this.created = created; - } - - public Date getRemoved() { return removed; } - - public void 
setRemoved(Date removed) { - this.removed = removed; - } - - public String getInstanceName() { return instanceName; } - - public void setInstanceName(String instanceName) { - this.instanceName = instanceName; - } - - public String getPodUuid() { return podUuid; } - - - - public void setPodUuid(String podUuid) { - this.podUuid = podUuid; - } - - public String getDataCenterUuid() { return dataCenterUuid; } - public void setDataCenterUuid(String zoneUuid) { - this.dataCenterUuid = zoneUuid; - } - public String getDataCenterName() { return dataCenterName; } - - public void setDataCenterName(String zoneName) { - this.dataCenterName = zoneName; - } - - public Long getHostId() { return hostId; } - - public void setHostId(long hostId) { - this.hostId = hostId; - } - - public String getHostUuid() { return hostUuid; } - - - - public void setHostUuid(String hostUuid) { - this.hostUuid = hostUuid; - } - - - - public String getHostName() { return hostName; } - - public void setHostName(String hostName) { - this.hostName = hostName; - } - - public long getTemplateId() { return templateId; } - - public void setTemplateId(long templateId) { - this.templateId = templateId; - } - - - public String getTemplateUuid() { return templateUuid; } - - - - public void setTemplateUuid(String templateUuid) { - this.templateUuid = templateUuid; - } - - - - - public String getServiceOfferingUuid() { return serviceOfferingUuid; } - - public void setServiceOfferingUuid(String serviceOfferingUuid) { - this.serviceOfferingUuid = serviceOfferingUuid; - } - - - - public String getServiceOfferingName() { return serviceOfferingName; } - - public void setServiceOfferingName(String serviceOfferingName) { - this.serviceOfferingName = serviceOfferingName; - } - public long getVpcId() { return vpcId; } - public void setVpcId(long vpcId) { - this.vpcId = vpcId; - } - - - - public long getNicId() { return nicId; } - - public void setNicId(long nicId) { - this.nicId = nicId; - } - - public boolean 
isDefaultNic() { return isDefaultNic; } - - public void setDefaultNic(boolean isDefaultNic) { - this.isDefaultNic = isDefaultNic; - } - - public String getIpAddress() { return ipAddress; } - - public void setIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - } - - public String getGateway() { return gateway; } - - public void setGateway(String gateway) { - this.gateway = gateway; - } - - public String getNetmask() { return netmask; } - - public void setNetmask(String netmask) { - this.netmask = netmask; - } - - public String getMacAddress() { return macAddress; } - - public void setMacAddress(String macAddress) { - this.macAddress = macAddress; - } - - public URI getBroadcastUri() { return broadcastUri; } - - public void setBroadcastUri(URI broadcastUri) { - this.broadcastUri = broadcastUri; - } - - public URI getIsolationUri() { return isolationUri; } - - public void setIsolationUri(URI isolationUri) { - this.isolationUri = isolationUri; - } - - public long getNetworkId() { return networkId; } - - public void setNetworkId(long networkId) { - this.networkId = networkId; - } - - public String getNetworkName() { return networkName; } - - - - public void setNetworkName(String networkName) { - this.networkName = networkName; - } - - - - public String getNetworkDomain() { return networkDomain; } - - - - public void setNetworkDomain(String networkDomain) { - this.networkDomain = networkDomain; - } - - - - public TrafficType getTrafficType() { return trafficType; } - - public void setTrafficType(TrafficType trafficType) { - this.trafficType = trafficType; - } - - - public long getServiceOfferingId() { return serviceOfferingId; } - - - - public void setServiceOfferingId(long serviceOfferingId) { - this.serviceOfferingId = serviceOfferingId; - } - - - - public long getProjectId() { return projectId; } - - - - public void setProjectId(long projectId) { - this.projectId = projectId; - } - - - - @Override public String getProjectUuid() { return projectUuid; } - - - 
- public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - - - - @Override public String getProjectName() { return projectName; } - - - - public void setProjectName(String projectName) { - this.projectName = projectName; - } - - - - - - public String getVpcUuid() { return vpcUuid; } - - - - public void setVpcUuid(String vpcUuid) { - this.vpcUuid = vpcUuid; - } - - - - public String getNicUuid() { return nicUuid; } - - - - public void setNicUuid(String nicUuid) { - this.nicUuid = nicUuid; - } - - - - public String getNetworkUuid() { return networkUuid; } - - - - public void setNetworkUuid(String networkUuid) { - this.networkUuid = networkUuid; - } - - public Long getJobId() { return jobId; } - - - - public void setJobId(Long jobId) { - this.jobId = jobId; - } - - - - public String getJobUuid() { return jobUuid; } - - - - public void setJobUuid(String jobUuid) { - this.jobUuid = jobUuid; - } - - - - public int getJobStatus() { return jobStatus; } - - - - public void setJobStatus(int jobStatus) { - this.jobStatus = jobStatus; - } - - - public Long getPodId() { return podId; } - - - - public void setPodId(Long podId) { - this.podId = podId; - } - - - - public long getDataCenterId() { return dataCenterId; } - - - - public void setDataCenterId(long zoneId) { - this.dataCenterId = zoneId; - } - - - - public String getDns1() { return dns1; } - - - - public void setDns1(String dns1) { - this.dns1 = dns1; - } - - - - public String getDns2() { return dns2; } - - - - public void setDns2(String dns2) { - this.dns2 = dns2; - } - - - - public String getTemplateVersion() { return templateVersion; } - - - - public void setTemplateVersion(String templateVersion) { - this.templateVersion = templateVersion; - } - - - - public String getScriptsVersion() { return scriptsVersion; } - - - - public void setScriptsVersion(String scriptsVersion) { - this.scriptsVersion = scriptsVersion; - } - - - - public RedundantState getRedundantState() { return 
redundantState; } - - - - public void setRedundantState(RedundantState redundantState) { - this.redundantState = redundantState; - } - - - - public boolean isRedundantRouter() { return isRedundantRouter; } - - - - public void setRedundantRouter(boolean isRedundantRouter) { - this.isRedundantRouter = isRedundantRouter; - } - - - - public GuestType getGuestType() { return guestType; } - - - - public void setGuestType(GuestType guestType) { - this.guestType = guestType; + public String getIp6Address() { + return ip6Address; } + public String getIp6Gateway() { + return ip6Gateway; + } + public String getIp6Cidr() { + return ip6Cidr; + } + public String getIp6Dns1() { + return ip6Dns1; + } - public String getIp6Address() { - return ip6Address; - } - - - - - public void setIp6Address(String ip6Address) { - this.ip6Address = ip6Address; - } - - - - - public String getIp6Gateway() { - return ip6Gateway; - } - - - - - public void setIp6Gateway(String ip6Gateway) { - this.ip6Gateway = ip6Gateway; - } - - - - - public String getIp6Cidr() { - return ip6Cidr; - } - - - - - public void setIp6Cidr(String ip6Cidr) { - this.ip6Cidr = ip6Cidr; - } - - - public String getIp6Dns1() { - return ip6Dns1; - } - - public void setIp6Dns1(String ip6Dns1) { - this.ip6Dns1 = ip6Dns1; - } - - public String getIp6Dns2() { - return ip6Dns2; - } - - public void setIp6Dns2(String ip6Dns2) { - this.ip6Dns2 = ip6Dns2; - } - + public String getIp6Dns2() { + return ip6Dns2; + } public VirtualRouter.Role getRole() { return role; } - - - public void setRole(VirtualRouter.Role role) { - this.role = role; - } } diff --git a/server/src/com/cloud/api/query/vo/EventJoinVO.java b/server/src/com/cloud/api/query/vo/EventJoinVO.java index 12d7e5ae4d0..87d20b9ea04 100644 --- a/server/src/com/cloud/api/query/vo/EventJoinVO.java +++ b/server/src/com/cloud/api/query/vo/EventJoinVO.java @@ -25,8 +25,8 @@ import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; -import 
com.cloud.utils.db.GenericDao; import com.cloud.event.Event.State; +import com.cloud.utils.db.GenericDao; @Entity @Table(name="event_view") @@ -70,7 +70,6 @@ public class EventJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="parameters", length=1024) private String parameters; - @Column(name="account_id") private long accountId; @@ -107,7 +106,6 @@ public class EventJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="archived") private boolean archived; - public EventJoinVO() { } @@ -116,211 +114,106 @@ public class EventJoinVO extends BaseViewVO implements ControlledViewEntity { return id; } - @Override - public void setId(long id) { - this.id = id; - - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - - @Override public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - @Override public String getAccountUuid() { return accountUuid; } - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - @Override public String getAccountName() { return accountName; } - public void setAccountName(String accountName) { - this.accountName = accountName; - } - @Override public short getAccountType() { return accountType; } - public void setAccountType(short accountType) { - this.accountType = accountType; - } - @Override public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - @Override public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - @Override public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - @Override public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath 
= domainPath; - } - public long getProjectId() { return projectId; } - public void setProjectId(long projectId) { - this.projectId = projectId; - } - @Override public String getProjectUuid() { return projectUuid; } - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - @Override public String getProjectName() { return projectName; } - public void setProjectName(String projectName) { - this.projectName = projectName; - } - - - - - public String getType() { return type; } - public void setType(String type) { - this.type = type; - } - public State getState() { return state; } - public void setState(State state) { - this.state = state; - } - public String getDescription() { return description; } - public void setDescription(String description) { - this.description = description; - } - public Date getCreateDate() { return createDate; } - public void setCreateDate(Date createDate) { - this.createDate = createDate; - } - public long getUserId() { return userId; } - public void setUserId(long userId) { - this.userId = userId; - } - public String getUserName() { return userName; } - public void setUserName(String userName) { - this.userName = userName; - } - public String getLevel() { return level; } - public void setLevel(String level) { - this.level = level; - } - public long getStartId() { return startId; } - public void setStartId(long startId) { - this.startId = startId; - } - - public String getStartUuid() { return startUuid; } - public void setStartUuid(String startUuid) { - this.startUuid = startUuid; - } - public String getParameters() { return parameters; } - public void setParameters(String parameters) { - this.parameters = parameters; - } - public boolean getArchived() { return archived; } - - public void setArchived(Boolean archived) { - this.archived = archived; - } - } diff --git a/server/src/com/cloud/api/query/vo/HostJoinVO.java b/server/src/com/cloud/api/query/vo/HostJoinVO.java index cf3cfdc4486..fbc59cd5884 100644 --- 
a/server/src/com/cloud/api/query/vo/HostJoinVO.java +++ b/server/src/com/cloud/api/query/vo/HostJoinVO.java @@ -27,16 +27,16 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; -import com.cloud.host.Status; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + import com.cloud.host.Host.Type; +import com.cloud.host.Status; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Cluster; import com.cloud.resource.ResourceState; import com.cloud.utils.db.GenericDao; -import org.apache.cloudstack.api.Identity; -import org.apache.cloudstack.api.InternalIdentity; - /** * Host DB view. * @@ -55,7 +55,6 @@ public class HostJoinVO extends BaseViewVO implements InternalIdentity, Identity @Column(name="name") private String name; - @Column(name="status") private Status status = null; @@ -139,7 +138,6 @@ public class HostJoinVO extends BaseViewVO implements InternalIdentity, Identity @Column(name="pod_name") private String podName; - @Column(name="guest_os_category_id") private long osCategoryId; @@ -173,336 +171,165 @@ public class HostJoinVO extends BaseViewVO implements InternalIdentity, Identity @Column(name="job_status") private int jobStatus; - - /* (non-Javadoc) - * @see com.cloud.api.query.vo.BaseViewVO#getId() - */ @Override public long getId() { return this.id; } - /* (non-Javadoc) - * @see com.cloud.api.query.vo.BaseViewVO#setId(long) - */ - @Override - public void setId(long id) { - this.id = id; - } - - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - public String getVersion() { return version; } - public void setVersion(String version) { - this.version = version; - } - public long getZoneId() { return zoneId; } - public void setZoneId(long zoneId) { - this.zoneId = zoneId; - } - public String getZoneUuid() { return zoneUuid; } - public void setZoneUuid(String zoneUuid) { - 
this.zoneUuid = zoneUuid; - } - public String getZoneName() { return zoneName; } - public void setZoneName(String zoneName) { - this.zoneName = zoneName; - } - public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - public Status getStatus() { return status; } - public void setStatus(Status status) { - this.status = status; - } - public Type getType() { return type; } - public void setType(Type type) { - this.type = type; - } - public String getPrivateIpAddress() { return privateIpAddress; } - public void setPrivateIpAddress(String privateIpAddress) { - this.privateIpAddress = privateIpAddress; - } - public Date getDisconnectedOn() { return disconnectedOn; } - public void setDisconnectedOn(Date disconnectedOn) { - this.disconnectedOn = disconnectedOn; - } - public HypervisorType getHypervisorType() { return hypervisorType; } - public void setHypervisorType(HypervisorType hypervisorType) { - this.hypervisorType = hypervisorType; - } - public String getHypervisorVersion() { return hypervisorVersion; } - public void setHypervisorVersion(String hypervisorVersion) { - this.hypervisorVersion = hypervisorVersion; - } - public String getCapabilities() { return caps; } - public void setCapabilities(String caps) { - this.caps = caps; - } - public long getLastPinged() { return lastPinged; } - public void setLastPinged(long lastPinged) { - this.lastPinged = lastPinged; - } - public Date getCreated() { return created; } - public void setCreated(Date created) { - this.created = created; - } - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - public ResourceState getResourceState() { return resourceState; } - public void setResourceState(ResourceState resourceState) { - this.resourceState = resourceState; - } - public Long getManagementServerId() { return managementServerId; } - public void setManagementServerId(Long managementServerId) { - this.managementServerId = 
managementServerId; - } - public Integer getCpus() { return cpus; } - public void setCpus(Integer cpus) { - this.cpus = cpus; - } - public Long getSpeed() { return speed; } - public void setSpeed(Long speed) { - this.speed = speed; - } - public long getTotalMemory() { return totalMemory; } - public void setTotalMemory(long totalMemory) { - this.totalMemory = totalMemory; - } - public long getClusterId() { return clusterId; } - public void setClusterId(long clusterId) { - this.clusterId = clusterId; - } - public String getClusterUuid() { return clusterUuid; } - public void setClusterUuid(String clusterUuid) { - this.clusterUuid = clusterUuid; - } - public String getClusterName() { return clusterName; } - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - public Cluster.ClusterType getClusterType() { return clusterType; } - public void setClusterType(Cluster.ClusterType clusterType) { - this.clusterType = clusterType; - } - public long getOsCategoryId() { return osCategoryId; } - public void setOsCategoryId(long osCategoryId) { - this.osCategoryId = osCategoryId; - } - public String getOsCategoryUuid() { return osCategoryUuid; } - public void setOsCategoryUuid(String osCategoryUuid) { - this.osCategoryUuid = osCategoryUuid; - } - public String getOsCategoryName() { return osCategoryName; } - public void setOsCategoryName(String osCategoryName) { - this.osCategoryName = osCategoryName; - } - public Long getJobId() { return jobId; } - public void setJobId(Long jobId) { - this.jobId = jobId; - } - public String getJobUuid() { return jobUuid; } - public void setJobUuid(String jobUuid) { - this.jobUuid = jobUuid; - } - public int getJobStatus() { return jobStatus; } - public void setJobStatus(int jobStatus) { - this.jobStatus = jobStatus; - } - public long getPodId() { return podId; } - public void setPodId(long podId) { - this.podId = podId; - } - public String getPodUuid() { return podUuid; } - public void setPodUuid(String podUuid) 
{ - this.podUuid = podUuid; - } - public String getPodName() { return podName; } - public void setPodName(String podName) { - this.podName = podName; - } - public long getMemUsedCapacity() { return memUsedCapacity; } - public void setMemUsedCapacity(long memUsedCapacity) { - this.memUsedCapacity = memUsedCapacity; - } - public long getMemReservedCapacity() { return memReservedCapacity; } - public void setMemReservedCapacity(long memReservedCapacity) { - this.memReservedCapacity = memReservedCapacity; - } - public long getCpuUsedCapacity() { return cpuUsedCapacity; } - public void setCpuUsedCapacity(long cpuUsedCapacity) { - this.cpuUsedCapacity = cpuUsedCapacity; - } - public long getCpuReservedCapacity() { return cpuReservedCapacity; } - public void setCpuReservedCapacity(long cpuReservedCapacity) { - this.cpuReservedCapacity = cpuReservedCapacity; - } - public String getTag() { return tag; } - - public void setTag(String tag) { - this.tag = tag; - } - - } diff --git a/server/src/com/cloud/api/query/vo/ImageStoreJoinVO.java b/server/src/com/cloud/api/query/vo/ImageStoreJoinVO.java index ac161afe47d..5003ea7290e 100644 --- a/server/src/com/cloud/api/query/vo/ImageStoreJoinVO.java +++ b/server/src/com/cloud/api/query/vo/ImageStoreJoinVO.java @@ -25,14 +25,13 @@ import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.ImageStore; -import com.cloud.storage.ScopeType; -import com.cloud.utils.db.GenericDao; - import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ScopeType; +import com.cloud.utils.db.GenericDao; + /** * Image Data Store DB view. 
* @@ -91,117 +90,56 @@ public class ImageStoreJoinVO extends BaseViewVO implements InternalIdentity, Id return id; } - @Override - public void setId(long id) { - this.id = id; - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - public String getName() { return name; } - - public long getZoneId() { return zoneId; } - public void setZoneId(long zoneId) { - this.zoneId = zoneId; - } - public String getZoneUuid() { return zoneUuid; } - public void setZoneUuid(String zoneUuid) { - this.zoneUuid = zoneUuid; - } - public String getZoneName() { return zoneName; } - public void setZoneName(String zoneName) { - this.zoneName = zoneName; - } - public String getUrl() { return url; } - public void setUrl(String url) { - this.url = url; - } - public String getProtocol() { return protocol; } - public void setProtocol(String protocol) { - this.protocol = protocol; - } - public String getProviderName() { return providerName; } - public void setProviderName(String providerName) { - this.providerName = providerName; - } - public ScopeType getScope() { return scope; } - public void setScope(ScopeType scope) { - this.scope = scope; - } - - public void setName(String name) { - this.name = name; - } - public String getDetailName() { return detailName; } - public void setDetailName(String detailName) { - this.detailName = detailName; - } - public String getDetailValue() { return detailValue; } - public void setDetailValue(String detailValue) { - this.detailValue = detailValue; - } - public DataStoreRole getRole() { return role; } - public void setRole(DataStoreRole role) { - this.role = role; - } - public Date getRemoved() { return removed; } - - public void setRemoved(Date removed) { - this.removed = removed; - } - - } diff --git a/server/src/com/cloud/api/query/vo/InstanceGroupJoinVO.java b/server/src/com/cloud/api/query/vo/InstanceGroupJoinVO.java index 3fb43094ad3..f4ce8d64e8d 100644 --- 
a/server/src/com/cloud/api/query/vo/InstanceGroupJoinVO.java +++ b/server/src/com/cloud/api/query/vo/InstanceGroupJoinVO.java @@ -39,14 +39,12 @@ public class InstanceGroupJoinVO extends BaseViewVO implements ControlledViewEnt @Column(name="name") String name; - @Column(name=GenericDao.REMOVED_COLUMN) private Date removed; @Column(name=GenericDao.CREATED_COLUMN) private Date created; - @Column(name="account_id") private long accountId; @@ -80,8 +78,6 @@ public class InstanceGroupJoinVO extends BaseViewVO implements ControlledViewEnt @Column(name="project_name") private String projectName; - - public InstanceGroupJoinVO() { } @@ -90,144 +86,74 @@ public class InstanceGroupJoinVO extends BaseViewVO implements ControlledViewEnt return id; } - - @Override - public void setId(long id) { - this.id = id; - - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - - @Override public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - @Override public String getAccountUuid() { return accountUuid; } - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - @Override public String getAccountName() { return accountName; } - public void setAccountName(String accountName) { - this.accountName = accountName; - } - @Override public short getAccountType() { return accountType; } - public void setAccountType(short accountType) { - this.accountType = accountType; - } - @Override public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - @Override public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - @Override public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - @Override public String 
getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public long getProjectId() { return projectId; } - public void setProjectId(long projectId) { - this.projectId = projectId; - } - @Override public String getProjectUuid() { return projectUuid; } - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - @Override public String getProjectName() { return projectName; } - public void setProjectName(String projectName) { - this.projectName = projectName; - } - public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - public Date getCreated() { return created; } - - public void setCreated(Date created) { - this.created = created; - } - - } diff --git a/server/src/com/cloud/api/query/vo/ProjectAccountJoinVO.java b/server/src/com/cloud/api/query/vo/ProjectAccountJoinVO.java index 1a8818a22e4..91ff561b2a6 100644 --- a/server/src/com/cloud/api/query/vo/ProjectAccountJoinVO.java +++ b/server/src/com/cloud/api/query/vo/ProjectAccountJoinVO.java @@ -23,9 +23,10 @@ import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.projects.ProjectAccount.Role; import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.projects.ProjectAccount.Role; + @Entity @Table(name="project_account_view") public class ProjectAccountJoinVO extends BaseViewVO implements InternalIdentity { @@ -71,7 +72,6 @@ public class ProjectAccountJoinVO extends BaseViewVO implements InternalIdentity @Column(name="project_name") private String projectName; - public ProjectAccountJoinVO() { } @@ -80,137 +80,51 @@ public class ProjectAccountJoinVO extends BaseViewVO implements InternalIdentity return id; } - - @Override - public void setId(long id) { - this.id = id; - - } - public long 
getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - - - public String getAccountUuid() { return accountUuid; } - - - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - - - public String getAccountName() { return accountName; } - - - public void setAccountName(String accountName) { - this.accountName = accountName; - } - - - public short getAccountType() { return accountType; } - - - public void setAccountType(short accountType) { - this.accountType = accountType; - } - - - public Role getAccountRole() { return accountRole; } - - - public void setAccountRole(Role accountRole) { - this.accountRole = accountRole; - } - - - public long getProjectId() { return projectId; } - - - public void setProjectId(long projectId) { - this.projectId = projectId; - } - - - public String getProjectUuid() { return projectUuid; } - - - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - - - public String getProjectName() { return projectName; } - - - - public void setProjectName(String projectName) { - this.projectName = projectName; - } - - - } diff --git a/server/src/com/cloud/api/query/vo/ProjectInvitationJoinVO.java b/server/src/com/cloud/api/query/vo/ProjectInvitationJoinVO.java index f6e67609c79..b598ba18845 100644 --- a/server/src/com/cloud/api/query/vo/ProjectInvitationJoinVO.java +++ 
b/server/src/com/cloud/api/query/vo/ProjectInvitationJoinVO.java @@ -91,146 +91,74 @@ public class ProjectInvitationJoinVO extends BaseViewVO implements ControlledVie return id; } - - @Override - public void setId(long id) { - this.id = id; - - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - - @Override public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - @Override public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - @Override public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - - - public State getState() { return state; } - public void setState(State state) { - this.state = state; - } - @Override public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - public String getEmail() { return email; } - public void setEmail(String email) { - this.email = email; - } - @Override public String getAccountName() { return accountName; } - public void setAccountName(String accountName) { - this.accountName = accountName; - } - public long getProjectId() { return projectId; } - public void setProjectId(long projectId) { - this.projectId = projectId; - } - @Override public String getProjectUuid() { return projectUuid; } - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - @Override public String getProjectName() { return projectName; } - public void setProjectName(String projectName) { - this.projectName = projectName; - } - public Date getCreated() { return created; } - public void setCreated(Date created) { - this.created = created; - } - @Override public String getAccountUuid() { return accountUuid; } - public void setAccountUuid(String accountUuid) { - 
this.accountUuid = accountUuid; - } - @Override public short getAccountType() { return accountType; } - public void setAccountType(short accountType) { - this.accountType = accountType; - } - @Override public String getDomainPath() { return domainPath; } - - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - - } diff --git a/server/src/com/cloud/api/query/vo/ProjectJoinVO.java b/server/src/com/cloud/api/query/vo/ProjectJoinVO.java index 3885fa087d4..8addabeb1ff 100644 --- a/server/src/com/cloud/api/query/vo/ProjectJoinVO.java +++ b/server/src/com/cloud/api/query/vo/ProjectJoinVO.java @@ -25,13 +25,13 @@ import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.projects.Project.State; -import com.cloud.server.ResourceTag.TaggedResourceType; -import com.cloud.utils.db.GenericDao; - import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; +import com.cloud.projects.Project.State; +import com.cloud.server.ResourceTag.ResourceObjectType; +import com.cloud.utils.db.GenericDao; + @Entity @Table(name="project_view") public class ProjectJoinVO extends BaseViewVO implements InternalIdentity, Identity { @@ -103,14 +103,13 @@ public class ProjectJoinVO extends BaseViewVO implements InternalIdentity, Ident @Column(name="tag_resource_type") @Enumerated(value=EnumType.STRING) - private TaggedResourceType tagResourceType; + private ResourceObjectType tagResourceType; @Column(name="tag_customer") private String tagCustomer; - + @Column(name="project_account_id") private long projectAccountId; - public ProjectJoinVO() { } @@ -120,190 +119,95 @@ public class ProjectJoinVO extends BaseViewVO implements InternalIdentity, Ident return id; } - - @Override - public void setId(long id) { - this.id = id; - - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - public String getName() { return name; } 
- public void setName(String name) { - this.name = name; - } - public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public State getState() { return state; } - public void setState(State state) { - this.state = state; - } - public Date getCreated() { return created; } - public void setCreated(Date created) { - this.created = created; - } - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - public String getDisplayText() { return displayText; } - public void setDisplayText(String displayText) { - this.displayText = displayText; - } - public String getOwner() { return owner; } - public void setOwner(String owner) { - this.owner = owner; - } - public long getTagId() { return tagId; } - public void setTagId(long tagId) { - this.tagId = tagId; - } - public String getTagUuid() { return tagUuid; } - public void setTagUuid(String tagUuid) { - this.tagUuid = tagUuid; - } - public String getTagKey() { return tagKey; } - public void setTagKey(String tagKey) { - this.tagKey = tagKey; - } - public String getTagValue() { return tagValue; } - public void setTagValue(String tagValue) { - this.tagValue = tagValue; - } - public long getTagDomainId() { return tagDomainId; } - public void setTagDomainId(long tagDomainId) { - this.tagDomainId = tagDomainId; - } - public long getTagAccountId() { return tagAccountId; } - public void setTagAccountId(long tagAccountId) { - this.tagAccountId = tagAccountId; - } - public long getTagResourceId() 
{ return tagResourceId; } - public void setTagResourceId(long tagResourceId) { - this.tagResourceId = tagResourceId; - } - public String getTagResourceUuid() { return tagResourceUuid; } - public void setTagResourceUuid(String tagResourceUuid) { - this.tagResourceUuid = tagResourceUuid; - } - - public TaggedResourceType getTagResourceType() { + public ResourceObjectType getTagResourceType() { return tagResourceType; } - public void setTagResourceType(TaggedResourceType tagResourceType) { - this.tagResourceType = tagResourceType; - } - public String getTagCustomer() { return tagCustomer; } - public void setTagCustomer(String tagCustomer) { - this.tagCustomer = tagCustomer; - } - public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - public long getProjectAccountId() { return projectAccountId; } diff --git a/server/src/com/cloud/api/query/vo/ResourceTagJoinVO.java b/server/src/com/cloud/api/query/vo/ResourceTagJoinVO.java index 9ce9555d1be..cd94ba5987c 100644 --- a/server/src/com/cloud/api/query/vo/ResourceTagJoinVO.java +++ b/server/src/com/cloud/api/query/vo/ResourceTagJoinVO.java @@ -23,7 +23,7 @@ import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; @Entity @Table(name="resource_tag_view") @@ -42,7 +42,6 @@ public class ResourceTagJoinVO extends BaseViewVO implements ControlledViewEntit @Column(name="value") String value; - @Column(name="resource_id") long resourceId; @@ -51,13 +50,11 @@ public class ResourceTagJoinVO extends BaseViewVO implements ControlledViewEntit @Column(name="resource_type") @Enumerated(value=EnumType.STRING) - private TaggedResourceType resourceType; + private ResourceObjectType resourceType; @Column(name="customer") String customer; - - @Column(name="account_id") private long accountId; @@ -91,8 +88,6 @@ public 
class ResourceTagJoinVO extends BaseViewVO implements ControlledViewEntit @Column(name="project_name") private String projectName; - - public ResourceTagJoinVO() { } @@ -101,168 +96,86 @@ public class ResourceTagJoinVO extends BaseViewVO implements ControlledViewEntit return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - @Override public long getId() { return id; } - @Override - public void setId(long id) { - this.id = id; - } - - @Override public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - @Override public String getAccountUuid() { return accountUuid; } - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - @Override public String getAccountName() { return accountName; } - public void setAccountName(String accountName) { - this.accountName = accountName; - } - @Override public short getAccountType() { return accountType; } - public void setAccountType(short accountType) { - this.accountType = accountType; - } - @Override public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - @Override public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - @Override public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - @Override public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public long getProjectId() { return projectId; } - public void setProjectId(long projectId) { - this.projectId = projectId; - } - @Override public String getProjectUuid() { return projectUuid; } - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - @Override public String getProjectName() { return projectName; } 
- public void setProjectName(String projectName) { - this.projectName = projectName; - } - - - public String getKey() { return key; } - public void setKey(String key) { - this.key = key; - } - public String getValue() { return value; } - public void setValue(String value) { - this.value = value; - } - public long getResourceId() { return resourceId; } - public void setResourceId(long resourceId) { - this.resourceId = resourceId; - } - public String getResourceUuid() { return resourceUuid; } - public void setResourceUuid(String resourceUuid) { - this.resourceUuid = resourceUuid; - } - - public TaggedResourceType getResourceType() { + public ResourceObjectType getResourceType() { return resourceType; } - public void setResourceType(TaggedResourceType resourceType) { - this.resourceType = resourceType; - } - public String getCustomer() { return customer; } - - public void setCustomer(String customer) { - this.customer = customer; - } - - } diff --git a/server/src/com/cloud/api/query/vo/SecurityGroupJoinVO.java b/server/src/com/cloud/api/query/vo/SecurityGroupJoinVO.java index 258b6136224..ca1fa85c3ac 100644 --- a/server/src/com/cloud/api/query/vo/SecurityGroupJoinVO.java +++ b/server/src/com/cloud/api/query/vo/SecurityGroupJoinVO.java @@ -24,7 +24,7 @@ import javax.persistence.Id; import javax.persistence.Table; import com.cloud.network.security.SecurityRule.SecurityRuleType; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; @Entity @Table(name="security_group_view") @@ -135,7 +135,7 @@ public class SecurityGroupJoinVO extends BaseViewVO implements ControlledViewEnt @Column(name="tag_resource_type") @Enumerated(value=EnumType.STRING) - private TaggedResourceType tagResourceType; + private ResourceObjectType tagResourceType; @Column(name="tag_customer") private String tagCustomer; @@ -148,199 +148,105 @@ public class SecurityGroupJoinVO extends BaseViewVO implements ControlledViewEnt return id; } - 
@Override - public void setId(long id) { - this.id = id; - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - @Override public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - @Override public String getAccountUuid() { return accountUuid; } - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - @Override public String getAccountName() { return accountName; } - public void setAccountName(String accountName) { - this.accountName = accountName; - } - @Override public short getAccountType() { return accountType; } - public void setAccountType(short accountType) { - this.accountType = accountType; - } - @Override public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - @Override public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - @Override public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - @Override public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public long getProjectId() { return projectId; } - public void setProjectId(long projectId) { - this.projectId = projectId; - } - @Override public String getProjectUuid() { return projectUuid; } - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - @Override public String getProjectName() { return projectName; } - public void setProjectName(String projectName) { - this.projectName = projectName; - } - public Long getJobId() { return jobId; } - public void setJobId(Long jobId) { - 
this.jobId = jobId; - } - public String getJobUuid() { return jobUuid; } - public void setJobUuid(String jobUuid) { - this.jobUuid = jobUuid; - } - public int getJobStatus() { return jobStatus; } - public void setJobStatus(int jobStatus) { - this.jobStatus = jobStatus; - } - public String getDescription() { return description; } - public void setDescription(String description) { - this.description = description; - } - public Long getRuleId() { return ruleId; } - public void setRuleId(Long ruleId) { - this.ruleId = ruleId; - } - public String getRuleUuid() { return ruleUuid; } - public void setRuleUuid(String ruleUuid) { - this.ruleUuid = ruleUuid; - } - public int getRuleStartPort() { return ruleStartPort; } - public void setRuleStartPort(int ruleStartPort) { - this.ruleStartPort = ruleStartPort; - } - public int getRuleEndPort() { return ruleEndPort; } - public void setRuleEndPort(int ruleEndPort) { - this.ruleEndPort = ruleEndPort; - } - public String getRuleProtocol() { return ruleProtocol; } - public void setRuleProtocol(String ruleProtocol) { - this.ruleProtocol = ruleProtocol; - } - public SecurityRuleType getRuleType() { if ("ingress".equalsIgnoreCase(ruleType)) { return SecurityRuleType.IngressRule; @@ -349,104 +255,51 @@ public class SecurityGroupJoinVO extends BaseViewVO implements ControlledViewEnt } } - public void setRuleType(String ruleType) { - this.ruleType = ruleType; - } - public Long getRuleAllowedNetworkId() { return ruleAllowedNetworkId; } - public void setRuleAllowedNetworkId(Long ruleAllowedNetworkId) { - this.ruleAllowedNetworkId = ruleAllowedNetworkId; - } - public String getRuleAllowedSourceIpCidr() { return ruleAllowedSourceIpCidr; } - public void setRuleAllowedSourceIpCidr(String ruleAllowedSourceIpCidr) { - this.ruleAllowedSourceIpCidr = ruleAllowedSourceIpCidr; - } - public long getTagId() { return tagId; } - public void setTagId(long tagId) { - this.tagId = tagId; - } - public String getTagUuid() { return tagUuid; } - public void 
setTagUuid(String tagUuid) { - this.tagUuid = tagUuid; - } - public String getTagKey() { return tagKey; } - public void setTagKey(String tagKey) { - this.tagKey = tagKey; - } - public String getTagValue() { return tagValue; } - public void setTagValue(String tagValue) { - this.tagValue = tagValue; - } - public long getTagDomainId() { return tagDomainId; } - public void setTagDomainId(long tagDomainId) { - this.tagDomainId = tagDomainId; - } - public long getTagAccountId() { return tagAccountId; } - public void setTagAccountId(long tagAccountId) { - this.tagAccountId = tagAccountId; - } - public long getTagResourceId() { return tagResourceId; } - public void setTagResourceId(long tagResourceId) { - this.tagResourceId = tagResourceId; - } - public String getTagResourceUuid() { return tagResourceUuid; } - public void setTagResourceUuid(String tagResourceUuid) { - this.tagResourceUuid = tagResourceUuid; - } - - public TaggedResourceType getTagResourceType() { + public ResourceObjectType getTagResourceType() { return tagResourceType; } - public void setTagResourceType(TaggedResourceType tagResourceType) { - this.tagResourceType = tagResourceType; - } - public String getTagCustomer() { return tagCustomer; } - - public void setTagCustomer(String tagCustomer) { - this.tagCustomer = tagCustomer; - } - } diff --git a/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java b/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java index 05ff5f3f44a..3c843bbef34 100644 --- a/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java +++ b/server/src/com/cloud/api/query/vo/ServiceOfferingJoinVO.java @@ -132,242 +132,119 @@ public class ServiceOfferingJoinVO extends BaseViewVO implements InternalIdentit return id; } - @Override - public void setId(long id) { - this.id = id; - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - public String getName() { return name; } - public void setName(String name) { 
- this.name = name; - } - public String getDisplayText() { return displayText; } - public void setDisplayText(String displayText) { - this.displayText = displayText; - } - - public String getTags() { return tags; } - public void setTags(String tags) { - this.tags = tags; - } - public boolean isUseLocalStorage() { return useLocalStorage; } - public void setUseLocalStorage(boolean useLocalStorage) { - this.useLocalStorage = useLocalStorage; - } - public boolean isSystemUse() { return systemUse; } - public void setSystemUse(boolean systemUse) { - this.systemUse = systemUse; - } - - public Date getCreated() { return created; } - public void setCreated(Date created) { - this.created = created; - } - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public int getSortKey() { return sortKey; } - public void setSortKey(int sortKey) { - this.sortKey = sortKey; - } - public int getCpu() { return cpu; } - public void setCpu(int cpu) { - this.cpu = cpu; - } - public int getSpeed() { return speed; } - public void setSpeed(int speed) { - this.speed = speed; - } - public int getRamSize() { return ramSize; } - public void setRamSize(int ramSize) { - this.ramSize = ramSize; - } - public Integer getRateMbps() { return rateMbps; } - public void setRateMbps(Integer rateMbps) { - this.rateMbps = rateMbps; - } - public Integer getMulticastRateMbps() { return multicastRateMbps; } - 
public void setMulticastRateMbps(Integer multicastRateMbps) { - this.multicastRateMbps = multicastRateMbps; - } - public boolean isOfferHA() { return offerHA; } - public void setOfferHA(boolean offerHA) { - this.offerHA = offerHA; - } - public boolean isLimitCpuUse() { return limitCpuUse; } - public void setLimitCpuUse(boolean limitCpuUse) { - this.limitCpuUse = limitCpuUse; - } - public String getHostTag() { return hostTag; } - public void setHostTag(String hostTag) { - this.hostTag = hostTag; - } - public boolean isDefaultUse() { return default_use; } - public void setDefaultUse(boolean default_use) { - this.default_use = default_use; - } - public String getSystemVmType() { return vm_type; } - public void setSystemVmType(String vm_type) { - this.vm_type = vm_type; - } - public String getDeploymentPlanner() { return deploymentPlanner; } - public void setDeploymentPlanner(String deploymentPlanner) { - this.deploymentPlanner = deploymentPlanner; - } - public boolean getVolatileVm() { return volatileVm; } - public void setVolatileVm(boolean volatileVm) { - this.volatileVm = volatileVm; - } - - public void setBytesReadRate(Long bytesReadRate) { - this.bytesReadRate = bytesReadRate; - } - public Long getBytesReadRate() { return bytesReadRate; } - public void setBytesWriteRate(Long bytesWriteRate) { - this.bytesWriteRate = bytesWriteRate; - } - public Long getBytesWriteRate() { return bytesWriteRate; } - public void setIopsReadRate(Long iopsReadRate) { - this.iopsReadRate = iopsReadRate; - } - public Long getIopsReadRate() { return iopsReadRate; } - public void setIopsWriteRate(Long iopsWriteRate) { - this.iopsWriteRate = iopsWriteRate; - } - public Long getIopsWriteRate() { return iopsWriteRate; } diff --git a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java index 69f2204d0e3..260d6ae71ad 100644 --- a/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java +++ 
b/server/src/com/cloud/api/query/vo/StoragePoolJoinVO.java @@ -24,17 +24,16 @@ import javax.persistence.EnumType; import javax.persistence.Enumerated; import javax.persistence.Id; import javax.persistence.Table; -import com.cloud.org.Cluster; -import com.cloud.storage.ScopeType; -import com.cloud.storage.StoragePoolStatus; -import com.cloud.storage.Storage.StoragePoolType; -import com.cloud.utils.db.GenericDao; import org.apache.cloudstack.api.Identity; import org.apache.cloudstack.api.InternalIdentity; - import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.org.Cluster; +import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.StoragePoolStatus; +import com.cloud.utils.db.GenericDao; /** * Storage Pool DB view. @@ -137,6 +136,9 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I @Enumerated(value = EnumType.STRING) private HypervisorType hypervisor; + @Column(name = "storage_provider_name") + private String storageProviderName; + /** * @return the scope */ @@ -144,239 +146,121 @@ public class StoragePoolJoinVO extends BaseViewVO implements InternalIdentity, I return scope; } - /** - * @param scope the scope to set - */ - public void setScope(ScopeType scope) { - this.scope = scope; - } - public HypervisorType getHypervisor() { return hypervisor; } - public void setHypervisor(HypervisorType hypervisor) { - this.hypervisor = hypervisor; - } - @Override public long getId() { return id; } - @Override - public void setId(long id) { - this.id = id; - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - public String getPath() { return path; } - public void setPath(String path) { - this.path = path; - } - public String getHostAddress() { return hostAddress; } - public void setHostAddress(String 
hostAddress) { - this.hostAddress = hostAddress; - } - public StoragePoolStatus getStatus() { return status; } - public void setStatus(StoragePoolStatus status) { - this.status = status; - } - public StoragePoolType getPoolType() { return poolType; } - public void setPoolType(StoragePoolType poolType) { - this.poolType = poolType; - } - public Date getCreated() { return created; } - public void setCreated(Date created) { - this.created = created; - } - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - public long getCapacityBytes() { return capacityBytes; } - public void setCapacityBytes(long capacityBytes) { - this.capacityBytes = capacityBytes; - } - public Long getCapacityIops() { return capacityIops; } - public void setCapacityIops(Long capacityIops) { - this.capacityIops = capacityIops; - } - public long getClusterId() { return clusterId; } - public void setClusterId(long clusterId) { - this.clusterId = clusterId; - } - public String getClusterUuid() { return clusterUuid; } - public void setClusterUuid(String clusterUuid) { - this.clusterUuid = clusterUuid; - } - public String getClusterName() { return clusterName; } - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - public Cluster.ClusterType getClusterType() { return clusterType; } - public void setClusterType(Cluster.ClusterType clusterType) { - this.clusterType = clusterType; - } - public long getZoneId() { return zoneId; } - public void setZoneId(long zoneId) { - this.zoneId = zoneId; - } - public String getZoneUuid() { return zoneUuid; } - public void setZoneUuid(String zoneUuid) { - this.zoneUuid = zoneUuid; - } - public String getZoneName() { return zoneName; } - public void setZoneName(String zoneName) { - this.zoneName = zoneName; - } - public long getPodId() { return podId; } - public void setPodId(long podId) { - this.podId = podId; - } - public String getPodUuid() { return podUuid; } - 
public void setPodUuid(String podUuid) { - this.podUuid = podUuid; - } - public String getPodName() { return podName; } - public void setPodName(String podName) { - this.podName = podName; - } - public String getTag() { return tag; } - public void setTag(String tag) { - this.tag = tag; - } - public long getUsedCapacity() { return usedCapacity; } - public void setUsedCapacity(long usedCapacity) { - this.usedCapacity = usedCapacity; - } - public long getReservedCapacity() { return reservedCapacity; } - public void setReservedCapacity(long reservedCapacity) { - this.reservedCapacity = reservedCapacity; - } - public Long getJobId() { return jobId; } - public void setJobId(Long jobId) { - this.jobId = jobId; - } - public String getJobUuid() { return jobUuid; } - public void setJobUuid(String jobUuid) { - this.jobUuid = jobUuid; - } - public int getJobStatus() { return jobStatus; } - public void setJobStatus(int jobStatus) { - this.jobStatus = jobStatus; + public String getStorageProviderName() { + return storageProviderName; } - - } diff --git a/server/src/com/cloud/api/query/vo/TemplateJoinVO.java b/server/src/com/cloud/api/query/vo/TemplateJoinVO.java index bb1cfedd781..ca5963efb63 100644 --- a/server/src/com/cloud/api/query/vo/TemplateJoinVO.java +++ b/server/src/com/cloud/api/query/vo/TemplateJoinVO.java @@ -30,7 +30,7 @@ import javax.persistence.TemporalType; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; @@ -97,7 +97,7 @@ public class TemplateJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="dynamically_scalable") private boolean dynamicallyScalable; - + @Column(name="guest_os_id") private long 
guestOSId; @@ -129,7 +129,6 @@ public class TemplateJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="source_template_uuid") private String sourceTemplateUuid; - @Column(name="template_tag") private String templateTag; @@ -213,7 +212,6 @@ public class TemplateJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="detail_value") private String detailValue; - @Column(name="tag_id") private long tagId; @@ -240,7 +238,7 @@ public class TemplateJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="tag_resource_type") @Enumerated(value=EnumType.STRING) - private TaggedResourceType tagResourceType; + private ResourceObjectType tagResourceType; @Column(name="tag_customer") private String tagCustomer; @@ -255,821 +253,283 @@ public class TemplateJoinVO extends BaseViewVO implements ControlledViewEntity { public TemplateJoinVO() { } - - @Override public long getId() { return id; } - - - @Override - public void setId(long id) { - this.id = id; - } - - - @Override public String getUuid() { return uuid; } - - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - - public String getName() { return name; } - - - public void setName(String name) { - this.name = name; - } - - public Date getCreated() { return created; } - - - public void setCreated(Date created) { - this.created = created; - } - - - public Date getRemoved() { return removed; } - - - public void setRemoved(Date removed) { - this.removed = removed; - } - - - @Override public long getAccountId() { return accountId; } - - - public void setAccountId(long accountId) { - this.accountId = accountId; - } - - - @Override public String getAccountUuid() { return accountUuid; } - - - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - - - @Override public String getAccountName() { return accountName; } - - - public void setAccountName(String accountName) { - this.accountName = accountName; - } - - - @Override public 
short getAccountType() { return accountType; } - - - public void setAccountType(short accountType) { - this.accountType = accountType; - } - - - @Override public long getDomainId() { return domainId; } - - - public void setDomainId(long domainId) { - this.domainId = domainId; - } - - - @Override public String getDomainUuid() { return domainUuid; } - - - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - - - @Override public String getDomainName() { return domainName; } - - - public void setDomainName(String domainName) { - this.domainName = domainName; - } - - - @Override public String getDomainPath() { return domainPath; } - - - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - - - public long getProjectId() { return projectId; } - - - public void setProjectId(long projectId) { - this.projectId = projectId; - } - - - @Override public String getProjectUuid() { return projectUuid; } - - - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - - - @Override public String getProjectName() { return projectName; } - - - public void setProjectName(String projectName) { - this.projectName = projectName; - } - - - - public boolean isExtractable() { return extractable; } - - - public void setExtractable(boolean extractable) { - this.extractable = extractable; - } - - - public Storage.TemplateType getTemplateType() { return templateType; } - - - public void setTemplateType(Storage.TemplateType templateType) { - this.templateType = templateType; - } - - - - - - public long getTagId() { return tagId; } - - - public void setTagId(long tagId) { - this.tagId = tagId; - } - - - public String getTagUuid() { return tagUuid; } - - - public void setTagUuid(String tagUuid) { - this.tagUuid = tagUuid; - } - - - public String getTagKey() { return tagKey; } - - - public void setTagKey(String tagKey) { - this.tagKey = tagKey; - } - - - public String getTagValue() { return tagValue; } - - 
- public void setTagValue(String tagValue) { - this.tagValue = tagValue; - } - - - public long getTagDomainId() { return tagDomainId; } - - - public void setTagDomainId(long tagDomainId) { - this.tagDomainId = tagDomainId; - } - - - public long getTagAccountId() { return tagAccountId; } - - - public void setTagAccountId(long tagAccountId) { - this.tagAccountId = tagAccountId; - } - - - public long getTagResourceId() { return tagResourceId; } - - - public void setTagResourceId(long tagResourceId) { - this.tagResourceId = tagResourceId; - } - - - public String getTagResourceUuid() { return tagResourceUuid; } - - - public void setTagResourceUuid(String tagResourceUuid) { - this.tagResourceUuid = tagResourceUuid; - } - - - - public TaggedResourceType getTagResourceType() { + public ResourceObjectType getTagResourceType() { return tagResourceType; } - - - public void setTagResourceType(TaggedResourceType tagResourceType) { - this.tagResourceType = tagResourceType; - } - - - public String getTagCustomer() { return tagCustomer; } - - - public void setTagCustomer(String tagCustomer) { - this.tagCustomer = tagCustomer; - } - - - public long getDataCenterId() { return dataCenterId; } - - - public void setDataCenterId(long dataCenterId) { - this.dataCenterId = dataCenterId; - } - - - public String getDataCenterUuid() { return dataCenterUuid; } - - - public void setDataCenterUuid(String dataCenterUuid) { - this.dataCenterUuid = dataCenterUuid; - } - - - public String getDataCenterName() { return dataCenterName; } - - - public void setDataCenterName(String dataCenterName) { - this.dataCenterName = dataCenterName; - } - - - public String getUniqueName() { return uniqueName; } - - - public void setUniqueName(String uniqueName) { - this.uniqueName = uniqueName; - } - - - public boolean isPublicTemplate() { return publicTemplate; } - - - public void setPublicTemplate(boolean publicTemplate) { - this.publicTemplate = publicTemplate; - } - - - public boolean isFeatured() { return 
featured; } - - - public void setFeatured(boolean featured) { - this.featured = featured; - } - - - public String getUrl() { return url; } - - - public void setUrl(String url) { - this.url = url; - } - - - public boolean isRequiresHvm() { return requiresHvm; } - - - public void setRequiresHvm(boolean requiresHvm) { - this.requiresHvm = requiresHvm; - } - - - public int getBits() { return bits; } - - - public void setBits(int bits) { - this.bits = bits; - } - - - public String getChecksum() { return checksum; } - - - public void setChecksum(String checksum) { - this.checksum = checksum; - } - - - public String getDisplayText() { return displayText; } - - - public void setDisplayText(String displayText) { - this.displayText = displayText; - } - - - public boolean isEnablePassword() { return enablePassword; } - - - public void setEnablePassword(boolean enablePassword) { - this.enablePassword = enablePassword; - } - - - public boolean isDynamicallyScalable() { return dynamicallyScalable; } - public void setDynamicallyScalable(boolean dynamicallyScalable) { - this.dynamicallyScalable = dynamicallyScalable; - } - - - public long getGuestOSId() { return guestOSId; } - - - public void setGuestOSId(long guestOSId) { - this.guestOSId = guestOSId; - } - - - public String getGuestOSUuid() { return guestOSUuid; } - - - public void setGuestOSUuid(String guestOSUuid) { - this.guestOSUuid = guestOSUuid; - } - - - public String getGuestOSName() { return guestOSName; } - - - public void setGuestOSName(String guestOSName) { - this.guestOSName = guestOSName; - } - - - public boolean isBootable() { return bootable; } - - - public void setBootable(boolean bootable) { - this.bootable = bootable; - } - - - public boolean isPrepopulate() { return prepopulate; } - - - public void setPrepopulate(boolean prepopulate) { - this.prepopulate = prepopulate; - } - - - public boolean isCrossZones() { return crossZones; } - - - public void setCrossZones(boolean crossZones) { - this.crossZones = 
crossZones; - } - - - public HypervisorType getHypervisorType() { return hypervisorType; } - - - public void setHypervisorType(HypervisorType hypervisorType) { - this.hypervisorType = hypervisorType; - } - - - public Long getSourceTemplateId() { return sourceTemplateId; } - - - public void setSourceTemplateId(Long sourceTemplateId) { - this.sourceTemplateId = sourceTemplateId; - } - - - public String getSourceTemplateUuid() { return sourceTemplateUuid; } - - - public void setSourceTemplateUuid(String sourceTemplateUuid) { - this.sourceTemplateUuid = sourceTemplateUuid; - } - - - public String getTemplateTag() { return templateTag; } - - - public void setTemplateTag(String templateTag) { - this.templateTag = templateTag; - } - - - public int getSortKey() { return sortKey; } - - - public void setSortKey(int sortKey) { - this.sortKey = sortKey; - } - - - public boolean isEnableSshKey() { return enableSshKey; } - - - public void setEnableSshKey(boolean enableSshKey) { - this.enableSshKey = enableSshKey; - } - - - public Status getDownloadState() { return downloadState; } - - - public void setDownloadState(Status downloadState) { - this.downloadState = downloadState; - } - - - public long getSize() { return size; } - - - public void setSize(long size) { - this.size = size; - } - - - public boolean isDestroyed() { return destroyed; } - - - public void setDestroyed(boolean destroyed) { - this.destroyed = destroyed; - } - - - public Long getSharedAccountId() { return sharedAccountId; } - - - public void setSharedAccountId(Long sharedAccountId) { - this.sharedAccountId = sharedAccountId; - } - - - public String getDetailName() { return detailName; } - - - public void setDetailName(String detailName) { - this.detailName = detailName; - } - - - public String getDetailValue() { return detailValue; } - - - public void setDetailValue(String detailValue) { - this.detailValue = detailValue; - } - - - public Date getCreatedOnStore() { return createdOnStore; } - - - public void 
setCreatedOnStore(Date createdOnStore) { - this.createdOnStore = createdOnStore; - } - - - public Storage.ImageFormat getFormat() { return format; } - - - public void setFormat(Storage.ImageFormat format) { - this.format = format; - } - - - public int getDownloadPercent() { return downloadPercent; } - - - public void setDownloadPercent(int downloadPercent) { - this.downloadPercent = downloadPercent; - } - - - public String getErrorString() { return errorString; } - - - public void setErrorString(String errorString) { - this.errorString = errorString; - } - - - public Long getDataStoreId() { return dataStoreId; } - - - public void setDataStoreId(Long dataStoreId) { - this.dataStoreId = dataStoreId; - } - - - public ObjectInDataStoreStateMachine.State getState() { return state; } - - - public void setState(ObjectInDataStoreStateMachine.State state) { - this.state = state; - } - - - public ScopeType getDataStoreScope() { return dataStoreScope; } - - public void setDataStoreScope(ScopeType dataStoreScope) { - this.dataStoreScope = dataStoreScope; - } - - public String getTempZonePair() { return tempZonePair; } - - - - public void setTempZonePair(String tempZonePair) { - this.tempZonePair = tempZonePair; - } - - } diff --git a/server/src/com/cloud/api/query/vo/UserAccountJoinVO.java b/server/src/com/cloud/api/query/vo/UserAccountJoinVO.java index c44027b8bc0..c020c01798a 100644 --- a/server/src/com/cloud/api/query/vo/UserAccountJoinVO.java +++ b/server/src/com/cloud/api/query/vo/UserAccountJoinVO.java @@ -83,7 +83,6 @@ public class UserAccountJoinVO extends BaseViewVO implements InternalIdentity, I @Column (name="incorrect_login_attempts") int loginAttempts; - @Column(name="account_id") private long accountId; @@ -116,7 +115,7 @@ public class UserAccountJoinVO extends BaseViewVO implements InternalIdentity, I @Column(name="job_status") private int jobStatus; - + @Column(name = "default") boolean isDefault; @@ -128,233 +127,112 @@ public class UserAccountJoinVO extends 
BaseViewVO implements InternalIdentity, I return id; } - @Override - public void setId(long id) { - this.id = id; - } - @Override public String getUuid() { return uuid; } - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public long getAccountId() { return accountId; } - public void setAccountId(long accountId) { - this.accountId = accountId; - } - public String getAccountUuid() { return accountUuid; } - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - public String getAccountName() { return accountName; } - public void setAccountName(String accountName) { - this.accountName = accountName; - } - - public short getAccountType() { return accountType; } - public void setAccountType(short accountType) { - this.accountType = accountType; - } - - public long getDomainId() { return domainId; } - public void setDomainId(long domainId) { - this.domainId = domainId; - } - - public String getDomainUuid() { return domainUuid; } - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - - public String getDomainName() { return domainName; } - public void setDomainName(String domainName) { - this.domainName = domainName; - } - - public String getDomainPath() { return domainPath; } - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - public String getUsername() { return username; } - public void setUsername(String username) { - this.username = username; - } - public String getPassword() { return password; } - public void setPassword(String password) { - this.password = password; - } - public String getFirstname() { return firstname; } - public void setFirstname(String firstname) { - this.firstname = firstname; - } - public String getLastname() { return lastname; } - public void setLastname(String lastname) { - this.lastname = lastname; - } - public String getEmail() { return email; } - public void setEmail(String email) { - this.email = email; - } - public String 
getState() { return state; } - public void setState(String state) { - this.state = state; - } - public String getApiKey() { return apiKey; } - public void setApiKey(String apiKey) { - this.apiKey = apiKey; - } - public String getSecretKey() { return secretKey; } - public void setSecretKey(String secretKey) { - this.secretKey = secretKey; - } - public Date getCreated() { return created; } - public void setCreated(Date created) { - this.created = created; - } - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - public String getTimezone() { return timezone; } - public void setTimezone(String timezone) { - this.timezone = timezone; - } - public String getRegistrationToken() { return registrationToken; } - public void setRegistrationToken(String registrationToken) { - this.registrationToken = registrationToken; - } - public boolean isRegistered() { return registered; } - public void setRegistered(boolean registered) { - this.registered = registered; - } - public int getLoginAttempts() { return loginAttempts; } - public void setLoginAttempts(int loginAttempts) { - this.loginAttempts = loginAttempts; - } - public Long getJobId() { return jobId; } - public void setJobId(Long jobId) { - this.jobId = jobId; - } - public String getJobUuid() { return jobUuid; } - public void setJobUuid(String jobUuid) { - this.jobUuid = jobUuid; - } - public int getJobStatus() { return jobStatus; } - public void setJobStatus(int jobStatus) { - this.jobStatus = jobStatus; - } - public boolean isDefault() { return isDefault; } - - public void setDefault(boolean isDefault) { - this.isDefault = isDefault; - } - - } diff --git a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java index 745db566a74..5aae820d810 100644 --- a/server/src/com/cloud/api/query/vo/UserVmJoinVO.java +++ b/server/src/com/cloud/api/query/vo/UserVmJoinVO.java @@ -31,9 +31,9 @@ import 
javax.persistence.Transient; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.network.Network.GuestType; import com.cloud.network.Networks.TrafficType; -import com.cloud.server.ResourceTag.TaggedResourceType; -import com.cloud.storage.Volume; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.storage.Storage.StoragePoolType; +import com.cloud.storage.Volume; import com.cloud.utils.db.Encrypt; import com.cloud.utils.db.GenericDao; import com.cloud.vm.VirtualMachine; @@ -366,7 +366,7 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="tag_resource_type") @Enumerated(value=EnumType.STRING) - private TaggedResourceType tagResourceType; + private ResourceObjectType tagResourceType; @Column(name="tag_customer") private String tagCustomer; @@ -393,6 +393,12 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="dynamically_scalable") private boolean isDynamicallyScalable; + + @Column(name="detail_name") + private String detailName; + + @Column(name="detail_value") + private String detailValue; public UserVmJoinVO() { } @@ -403,25 +409,11 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { return id; } - @Override - public void setId(long id) { - this.id = id; - } - - @Override public String getUuid() { return uuid; } - - - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public String getPassword() { return password; } @@ -430,839 +422,314 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { this.password = password; } - public String getName() { return name; } - - public void setName(String name) { - this.name = name; - } - - public String getDisplayName() { return displayName; } - - public void setDisplayName(String displayName) { - this.displayName = displayName; - } - - @Override public long getAccountId() { return accountId; } - - public void setAccountId(long 
accountId) { - this.accountId = accountId; - } - - @Override public String getAccountUuid() { return accountUuid; } - - - - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - - - - @Override public String getAccountName() { return accountName; } - - public void setAccountName(String accountName) { - this.accountName = accountName; - } - - @Override public short getAccountType() { return accountType; } - - public void setAccountType(short accountType) { - this.accountType = accountType; - } - - @Override public long getDomainId() { return domainId; } - - public void setDomainId(long domainId) { - this.domainId = domainId; - } - - @Override public String getDomainUuid() { return domainUuid; } - - - - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - - - - @Override public String getDomainName() { return domainName; } - - public void setDomainName(String domainName) { - this.domainName = domainName; - } - - @Override public String getDomainPath() { return domainPath; } - - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - - - - public long getInstanceGroupId() { return instanceGroupId; } - - public void setInstanceGroupId(long instanceGroupId) { - this.instanceGroupId = instanceGroupId; - } - - public String getInstanceGroupUuid() { return instanceGroupUuid; } - - - - public void setInstanceGroupUuid(String instanceGroupUuid) { - this.instanceGroupUuid = instanceGroupUuid; - } - - - - public String getInstanceGroupName() { return instanceGroupName; } - - public void setInstanceGroupName(String instanceGroupName) { - this.instanceGroupName = instanceGroupName; - } - - public VirtualMachine.Type getType() { return type; } - - - - public void setType(VirtualMachine.Type type) { - this.type = type; - } - - - - public State getState() { return state; } - - public void setState(State state) { - this.state = state; - } - - public Date getCreated() { return created; } - 
- public void setCreated(Date created) { - this.created = created; - } - - public Date getRemoved() { return removed; } - - public void setRemoved(Date removed) { - this.removed = removed; - } - - public String getInstanceName() { return instanceName; } - - public void setInstanceName(String instanceName) { - this.instanceName = instanceName; - } - - public long getGuestOSId() { return guestOsId; } - - public void setGuestOSId(long guestOSId) { - this.guestOsId = guestOSId; - } - - public String getGuestOsUuid() { return guestOsUuid; } - - - - public void setGuestOsUuid(String guestOsUuid) { - this.guestOsUuid = guestOsUuid; - } - - - - public HypervisorType getHypervisorType() { return hypervisorType; } - - public void setHypervisorType(HypervisorType hypervisorType) { - this.hypervisorType = hypervisorType; - } - - public boolean isHaEnabled() { return haEnabled; } - - public void setHaEnabled(boolean haEnabled) { - this.haEnabled = haEnabled; - } - - public void setVncPassword(String vncPassword) { - this.vncPassword = vncPassword; - } - public String getVncPassword() { return vncPassword; } - - - public String getPrivateIpAddress() { return privateIpAddress; } - - - - public void setPrivateIpAddress(String privateIpAddress) { - this.privateIpAddress = privateIpAddress; - } - - - - public String getPrivateMacAddress() { return privateMacAddress; } - - - - public void setPrivateMacAddress(String privateMacAddress) { - this.privateMacAddress = privateMacAddress; - } - - - - public Long getLastHostId() { return lastHostId; } - - - - public void setLastHostId(Long lastHostId) { - this.lastHostId = lastHostId; - } - - - - - - public Long getPodId() { return podId; } - - - - public void setPodId(Long podIdToDeployIn) { - this.podId = podIdToDeployIn; - } - - - - public String getPodUuid() { return podUuid; } - - - - public void setPodUuid(String podUuid) { - this.podUuid = podUuid; - } - - - - public long getDataCenterId() { return dataCenterId; } - - - - public void 
setDataCenterId(long dataCenterIdToDeployIn) { - this.dataCenterId = dataCenterIdToDeployIn; - } - - public boolean limitCpuUse() { return limitCpuUse; } - public void setLimitCpuUse(boolean value) { - limitCpuUse = value; - } - public boolean isDisplayVm() { return displayVm; } - public void setDisplayVm(boolean displayVm) { - this.displayVm = displayVm; - } - public String getDataCenterUuid() { return dataCenterUuid; } - - - - public void setDataCenterUuid(String zoneUuid) { - this.dataCenterUuid = zoneUuid; - } - - - - public String getDataCenterName() { return dataCenterName; } - - public void setDataCenterName(String zoneName) { - this.dataCenterName = zoneName; - } - - public boolean isSecurityGroupEnabled() { return securityGroupEnabled; } - - public void setSecurityGroupEnabled(boolean securityGroupEnabled) { - this.securityGroupEnabled = securityGroupEnabled; - } - - public Long getHostId() { return hostId; } - - public void setHostId(long hostId) { - this.hostId = hostId; - } - - public String getHostUuid() { return hostUuid; } - - - - public void setHostUuid(String hostUuid) { - this.hostUuid = hostUuid; - } - - - - public String getHostName() { return hostName; } - - public void setHostName(String hostName) { - this.hostName = hostName; - } - - public long getTemplateId() { return templateId; } - - public void setTemplateId(long templateId) { - this.templateId = templateId; - } - - - public String getTemplateUuid() { return templateUuid; } - - - - public void setTemplateUuid(String templateUuid) { - this.templateUuid = templateUuid; - } - - - - public String getTemplateName() { return templateName; } - - public void setTemplateName(String templateName) { - this.templateName = templateName; - } - - public String getTemplateDisplayText() { return templateDisplayText; } - - public void setTemplateDisplayText(String templateDisplayText) { - this.templateDisplayText = templateDisplayText; - } - - public boolean isPasswordEnabled() { return passwordEnabled; } 
- - public void setPasswordEnabled(boolean passwordEnabled) { - this.passwordEnabled = passwordEnabled; - } - - public Long getIsoId() { return isoId; } - - public void setIsoId(long isoId) { - this.isoId = isoId; - } - - public String getIsoUuid() { return isoUuid; } - - - - public void setIsoUuid(String isoUuid) { - this.isoUuid = isoUuid; - } - - - - public String getIsoName() { return isoName; } - - public void setIsoName(String isoName) { - this.isoName = isoName; - } - - public String getIsoDisplayText() { return isoDisplayText; } - public void setIsoDisplayText(String isoDisplayText) { - this.isoDisplayText = isoDisplayText; - } - - - - public String getServiceOfferingUuid() { return serviceOfferingUuid; } - - - - public void setServiceOfferingUuid(String serviceOfferingUuid) { - this.serviceOfferingUuid = serviceOfferingUuid; - } - - - - public String getServiceOfferingName() { return serviceOfferingName; } - - public void setServiceOfferingName(String serviceOfferingName) { - this.serviceOfferingName = serviceOfferingName; - } - - public int getCpu() { return cpu; } - - public void setCpu(int cpu) { - this.cpu = cpu; - } - - public int getSpeed() { return speed; } - - public void setSpeed(int speed) { - this.speed = speed; - } - - public int getRamSize() { return ramSize; } - - public void setRamSize(int ramSize) { - this.ramSize = ramSize; - } - - public long getPoolId() { return poolId; } - - public void setPoolId(long poolId) { - this.poolId = poolId; - } - - public StoragePoolType getPoolType() { return poolType; } - - public void setPoolType(StoragePoolType poolType) { - this.poolType = poolType; - } - - public long getVolume_id() { return volume_id; } - - public void setVolume_id(long volume_id) { - this.volume_id = volume_id; - } - - public Long getVolumeDeviceId() { return volumeDeviceId; } - - public void setVolumeDeviceId(Long volumeDeviceId) { - this.volumeDeviceId = volumeDeviceId; - } - - public Volume.Type getVolumeType() { return volumeType; 
} - - public void setVolumeType(Volume.Type volumeType) { - this.volumeType = volumeType; - } - - public long getSecurityGroupId() { return securityGroupId; } - - public void setSecurityGroupId(long securityGroupId) { - this.securityGroupId = securityGroupId; - } - - public String getSecurityGroupName() { return securityGroupName; } - - public void setSecurityGroupName(String securityGroupName) { - this.securityGroupName = securityGroupName; - } - - public String getSecurityGroupDescription() { return securityGroupDescription; } - - public void setSecurityGroupDescription(String securityGroupDescription) { - this.securityGroupDescription = securityGroupDescription; - } - - public long getVpcId() { return vpcId; } - - - public void setVpcId(long vpcId) { - this.vpcId = vpcId; - } - - - - public long getNicId() { return nicId; } - - public void setNicId(long nicId) { - this.nicId = nicId; - } - - public boolean isDefaultNic() { return isDefaultNic; } - - public void setDefaultNic(boolean isDefaultNic) { - this.isDefaultNic = isDefaultNic; - } - - public String getIpAddress() { return ipAddress; } - - public void setIpAddress(String ipAddress) { - this.ipAddress = ipAddress; - } - - public String getGateway() { return gateway; } - - public void setGateway(String gateway) { - this.gateway = gateway; - } - - public String getNetmask() { return netmask; } - - public void setNetmask(String netmask) { - this.netmask = netmask; - } - - public String getMacAddress() { return macAddress; } - - public void setMacAddress(String macAddress) { - this.macAddress = macAddress; - } - - public URI getBroadcastUri() { return broadcastUri; } - - public void setBroadcastUri(URI broadcastUri) { - this.broadcastUri = broadcastUri; - } - - public URI getIsolationUri() { return isolationUri; } - - public void setIsolationUri(URI isolationUri) { - this.isolationUri = isolationUri; - } - - public long getNetworkId() { return networkId; } - - public void setNetworkId(long networkId) { - 
this.networkId = networkId; - } - - public String getNetworkName() { return networkName; } - - public void setNetworkName(String networkName) { - this.networkName = networkName; - } - - public TrafficType getTrafficType() { return trafficType; } - - public void setTrafficType(TrafficType trafficType) { - this.trafficType = trafficType; - } - - public GuestType getGuestType() { return guestType; } - - public void setGuestType(GuestType guestType) { - this.guestType = guestType; - } - - public long getPublicIpId() { return publicIpId; } - - - - public void setPublicIpId(long publicIpId) { - this.publicIpId = publicIpId; - } - - - - public String getPublicIpAddress() { return publicIpAddress; } - - public void setPublicIpAddress(String publicIpAddress) { - this.publicIpAddress = publicIpAddress; - } - - - public long getServiceOfferingId() { return serviceOfferingId; } - - - - public void setServiceOfferingId(long serviceOfferingId) { - this.serviceOfferingId = serviceOfferingId; - } - - public Map getDetails() { return details; } @@ -1273,382 +740,116 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { return details != null ? 
details.get(name) : null; } - public void setDetail(String name, String value) { - assert (details != null) : "Did you forget to load the details?"; - - details.put(name, value); - } - - public void setDetails(Map details) { - this.details = details; - } - - public void setUserData(String userData) { - this.userData = userData; - } - public String getUserData() { return userData; } - - public long getGuestOsId() { return guestOsId; } - - - - public void setGuestOsId(long guestOsId) { - this.guestOsId = guestOsId; - } - - - - public long getProjectId() { return projectId; } - - - - public void setProjectId(long projectId) { - this.projectId = projectId; - } - - - - @Override public String getProjectUuid() { return projectUuid; } - - - - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - - - - @Override public String getProjectName() { return projectName; } - - - - public void setProjectName(String projectName) { - this.projectName = projectName; - } - - - - public String getKeypairName() { return keypairName; } - - - - public void setKeypairName(String keypairName) { - this.keypairName = keypairName; - } - - - - public long getTagId() { return tagId; } - - - - public void setTagId(long tagId) { - this.tagId = tagId; - } - - - - public String getTagUuid() { return tagUuid; } - - - - public void setTagUuid(String tagUuid) { - this.tagUuid = tagUuid; - } - - - - public String getTagKey() { return tagKey; } - - - - public void setTagKey(String tagKey) { - this.tagKey = tagKey; - } - - - - public String getTagValue() { return tagValue; } - - - - public void setTagValue(String tagValue) { - this.tagValue = tagValue; - } - - - - public long getTagDomainId() { return tagDomainId; } - - - - public void setTagDomainId(long tagDomainId) { - this.tagDomainId = tagDomainId; - } - - - - public long getTagAccountId() { return tagAccountId; } - - - - public void setTagAccountId(long tagAccountId) { - this.tagAccountId = tagAccountId; - } - - - 
- public long getTagResourceId() { return tagResourceId; } - - - - public void setTagResourceId(long tagResourceId) { - this.tagResourceId = tagResourceId; - } - - - - public String getTagResourceUuid() { return tagResourceUuid; } - - - - public void setTagResourceUuid(String tagResourceUuid) { - this.tagResourceUuid = tagResourceUuid; - } - - - - - public TaggedResourceType getTagResourceType() { + public ResourceObjectType getTagResourceType() { return tagResourceType; } - - - - public void setTagResourceType(TaggedResourceType tagResourceType) { - this.tagResourceType = tagResourceType; - } - - - - public String getTagCustomer() { return tagCustomer; } - - - - public void setTagCustomer(String tagCustomer) { - this.tagCustomer = tagCustomer; - } - - - - public boolean isLimitCpuUse() { return limitCpuUse; } - - public String getPoolUuid() { return poolUuid; } - - - - public void setPoolUuid(String poolUuid) { - this.poolUuid = poolUuid; - } - - - - public String getVolume_uuid() { return volume_uuid; } - - - - public void setVolume_uuid(String volume_uuid) { - this.volume_uuid = volume_uuid; - } - - - - public String getSecurityGroupUuid() { return securityGroupUuid; } - - - - public void setSecurityGroupUuid(String securityGroupUuid) { - this.securityGroupUuid = securityGroupUuid; - } - - - - public String getVpcUuid() { return vpcUuid; } - - - - public void setVpcUuid(String vpcUuid) { - this.vpcUuid = vpcUuid; - } - - - - public String getNicUuid() { return nicUuid; } - - - - public void setNicUuid(String nicUuid) { - this.nicUuid = nicUuid; - } - - - - public String getNetworkUuid() { return networkUuid; } - - - - public void setNetworkUuid(String networkUuid) { - this.networkUuid = networkUuid; - } - - - - public String getPublicIpUuid() { return publicIpUuid; } - - - - public void setPublicIpUuid(String publicIpUuid) { - this.publicIpUuid = publicIpUuid; - } - - - public Long getJobId() { return jobId; } - - - - public void setJobId(Long jobId) { - 
this.jobId = jobId; - } - - - - public String getJobUuid() { return jobUuid; } - - - - public void setJobUuid(String jobUuid) { - this.jobUuid = jobUuid; - } - - - - public int getJobStatus() { return jobStatus; } - - - - public void setJobStatus(int jobStatus) { - this.jobStatus = jobStatus; - } - - - transient String toString; @Override public String toString() { @@ -1658,64 +859,30 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { return toString; } - public String getIp6Address() { - return ip6Address; - } + public String getIp6Address() { + return ip6Address; + } + public String getIp6Gateway() { + return ip6Gateway; + } - - - public void setIp6Address(String ip6Address) { - this.ip6Address = ip6Address; - } - - - - - public String getIp6Gateway() { - return ip6Gateway; - } - - - - - public void setIp6Gateway(String ip6Gateway) { - this.ip6Gateway = ip6Gateway; - } - - - - - public String getIp6Cidr() { - return ip6Cidr; - } - - - - - public void setIp6Cidr(String ip6Cidr) { - this.ip6Cidr = ip6Cidr; - } - + public String getIp6Cidr() { + return ip6Cidr; + } public long getAffinityGroupId() { return affinityGroupId; } - - public String getAffinityGroupUuid() { return affinityGroupUuid; } - - public String getAffinityGroupName() { return affinityGroupName; } - - public String getAffinityGroupDescription() { return affinityGroupDescription; } @@ -1724,9 +891,12 @@ public class UserVmJoinVO extends BaseViewVO implements ControlledViewEntity { return isDynamicallyScalable; } - public void setDynamicallyScalable(boolean isDynamicallyScalable) { - this.isDynamicallyScalable = isDynamicallyScalable; + public String getDetailName() { + return detailName; } + public String getDetailValue() { + return detailValue; + } } diff --git a/server/src/com/cloud/api/query/vo/VolumeJoinVO.java b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java index c7b43ba1569..7c03cd7bd21 100644 --- a/server/src/com/cloud/api/query/vo/VolumeJoinVO.java +++ 
b/server/src/com/cloud/api/query/vo/VolumeJoinVO.java @@ -26,7 +26,7 @@ import javax.persistence.Table; import javax.persistence.Temporal; import javax.persistence.TemporalType; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.storage.Storage; import com.cloud.storage.Volume; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; @@ -255,15 +255,17 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { @Column(name="tag_resource_type") @Enumerated(value=EnumType.STRING) - private TaggedResourceType tagResourceType; + private ResourceObjectType tagResourceType; @Column(name="tag_customer") private String tagCustomer; - @Column(name="display_volume", updatable=true, nullable=false) protected boolean displayVolume; + @Column(name="path") + protected String path; + public VolumeJoinVO() { } @@ -277,13 +279,6 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - @Override - public void setId(long id) { - this.id = id; - } - - - @Override public String getUuid() { return uuid; @@ -291,121 +286,61 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setUuid(String uuid) { - this.uuid = uuid; - } - - - public String getName() { return name; } - public void setName(String name) { - this.name = name; - } - - - public Long getDeviceId() { return deviceId; } - public void setDeviceId(Long deviceId) { - this.deviceId = deviceId; - } - - - public Volume.Type getVolumeType() { return volumeType; } - public void setVolumeType(Volume.Type volumeType) { - this.volumeType = volumeType; - } - - - public long getSize() { return size; } - public void setSize(long size) { - this.size = size; - } - public Long getMinIops() { return minIops; } - public void setMinIops(Long minIops) { - this.minIops = minIops; - } - public Long getMaxIops() { return 
maxIops; } - public void setMaxIops(Long maxIops) { - this.maxIops = maxIops; - } - public Volume.State getState() { return state; } - public void setState(Volume.State state) { - this.state = state; - } - - - public Date getCreated() { return created; } - public void setCreated(Date created) { - this.created = created; - } - - - public Date getAttached() { return attached; } - public void setAttached(Date attached) { - this.attached = attached; - } - - - public Date getRemoved() { return removed; } - public void setRemoved(Date removed) { - this.removed = removed; - } - - - @Override public long getAccountId() { return accountId; @@ -413,19 +348,10 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setAccountId(long accountId) { - this.accountId = accountId; - } - - public boolean isDisplayVolume() { return displayVolume; } - public void setDisplayVolume(boolean displayVolume) { - this.displayVolume = displayVolume; - } - @Override public String getAccountUuid() { return accountUuid; @@ -433,12 +359,6 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setAccountUuid(String accountUuid) { - this.accountUuid = accountUuid; - } - - - @Override public String getAccountName() { return accountName; @@ -446,12 +366,6 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setAccountName(String accountName) { - this.accountName = accountName; - } - - - @Override public short getAccountType() { return accountType; @@ -459,12 +373,6 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setAccountType(short accountType) { - this.accountType = accountType; - } - - - @Override public long getDomainId() { return domainId; @@ -472,12 +380,6 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setDomainId(long domainId) { - this.domainId = domainId; - } - - - @Override 
public String getDomainUuid() { return domainUuid; @@ -485,12 +387,6 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setDomainUuid(String domainUuid) { - this.domainUuid = domainUuid; - } - - - @Override public String getDomainName() { return domainName; @@ -498,12 +394,6 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setDomainName(String domainName) { - this.domainName = domainName; - } - - - @Override public String getDomainPath() { return domainPath; @@ -511,24 +401,12 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setDomainPath(String domainPath) { - this.domainPath = domainPath; - } - - - public long getProjectId() { return projectId; } - public void setProjectId(long projectId) { - this.projectId = projectId; - } - - - @Override public String getProjectUuid() { return projectUuid; @@ -536,12 +414,6 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setProjectUuid(String projectUuid) { - this.projectUuid = projectUuid; - } - - - @Override public String getProjectName() { return projectName; @@ -549,245 +421,122 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setProjectName(String projectName) { - this.projectName = projectName; - } - - - public long getVmId() { return vmId; } - public void setVmId(long vmId) { - this.vmId = vmId; - } - - - public String getVmUuid() { return vmUuid; } - public void setVmUuid(String vmUuid) { - this.vmUuid = vmUuid; - } - - - public String getVmName() { return vmName; } - public void setVmName(String vmName) { - this.vmName = vmName; - } - - - public String getVmDisplayName() { return vmDisplayName; } - public void setVmDisplayName(String vmDisplayName) { - this.vmDisplayName = vmDisplayName; - } - - - public VirtualMachine.State getVmState() { return vmState; } - public void 
setVmState(VirtualMachine.State vmState) { - this.vmState = vmState; - } - - - public VirtualMachine.Type getVmType() { return vmType; } - public void setVmType(VirtualMachine.Type vmType) { - this.vmType = vmType; - } - public long getVolumeStoreSize() { return volumeStoreSize; } - public void setVolumeStoreSize(long volumeStoreSize) { - this.volumeStoreSize = volumeStoreSize; - } - public Date getCreatedOnStore() { return createdOnStore; } - public void setCreatedOnStore(Date createdOnStore) { - this.createdOnStore = createdOnStore; - } - public Storage.ImageFormat getFormat() { return format; } - public void setFormat(Storage.ImageFormat format) { - this.format = format; - } - - - public int getDownloadPercent() { return downloadPercent; } - public void setDownloadPercent(int downloadPercent) { - this.downloadPercent = downloadPercent; - } - - - public Status getDownloadState() { return downloadState; } - public void setDownloadState(Status downloadState) { - this.downloadState = downloadState; - } - - - public String getErrorString() { return errorString; } - public void setErrorString(String errorString) { - this.errorString = errorString; - } - - - public HypervisorType getHypervisorType() { return hypervisorType; } - public void setHypervisorType(HypervisorType hypervisorType) { - this.hypervisorType = hypervisorType; - } - - - public long getDiskOfferingId() { return diskOfferingId; } - public void setDiskOfferingId(long diskOfferingId) { - this.diskOfferingId = diskOfferingId; - } - - - public String getDiskOfferingUuid() { return diskOfferingUuid; } - public void setDiskOfferingUuid(String diskOfferingUuid) { - this.diskOfferingUuid = diskOfferingUuid; - } - - - public String getDiskOfferingName() { return diskOfferingName; } - public void setDiskOfferingName(String diskOfferingName) { - this.diskOfferingName = diskOfferingName; - } - - - public String getDiskOfferingDisplayText() { return diskOfferingDisplayText; } - public void 
setDiskOfferingDisplayText(String diskOfferingDisplayText) { - this.diskOfferingDisplayText = diskOfferingDisplayText; - } - - - public boolean isUseLocalStorage() { return useLocalStorage; } - public void setUseLocalStorage(boolean useLocalStorage) { - this.useLocalStorage = useLocalStorage; - } - - - public void setBytesReadRate(Long bytesReadRate) { - this.bytesReadRate = bytesReadRate; - } - public Long getBytesReadRate() { return bytesReadRate; } - public void setBytesWriteRate(Long bytesWriteRate) { - this.bytesWriteRate = bytesWriteRate; - } - public Long getBytesWriteRate() { return bytesWriteRate; } - public void setIopsReadRate(Long iopsReadRate) { - this.iopsReadRate = iopsReadRate; - } - public Long getIopsReadRate() { return iopsReadRate; } - public void setIopsWriteRate(Long iopsWriteRate) { - this.iopsWriteRate = iopsWriteRate; - } - public Long getIopsWriteRate() { return iopsWriteRate; } @@ -798,299 +547,153 @@ public class VolumeJoinVO extends BaseViewVO implements ControlledViewEntity { - public void setPoolId(long poolId) { - this.poolId = poolId; - } - - - public String getPoolUuid() { return poolUuid; } - public void setPoolUuid(String poolUuid) { - this.poolUuid = poolUuid; - } - - - public String getPoolName() { return poolName; } - public void setPoolName(String poolName) { - this.poolName = poolName; - } - - - public long getTemplateId() { return templateId; } - public void setTemplateId(long templateId) { - this.templateId = templateId; - } - - - public String getTemplateUuid() { return templateUuid; } - public void setTemplateUuid(String templateUuid) { - this.templateUuid = templateUuid; - } - - - public boolean isExtractable() { return extractable; } - public void setExtractable(boolean extractable) { - this.extractable = extractable; - } - - - public Storage.TemplateType getTemplateType() { return templateType; } - public void setTemplateType(Storage.TemplateType templateType) { - this.templateType = templateType; - } - - - public 
Long getJobId() { return jobId; } - public void setJobId(Long jobId) { - this.jobId = jobId; - } - - - public String getJobUuid() { return jobUuid; } - public void setJobUuid(String jobUuid) { - this.jobUuid = jobUuid; - } - - - public int getJobStatus() { return jobStatus; } - public void setJobStatus(int jobStatus) { - this.jobStatus = jobStatus; - } - - - public long getTagId() { return tagId; } - public void setTagId(long tagId) { - this.tagId = tagId; - } - - - public String getTagUuid() { return tagUuid; } - public void setTagUuid(String tagUuid) { - this.tagUuid = tagUuid; - } - - - public String getTagKey() { return tagKey; } - public void setTagKey(String tagKey) { - this.tagKey = tagKey; - } - - - public String getTagValue() { return tagValue; } - public void setTagValue(String tagValue) { - this.tagValue = tagValue; - } - - - public long getTagDomainId() { return tagDomainId; } - public void setTagDomainId(long tagDomainId) { - this.tagDomainId = tagDomainId; - } - - - public long getTagAccountId() { return tagAccountId; } - public void setTagAccountId(long tagAccountId) { - this.tagAccountId = tagAccountId; - } - - - public long getTagResourceId() { return tagResourceId; } - public void setTagResourceId(long tagResourceId) { - this.tagResourceId = tagResourceId; - } - - - public String getTagResourceUuid() { return tagResourceUuid; } - public void setTagResourceUuid(String tagResourceUuid) { - this.tagResourceUuid = tagResourceUuid; - } - - - - public TaggedResourceType getTagResourceType() { + public ResourceObjectType getTagResourceType() { return tagResourceType; } - public void setTagResourceType(TaggedResourceType tagResourceType) { - this.tagResourceType = tagResourceType; - } - - - public String getTagCustomer() { return tagCustomer; } - public void setTagCustomer(String tagCustomer) { - this.tagCustomer = tagCustomer; - } - - - public long getDataCenterId() { return dataCenterId; } - public void setDataCenterId(long dataCenterId) { - 
this.dataCenterId = dataCenterId; - } - - - public String getDataCenterUuid() { return dataCenterUuid; } - public void setDataCenterUuid(String dataCenterUuid) { - this.dataCenterUuid = dataCenterUuid; - } - - - public String getDataCenterName() { return dataCenterName; } - public void setDataCenterName(String dataCenterName) { - this.dataCenterName = dataCenterName; - } - - - public long getPodId() { return podId; } - public void setPodId(long podId) { - this.podId = podId; - } - - - public boolean isSystemUse() { return systemUse; } - public void setSystemUse(boolean systemUse) { - this.systemUse = systemUse; + public String getPath() { + return path; } - - } diff --git a/server/src/com/cloud/api/response/ApiResponseSerializer.java b/server/src/com/cloud/api/response/ApiResponseSerializer.java index 37f350fec0d..9631850ba6e 100644 --- a/server/src/com/cloud/api/response/ApiResponseSerializer.java +++ b/server/src/com/cloud/api/response/ApiResponseSerializer.java @@ -17,7 +17,6 @@ package com.cloud.api.response; import java.lang.reflect.Field; -import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.Arrays; @@ -273,47 +272,6 @@ public class ApiResponseSerializer { } } - private static Method getGetMethod(Object o, String propName) { - Method method = null; - String methodName = getGetMethodName("get", propName); - try { - method = o.getClass().getMethod(methodName); - } catch (SecurityException e1) { - s_logger.error("Security exception in getting ResponseObject " + o.getClass().getName() + " get method for property: " + propName); - } catch (NoSuchMethodException e1) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("ResponseObject " + o.getClass().getName() + " does not have " + methodName + "() method for property: " + propName - + ", will check is-prefixed method to see if it is boolean property"); - } - } - - if (method != null) - return method; - - methodName = getGetMethodName("is", propName); - 
try { - method = o.getClass().getMethod(methodName); - } catch (SecurityException e1) { - s_logger.error("Security exception in getting ResponseObject " + o.getClass().getName() + " get method for property: " + propName); - } catch (NoSuchMethodException e1) { - s_logger.warn("ResponseObject " + o.getClass().getName() + " does not have " + methodName + "() method for property: " + propName); - } - return method; - } - - private static String getGetMethodName(String prefix, String fieldName) { - StringBuffer sb = new StringBuffer(prefix); - - if (fieldName.length() >= prefix.length() && fieldName.substring(0, prefix.length()).equals(prefix)) { - return fieldName; - } else { - sb.append(fieldName.substring(0, 1).toUpperCase()); - sb.append(fieldName.substring(1)); - } - - return sb.toString(); - } - private static String escapeSpecialXmlChars(String originalString) { char[] origChars = originalString.toCharArray(); StringBuilder resultString = new StringBuilder(); diff --git a/server/src/com/cloud/capacity/CapacityManagerImpl.java b/server/src/com/cloud/capacity/CapacityManagerImpl.java index b28fe201f78..72905a7c0c4 100755 --- a/server/src/com/cloud/capacity/CapacityManagerImpl.java +++ b/server/src/com/cloud/capacity/CapacityManagerImpl.java @@ -28,7 +28,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; @@ -61,6 +60,7 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.offering.ServiceOffering; +import com.cloud.org.Cluster; import com.cloud.org.Grouping.AllocationState; import com.cloud.resource.ResourceListener; import com.cloud.resource.ResourceManager; @@ -81,6 +81,9 @@ import 
com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.fsm.StateListener; import com.cloud.vm.UserVmDetailVO; import com.cloud.vm.UserVmVO; @@ -170,100 +173,104 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @DB @Override - public boolean releaseVmCapacity(VirtualMachine vm, boolean moveFromReserved, boolean moveToReservered, Long hostId) { - ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId()); + public boolean releaseVmCapacity(VirtualMachine vm, final boolean moveFromReserved, final boolean moveToReservered, final Long hostId) { + final ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId()); CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU); CapacityVO capacityMemory = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_MEMORY); Long clusterId = null; if (hostId != null) { - HostVO host = _hostDao.findById(hostId); + HostVO host = _hostDao.findById(hostId); clusterId = host.getClusterId(); } if (capacityCpu == null || capacityMemory == null || svo == null) { return false; } - Transaction txn = Transaction.currentTxn(); try { - txn.start(); - - capacityCpu = _capacityDao.lockRow(capacityCpu.getId(), true); - capacityMemory = _capacityDao.lockRow(capacityMemory.getId(), true); - - long usedCpu = capacityCpu.getUsedCapacity(); - long usedMem = capacityMemory.getUsedCapacity(); - long reservedCpu = capacityCpu.getReservedCapacity(); - long reservedMem = capacityMemory.getReservedCapacity(); - long actualTotalCpu = capacityCpu.getTotalCapacity(); - float cpuOvercommitRatio =Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"cpuOvercommitRatio").getValue()); - 
float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId,"memoryOvercommitRatio").getValue()); - int vmCPU = svo.getCpu() * svo.getSpeed(); - long vmMem = svo.getRamSize() * 1024L * 1024L; - long actualTotalMem = capacityMemory.getTotalCapacity(); - long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); - long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); - s_logger.debug("Hosts's actual total RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem); - } - - - if (!moveFromReserved) { - /* move resource from used */ - if (usedCpu >= vmCPU) { - capacityCpu.setUsedCapacity(usedCpu - vmCPU); - } - if (usedMem >= vmMem) { - capacityMemory.setUsedCapacity(usedMem - vmMem); - } - - if (moveToReservered) { - if (reservedCpu + vmCPU <= totalCpu) { - capacityCpu.setReservedCapacity(reservedCpu + vmCPU); + final Long clusterIdFinal = clusterId; + final long capacityCpuId = capacityCpu.getId(); + final long capacityMemoryId = capacityMemory.getId(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + CapacityVO capacityCpu = _capacityDao.lockRow(capacityCpuId, true); + CapacityVO capacityMemory = _capacityDao.lockRow(capacityMemoryId, true); + + long usedCpu = capacityCpu.getUsedCapacity(); + long usedMem = capacityMemory.getUsedCapacity(); + long reservedCpu = capacityCpu.getReservedCapacity(); + long reservedMem = capacityMemory.getReservedCapacity(); + long actualTotalCpu = capacityCpu.getTotalCapacity(); + float cpuOvercommitRatio =Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal,"cpuOvercommitRatio").getValue()); + float memoryOvercommitRatio = 
Float.parseFloat(_clusterDetailsDao.findDetail(clusterIdFinal,"memoryOvercommitRatio").getValue()); + int vmCPU = svo.getCpu() * svo.getSpeed(); + long vmMem = svo.getRamSize() * 1024L * 1024L; + long actualTotalMem = capacityMemory.getTotalCapacity(); + long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); + long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio); + if (s_logger.isDebugEnabled()) { + s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); + s_logger.debug("Hosts's actual total RAM: " + actualTotalMem + " and RAM after applying overprovisioning: " + totalMem); } - if (reservedMem + vmMem <= totalMem) { - capacityMemory.setReservedCapacity(reservedMem + vmMem); + + + if (!moveFromReserved) { + /* move resource from used */ + if (usedCpu >= vmCPU) { + capacityCpu.setUsedCapacity(usedCpu - vmCPU); + } + if (usedMem >= vmMem) { + capacityMemory.setUsedCapacity(usedMem - vmMem); + } + + if (moveToReservered) { + if (reservedCpu + vmCPU <= totalCpu) { + capacityCpu.setReservedCapacity(reservedCpu + vmCPU); + } + if (reservedMem + vmMem <= totalMem) { + capacityMemory.setReservedCapacity(reservedMem + vmMem); + } + } + } else { + if (reservedCpu >= vmCPU) { + capacityCpu.setReservedCapacity(reservedCpu - vmCPU); + } + if (reservedMem >= vmMem) { + capacityMemory.setReservedCapacity(reservedMem - vmMem); + } } - } - } else { - if (reservedCpu >= vmCPU) { - capacityCpu.setReservedCapacity(reservedCpu - vmCPU); - } - if (reservedMem >= vmMem) { - capacityMemory.setReservedCapacity(reservedMem - vmMem); - } - } - s_logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " - + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" - + capacityCpu.getReservedCapacity() + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + 
moveToReservered); + s_logger.debug("release cpu from host: " + hostId + ", old used: " + usedCpu + ",reserved: " + reservedCpu + ", actual total: " + + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used: " + capacityCpu.getUsedCapacity() + ",reserved:" + + capacityCpu.getReservedCapacity() + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); - s_logger.debug("release mem from host: " + hostId + ", old used: " + usedMem + ",reserved: " + reservedMem + ", total: " + totalMem - + "; new used: " + capacityMemory.getUsedCapacity() + ",reserved:" + capacityMemory.getReservedCapacity() - + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); + s_logger.debug("release mem from host: " + hostId + ", old used: " + usedMem + ",reserved: " + reservedMem + ", total: " + totalMem + + "; new used: " + capacityMemory.getUsedCapacity() + ",reserved:" + capacityMemory.getReservedCapacity() + + "; movedfromreserved: " + moveFromReserved + ",moveToReservered" + moveToReservered); + + _capacityDao.update(capacityCpu.getId(), capacityCpu); + _capacityDao.update(capacityMemory.getId(), capacityMemory); + } + }); - _capacityDao.update(capacityCpu.getId(), capacityCpu); - _capacityDao.update(capacityMemory.getId(), capacityMemory); - txn.commit(); return true; } catch (Exception e) { s_logger.debug("Failed to transit vm's state, due to " + e.getMessage()); - txn.rollback(); return false; } } @DB @Override - public void allocateVmCapacity(VirtualMachine vm, boolean fromLastHost) { + public void allocateVmCapacity(VirtualMachine vm, final boolean fromLastHost) { - long hostId = vm.getHostId(); + final long hostId = vm.getHostId(); HostVO host = _hostDao.findById(hostId); - long clusterId = host.getClusterId(); - float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio").getValue()); - float memoryOvercommitRatio = 
Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio").getValue()); + final long clusterId = host.getClusterId(); + final float cpuOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "cpuOvercommitRatio").getValue()); + final float memoryOvercommitRatio = Float.parseFloat(_clusterDetailsDao.findDetail(clusterId, "memoryOvercommitRatio").getValue()); - ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId()); + final ServiceOfferingVO svo = _offeringsDao.findById(vm.getServiceOfferingId()); CapacityVO capacityCpu = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_CPU); CapacityVO capacityMem = _capacityDao.findByHostIdType(hostId, CapacityVO.CAPACITY_TYPE_MEMORY); @@ -272,74 +279,78 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, return; } - int cpu = svo.getCpu() * svo.getSpeed(); - long ram = svo.getRamSize() * 1024L * 1024L; + final int cpu = svo.getCpu() * svo.getSpeed(); + final long ram = svo.getRamSize() * 1024L * 1024L; - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - capacityCpu = _capacityDao.lockRow(capacityCpu.getId(), true); - capacityMem = _capacityDao.lockRow(capacityMem.getId(), true); + final long capacityCpuId = capacityCpu.getId(); + final long capacityMemId = capacityMem.getId(); - long usedCpu = capacityCpu.getUsedCapacity(); - long usedMem = capacityMem.getUsedCapacity(); - long reservedCpu = capacityCpu.getReservedCapacity(); - long reservedMem = capacityMem.getReservedCapacity(); - long actualTotalCpu = capacityCpu.getTotalCapacity(); - long actualTotalMem = capacityMem.getTotalCapacity(); - long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio); - long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); - if (s_logger.isDebugEnabled()) { - s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); - } + Transaction.execute(new 
TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + CapacityVO capacityCpu = _capacityDao.lockRow(capacityCpuId, true); + CapacityVO capacityMem = _capacityDao.lockRow(capacityMemId, true); - long freeCpu = totalCpu - (reservedCpu + usedCpu); - long freeMem = totalMem - (reservedMem + usedMem); - - if (s_logger.isDebugEnabled()) { - s_logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId); - s_logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); - s_logger.debug("Current Used RAM: " + usedMem + " , Free RAM:" + freeMem + " ,Requested RAM: " + ram); - } - capacityCpu.setUsedCapacity(usedCpu + cpu); - capacityMem.setUsedCapacity(usedMem + ram); - - if (fromLastHost) { - /* alloc from reserved */ - if (s_logger.isDebugEnabled()) { - s_logger.debug("We are allocating VM to the last host again, so adjusting the reserved capacity if it is not less than required"); - s_logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu); - s_logger.debug("Reserved RAM: " + reservedMem + " , Requested RAM: " + ram); - } - if (reservedCpu >= cpu && reservedMem >= ram) { - capacityCpu.setReservedCapacity(reservedCpu - cpu); - capacityMem.setReservedCapacity(reservedMem - ram); - } - } else { - /* alloc from free resource */ - if (!((reservedCpu + usedCpu + cpu <= totalCpu) && (reservedMem + usedMem + ram <= totalMem))) { + long usedCpu = capacityCpu.getUsedCapacity(); + long usedMem = capacityMem.getUsedCapacity(); + long reservedCpu = capacityCpu.getReservedCapacity(); + long reservedMem = capacityMem.getReservedCapacity(); + long actualTotalCpu = capacityCpu.getTotalCapacity(); + long actualTotalMem = capacityMem.getTotalCapacity(); + long totalCpu = (long) (actualTotalCpu * cpuOvercommitRatio); + long totalMem = (long) (actualTotalMem * memoryOvercommitRatio); if (s_logger.isDebugEnabled()) { - s_logger.debug("Host doesnt 
seem to have enough free capacity, but increasing the used capacity anyways, since the VM is already starting on this host "); + s_logger.debug("Hosts's actual total CPU: " + actualTotalCpu + " and CPU after applying overprovisioning: " + totalCpu); } + + long freeCpu = totalCpu - (reservedCpu + usedCpu); + long freeMem = totalMem - (reservedMem + usedMem); + + if (s_logger.isDebugEnabled()) { + s_logger.debug("We are allocating VM, increasing the used capacity of this host:" + hostId); + s_logger.debug("Current Used CPU: " + usedCpu + " , Free CPU:" + freeCpu + " ,Requested CPU: " + cpu); + s_logger.debug("Current Used RAM: " + usedMem + " , Free RAM:" + freeMem + " ,Requested RAM: " + ram); + } + capacityCpu.setUsedCapacity(usedCpu + cpu); + capacityMem.setUsedCapacity(usedMem + ram); + + if (fromLastHost) { + /* alloc from reserved */ + if (s_logger.isDebugEnabled()) { + s_logger.debug("We are allocating VM to the last host again, so adjusting the reserved capacity if it is not less than required"); + s_logger.debug("Reserved CPU: " + reservedCpu + " , Requested CPU: " + cpu); + s_logger.debug("Reserved RAM: " + reservedMem + " , Requested RAM: " + ram); + } + if (reservedCpu >= cpu && reservedMem >= ram) { + capacityCpu.setReservedCapacity(reservedCpu - cpu); + capacityMem.setReservedCapacity(reservedMem - ram); + } + } else { + /* alloc from free resource */ + if (!((reservedCpu + usedCpu + cpu <= totalCpu) && (reservedMem + usedMem + ram <= totalMem))) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Host doesnt seem to have enough free capacity, but increasing the used capacity anyways, since the VM is already starting on this host "); + } + } + } + + s_logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu + + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used:" + + capacityCpu.getUsedCapacity() + ", reserved:" + 
capacityCpu.getReservedCapacity() + "; requested cpu:" + cpu + + ",alloc_from_last:" + fromLastHost); + + s_logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + usedMem + ", old reserved: " + reservedMem + + ", total: " + totalMem + "; new used: " + capacityMem.getUsedCapacity() + ", reserved: " + capacityMem.getReservedCapacity() + + "; requested mem: " + ram + ",alloc_from_last:" + fromLastHost); + + _capacityDao.update(capacityCpu.getId(), capacityCpu); + _capacityDao.update(capacityMem.getId(), capacityMem); } - } - - s_logger.debug("CPU STATS after allocation: for host: " + hostId + ", old used: " + usedCpu + ", old reserved: " + reservedCpu - + ", actual total: " + actualTotalCpu + ", total with overprovisioning: " + totalCpu + "; new used:" - + capacityCpu.getUsedCapacity() + ", reserved:" + capacityCpu.getReservedCapacity() + "; requested cpu:" + cpu - + ",alloc_from_last:" + fromLastHost); - - s_logger.debug("RAM STATS after allocation: for host: " + hostId + ", old used: " + usedMem + ", old reserved: " + reservedMem - + ", total: " + totalMem + "; new used: " + capacityMem.getUsedCapacity() + ", reserved: " + capacityMem.getReservedCapacity() - + "; requested mem: " + ram + ",alloc_from_last:" + fromLastHost); - - _capacityDao.update(capacityCpu.getId(), capacityCpu); - _capacityDao.update(capacityMem.getId(), capacityMem); - txn.commit(); + }); } catch (Exception e) { - txn.rollback(); + s_logger.error("Exception allocating VM capacity", e); return; } } @@ -512,7 +523,7 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, @DB @Override - public void updateCapacityForHost(Host host){ + public void updateCapacityForHost(final Host host){ // prepare the service offerings List offerings = _offeringsDao.listAllIncludingRemoved(); Map offeringsMap = new HashMap(); @@ -625,36 +636,42 @@ public class CapacityManagerImpl extends ManagerBase implements CapacityManager, s_logger.error("Caught exception 
while updating cpu/memory capacity for the host " +host.getId(), e); } }else { - Transaction txn = Transaction.currentTxn(); - txn.start(); - CapacityVO capacity = new CapacityVO(host.getId(), - host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedMemory, - host.getTotalMemory(), - CapacityVO.CAPACITY_TYPE_MEMORY); - capacity.setReservedCapacity(reservedMemory); - CapacityState capacityState = CapacityState.Enabled; - if (host.getClusterId() != null) { - cluster = ApiDBUtils.findClusterById(host.getClusterId()); - if (cluster != null) { - capacityState = _configMgr.findClusterAllocationState(cluster) == AllocationState.Disabled ? CapacityState.Disabled - : CapacityState.Enabled; + final long usedMemoryFinal = usedMemory; + final long reservedMemoryFinal = reservedMemory; + final long usedCpuFinal = usedCpu; + final long reservedCpuFinal = reservedCpu; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + CapacityVO capacity = new CapacityVO(host.getId(), + host.getDataCenterId(), host.getPodId(), host.getClusterId(), usedMemoryFinal, + host.getTotalMemory(), + CapacityVO.CAPACITY_TYPE_MEMORY); + capacity.setReservedCapacity(reservedMemoryFinal); + CapacityState capacityState = CapacityState.Enabled; + if (host.getClusterId() != null) { + ClusterVO cluster = ApiDBUtils.findClusterById(host.getClusterId()); + if (cluster != null) { + capacityState = _configMgr.findClusterAllocationState(cluster) == AllocationState.Disabled ? 
CapacityState.Disabled + : CapacityState.Enabled; + capacity.setCapacityState(capacityState); + } + } + _capacityDao.persist(capacity); + + capacity = new CapacityVO( + host.getId(), + host.getDataCenterId(), + host.getPodId(), + host.getClusterId(), + usedCpuFinal, + host.getCpus().longValue() * host.getSpeed().longValue(), + CapacityVO.CAPACITY_TYPE_CPU); + capacity.setReservedCapacity(reservedCpuFinal); capacity.setCapacityState(capacityState); + _capacityDao.persist(capacity); } - } - _capacityDao.persist(capacity); - - capacity = new CapacityVO( - host.getId(), - host.getDataCenterId(), - host.getPodId(), - host.getClusterId(), - usedCpu, - host.getCpus().longValue() * host.getSpeed().longValue(), - CapacityVO.CAPACITY_TYPE_CPU); - capacity.setReservedCapacity(reservedCpu); - capacity.setCapacityState(capacityState); - _capacityDao.persist(capacity); - txn.commit(); + }); } diff --git a/server/src/com/cloud/configuration/Config.java b/server/src/com/cloud/configuration/Config.java index 8ca595b14d0..3fdc3439092 100755 --- a/server/src/com/cloud/configuration/Config.java +++ b/server/src/com/cloud/configuration/Config.java @@ -79,7 +79,8 @@ public enum Config { CreatePrivateTemplateFromSnapshotWait("Storage", UserVmManager.class, Integer.class, "create.private.template.from.snapshot.wait", "10800", "In second, timeout for CreatePrivateTemplateFromSnapshotCommand", null), BackupSnapshotWait( "Storage", StorageManager.class, Integer.class, "backup.snapshot.wait", "21600", "In second, timeout for BackupSnapshotCommand", null), - + HAStorageMigration("Storage", ManagementServer.class, Boolean.class, "enable.ha.storage.migration", "true", "Enable/disable storage migration across primary storage during HA", null), + // Network NetworkLBHaproxyStatsVisbility("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.visibility", "global", "Load Balancer(haproxy) stats visibilty, the value can be one of the following six parameters : 
global,guest-network,link-local,disabled,all,default", null), NetworkLBHaproxyStatsUri("Network", ManagementServer.class, String.class, "network.loadbalancer.haproxy.stats.uri","/admin?stats","Load Balancer(haproxy) uri.",null), @@ -150,7 +151,7 @@ public enum Config { ExtractURLExpirationInterval("Advanced", ManagementServer.class, Integer.class, "extract.url.expiration.interval", "14400", "The life of an extract URL after which it is deleted ", null), HostStatsInterval("Advanced", ManagementServer.class, Integer.class, "host.stats.interval", "60000", "The interval (in milliseconds) when host stats are retrieved from agents.", null), HostRetry("Advanced", AgentManager.class, Integer.class, "host.retry", "2", "Number of times to retry hosts for creating a volume", null), - IntegrationAPIPort("Advanced", ManagementServer.class, Integer.class, "integration.api.port", null, "Defaul API port", null), + IntegrationAPIPort("Advanced", ManagementServer.class, Integer.class, "integration.api.port", null, "Default API port", null), InvestigateRetryInterval("Advanced", HighAvailabilityManager.class, Integer.class, "investigate.retry.interval", "60", "Time (in seconds) between VM pings when agent is disconnected", null), MigrateRetryInterval("Advanced", HighAvailabilityManager.class, Integer.class, "migrate.retry.interval", "120", "Time (in seconds) between migration retries", null), RouterCpuMHz("Advanced", NetworkOrchestrationService.class, Integer.class, "router.cpu.mhz", String.valueOf(VpcVirtualNetworkApplianceManager.DEFAULT_ROUTER_CPU_MHZ), "Default CPU speed (MHz) for router VM.", null), @@ -245,6 +246,8 @@ public enum Config { XenHeartBeatInterval("Advanced", ManagementServer.class, Integer.class, "xen.heartbeat.interval", "60", "heartbeat to use when implementing XenServer Self Fencing", null), XenGuestNetwork("Hidden", ManagementServer.class, String.class, "xen.guest.network.device", null, "Specify for guest network name label", null), XenMaxNics("Advanced", 
AgentManager.class, Integer.class, "xen.nics.max", "7", "Maximum allowed nics for Vms created on Xen", null), + XenPVdriverVersion("Advanced", ManagementServer.class, String.class, "xen.pvdriver.version", "xenserver61", "default Xen PV driver version for registered template, valid value:xenserver56,xenserver61 ", "xenserver56,xenserver61"), + // VMware VmwareUseNexusVSwitch("Network", ManagementServer.class, Boolean.class, "vmware.use.nexus.vswitch", "false", "Enable/Disable Cisco Nexus 1000v vSwitch in VMware environment", null), VmwareUseDVSwitch("Network", ManagementServer.class, Boolean.class, "vmware.use.dvswitch", "false", "Enable/Disable Nexus/Vmware dvSwitch in VMware environment", null), @@ -376,6 +379,8 @@ public enum Config { // object store S3EnableRRS("Advanced", ManagementServer.class, Boolean.class, "s3.rrs.enabled", "false", "enable s3 reduced redundancy storage", null), + S3MaxSingleUploadSize("Advanced", ManagementServer.class, Integer.class, "s3.singleupload.max.size", "5", "The maximum size limit for S3 single part upload API(in GB). If it is set to 0, then it means always use multi-part upload to upload object to S3. " + + "If it is set to -1, then it means always use single-part upload to upload object to S3. 
", null), // Ldap LdapBasedn("Advanced", ManagementServer.class, String.class, "ldap.basedn", null, "Sets the basedn for LDAP", null), diff --git a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java index 0c7374efe3e..b7ffbfce3db 100755 --- a/server/src/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/com/cloud/configuration/ConfigurationManagerImpl.java @@ -35,8 +35,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; - import org.apache.cloudstack.acl.SecurityChecker; import org.apache.cloudstack.affinity.AffinityGroup; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -84,6 +82,7 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; +import org.apache.log4j.Logger; import com.cloud.alert.AlertManager; import com.cloud.api.ApiDBUtils; @@ -96,10 +95,10 @@ import com.cloud.dc.ClusterDetailsVO; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenter; import com.cloud.dc.DataCenter.NetworkType; +import com.cloud.dc.DataCenterDetailVO; import com.cloud.dc.DataCenterIpAddressVO; import com.cloud.dc.DataCenterLinkLocalIpAddressVO; import com.cloud.dc.DataCenterVO; -import com.cloud.dc.DcDetailVO; import com.cloud.dc.DedicatedResourceVO; import com.cloud.dc.HostPodVO; import com.cloud.dc.Pod; @@ -110,9 +109,9 @@ import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DataCenterDetailsDao; import com.cloud.dc.dao.DataCenterIpAddressDao; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDao; -import com.cloud.dc.dao.DcDetailsDao; 
import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.dc.dao.PodVlanMapDao; @@ -168,6 +167,7 @@ import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; import com.cloud.server.ConfigurationServer; import com.cloud.server.ManagementService; +import com.cloud.service.ServiceOfferingDetailsVO; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.service.dao.ServiceOfferingDetailsDao; @@ -192,6 +192,10 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.NicIpAlias; @@ -250,7 +254,6 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Inject AlertManager _alertMgr; // @com.cloud.utils.component.Inject(adapter = SecurityChecker.class) - @Inject List _secChecker; @Inject @@ -282,7 +285,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Inject ConfigurationServer _configServer; @Inject - DcDetailsDao _dcDetailsDao; + DataCenterDetailsDao _dcDetailsDao; @Inject ClusterDetailsDao _clusterDetailsDao; @Inject @@ -448,14 +451,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (zone == null) { throw new InvalidParameterValueException("unable to find zone by id " + resourceId); } - DcDetailVO dcDetailVO = _dcDetailsDao.findDetail(resourceId, name.toLowerCase()); - if (dcDetailVO == null) { - dcDetailVO = new DcDetailVO(resourceId, name, value); - _dcDetailsDao.persist(dcDetailVO); - } else { - dcDetailVO.setValue(value); - 
_dcDetailsDao.update(dcDetailVO.getId(), dcDetailVO); - } + _dcDetailsDao.addDetail(resourceId, name, value); break; case Cluster: ClusterVO cluster = _clusterDao.findById(resourceId); @@ -477,15 +473,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (pool == null) { throw new InvalidParameterValueException("unable to find storage pool by id " + resourceId); } - StoragePoolDetailVO storagePoolDetailVO = _storagePoolDetailsDao.findDetail(resourceId, name); - if (storagePoolDetailVO == null) { - storagePoolDetailVO = new StoragePoolDetailVO(resourceId, name, value); - _storagePoolDetailsDao.persist(storagePoolDetailVO); + _storagePoolDetailsDao.addDetail(resourceId, name, value); - } else { - storagePoolDetailVO.setValue(value); - _storagePoolDetailsDao.update(storagePoolDetailVO.getId(), storagePoolDetailVO); - } break; case Account: @@ -509,7 +498,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } // Execute all updates in a single transaction - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); if (!_configDao.update(name, category, value)) { @@ -907,7 +896,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati selectSql += " and removed IS NULL"; } - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); stmt.setLong(1, podId); @@ -993,9 +982,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @DB public boolean deletePod(DeletePodCmd cmd) { - Long podId = cmd.getId(); - - Transaction txn = Transaction.currentTxn(); + final Long podId = cmd.getId(); // Make sure the pod exists if (!validPod(podId)) { @@ -1004,10 +991,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati checkIfPodIsDeletable(podId); - 
HostPodVO pod = _podDao.findById(podId); - - txn.start(); + final HostPodVO pod = _podDao.findById(podId); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { // Delete private ip addresses for the pod if there are any List privateIps = _privateIpAddressDao.listByPodIdDcId(Long.valueOf(podId), pod.getDataCenterId()); @@ -1047,7 +1035,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (dr != null) { _dedicatedDao.remove(dr.getId()); } - txn.commit(); + } + }); return true; } @@ -1060,12 +1049,12 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @DB - public Pod editPod(long id, String name, String startIp, String endIp, String gateway, String netmask, + public Pod editPod(final long id, String name, String startIp, String endIp, String gateway, String netmask, String allocationStateStr) { // verify parameters - HostPodVO pod = _podDao.findById(id); - ; + final HostPodVO pod = _podDao.findById(id); + if (pod == null) { throw new InvalidParameterValueException("Unable to find pod by id " + id); } @@ -1147,23 +1136,37 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } // Verify pod's attributes - String cidr = NetUtils.ipAndNetMaskToCidr(gateway, netmask); + final String cidr = NetUtils.ipAndNetMaskToCidr(gateway, netmask); boolean checkForDuplicates = !oldPodName.equals(name); checkPodAttributes(id, name, pod.getDataCenterId(), gateway, cidr, startIp, endIp, allocationStateStr, checkForDuplicates, false); - Transaction txn = Transaction.currentTxn(); try { - txn.start(); + + final String[] existingPodIpRangeFinal = existingPodIpRange; + final String[] leftRangeToAddFinal = leftRangeToAdd; + final String[] rightRangeToAddFinal = rightRangeToAdd; + final boolean allowToDownsizeFinal = allowToDownsize; + final String allocationStateStrFinal = allocationStateStr; + 
final String startIpFinal = startIp; + final String endIpFinal = endIp; + final String nameFinal = name; + final String gatewayFinal = gateway; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { long zoneId = pod.getDataCenterId(); - if (!allowToDownsize) { - if (leftRangeToAdd != null) { - _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), leftRangeToAdd[0], leftRangeToAdd[1]); + String startIp = startIpFinal; + String endIp = endIpFinal; + + if (!allowToDownsizeFinal) { + if (leftRangeToAddFinal != null) { + _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), leftRangeToAddFinal[0], leftRangeToAddFinal[1]); } - if (rightRangeToAdd != null) { - _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), rightRangeToAdd[0], rightRangeToAdd[1]); + if (rightRangeToAddFinal != null) { + _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), rightRangeToAddFinal[0], rightRangeToAddFinal[1]); } } else { @@ -1172,34 +1175,34 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // add the new one if (startIp == null) { - startIp = existingPodIpRange[0]; + startIp = existingPodIpRangeFinal[0]; } if (endIp == null) { - endIp = existingPodIpRange[1]; + endIp = existingPodIpRangeFinal[1]; } _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), startIp, endIp); } - pod.setName(name); + pod.setName(nameFinal); pod.setDataCenterId(zoneId); - pod.setGateway(gateway); + pod.setGateway(gatewayFinal); pod.setCidrAddress(getCidrAddress(cidr)); pod.setCidrSize(getCidrSize(cidr)); String ipRange = startIp + "-" + endIp; pod.setDescription(ipRange); Grouping.AllocationState allocationState = null; - if (allocationStateStr != null && !allocationStateStr.isEmpty()) { - allocationState = Grouping.AllocationState.valueOf(allocationStateStr); - _capacityDao.updateCapacityState(null, pod.getId(), null, null, allocationStateStr); + if (allocationStateStrFinal != null && 
!allocationStateStrFinal.isEmpty()) { + allocationState = Grouping.AllocationState.valueOf(allocationStateStrFinal); + _capacityDao.updateCapacityState(null, pod.getId(), null, null, allocationStateStrFinal); pod.setAllocationState(allocationState); } _podDao.update(id, pod); - - txn.commit(); + } + }); } catch (Exception e) { s_logger.error("Unable to edit pod due to " + e.getMessage(), e); throw new CloudRuntimeException("Failed to edit pod. Please contact Cloud Support."); @@ -1222,7 +1225,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @DB - public HostPodVO createPod(long userId, String podName, long zoneId, String gateway, String cidr, String startIp, + public HostPodVO createPod(long userId, String podName, final long zoneId, String gateway, String cidr, final String startIp, String endIp, String allocationStateStr, boolean skipGatewayOverlapCheck) { // Check if the zone is valid @@ -1261,21 +1264,23 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Start ip is required parameter"); } - HostPodVO pod = new HostPodVO(podName, zoneId, gateway, cidrAddress, cidrSize, ipRange); + final HostPodVO podFinal = new HostPodVO(podName, zoneId, gateway, cidrAddress, cidrSize, ipRange); Grouping.AllocationState allocationState = null; if (allocationStateStr != null && !allocationStateStr.isEmpty()) { allocationState = Grouping.AllocationState.valueOf(allocationStateStr); - pod.setAllocationState(allocationState); + podFinal.setAllocationState(allocationState); } - Transaction txn = Transaction.currentTxn(); - txn.start(); + final String endIpFinal = endIp; + return Transaction.execute(new TransactionCallback() { + @Override + public HostPodVO doInTransaction(TransactionStatus status) { - pod = _podDao.persist(pod); + HostPodVO pod = _podDao.persist(podFinal); if (startIp != null) { - _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), startIp, endIp); 
+ _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), startIp, endIpFinal); } String[] linkLocalIpRanges = getLinkLocalIPRange(); @@ -1283,10 +1288,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati _zoneDao.addLinkLocalIpAddress(zoneId, pod.getId(), linkLocalIpRanges[0], linkLocalIpRanges[1]); } - txn.commit(); - return pod; } + }); + } @DB protected void checkIfZoneIsDeletable(long zoneId) { @@ -1370,7 +1375,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati selectSql += " AND state != '" + VirtualMachine.State.Expunging.toString() + "'"; } - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); stmt.setLong(1, zoneId); @@ -1505,11 +1510,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @ActionEvent(eventType = EventTypes.EVENT_ZONE_DELETE, eventDescription = "deleting zone", async = false) public boolean deleteZone(DeleteZoneCmd cmd) { - Transaction txn = Transaction.currentTxn(); - boolean success = false; - Long userId = CallContext.current().getCallingUserId(); - Long zoneId = cmd.getId(); + final Long zoneId = cmd.getId(); if (userId == null) { userId = Long.valueOf(User.UID_SYSTEM); @@ -1522,15 +1524,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati checkIfZoneIsDeletable(zoneId); - txn.start(); - + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { // delete vlans for this zone List vlans = _vlanDao.listByZone(zoneId); for (VlanVO vlan : vlans) { _vlanDao.remove(vlan.getId()); } - success = _zoneDao.remove(zoneId); + boolean success = _zoneDao.remove(zoneId); if (success) { // delete all capacity records for the zone @@ -1550,10 +1553,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } 
- txn.commit(); - return success; - + } + }); } @Override @@ -1561,7 +1563,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @ActionEvent(eventType = EventTypes.EVENT_ZONE_EDIT, eventDescription = "editing zone", async = false) public DataCenter editZone(UpdateZoneCmd cmd) { // Parameter validation as from execute() method in V1 - Long zoneId = cmd.getId(); + final Long zoneId = cmd.getId(); String zoneName = cmd.getZoneName(); String dns1 = cmd.getDns1(); String dns2 = cmd.getDns2(); @@ -1571,14 +1573,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String internalDns2 = cmd.getInternalDns2(); String guestCidr = cmd.getGuestCidrAddress(); List dnsSearchOrder = cmd.getDnsSearchOrder(); - Boolean isPublic = cmd.isPublic(); - String allocationStateStr = cmd.getAllocationState(); - String dhcpProvider = cmd.getDhcpProvider(); + final Boolean isPublic = cmd.isPublic(); + final String allocationStateStr = cmd.getAllocationState(); + final String dhcpProvider = cmd.getDhcpProvider(); Map detailsMap = cmd.getDetails(); String networkDomain = cmd.getDomain(); Boolean localStorageEnabled = cmd.getLocalStorageEnabled(); - Map newDetails = new HashMap(); + final Map newDetails = new HashMap(); if (detailsMap != null) { Collection zoneDetailsCollection = detailsMap.values(); Iterator iter = zoneDetailsCollection.iterator(); @@ -1612,7 +1614,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati newDetails.put(ZoneConfig.DnsSearchOrder.getName(), StringUtils.join(dnsSearchOrder, ",")); } - DataCenterVO zone = _zoneDao.findById(zoneId); + final DataCenterVO zone = _zoneDao.findById(zoneId); if (zone == null) { throw new InvalidParameterValueException("unable to find zone by id " + zoneId); } @@ -1699,9 +1701,9 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - Transaction txn = Transaction.currentTxn(); - txn.start(); - + 
Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { Map updatedDetails = new HashMap(); _zoneDao.loadDetails(zone); if (zone.getDetails() != null) { @@ -1777,15 +1779,16 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (!_zoneDao.update(zoneId, zone)) { throw new CloudRuntimeException("Failed to edit zone. Please contact Cloud Support."); } + } + }); - txn.commit(); return zone; } @Override @DB public DataCenterVO createZone(long userId, String zoneName, String dns1, String dns2, String internalDns1, - String internalDns2, String guestCidr, String domain, Long domainId, NetworkType zoneType, + String internalDns2, String guestCidr, String domain, final Long domainId, NetworkType zoneType, String allocationStateStr, String networkDomain, boolean isSecurityGroupEnabled, boolean isLocalStorageEnabled, String ip6Dns1, String ip6Dns2) { @@ -1810,23 +1813,25 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati byte[] bytes = (zoneName + System.currentTimeMillis()).getBytes(); String zoneToken = UUID.nameUUIDFromBytes(bytes).toString(); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); + // Create the new zone in the database - DataCenterVO zone = new DataCenterVO(zoneName, null, dns1, dns2, internalDns1, internalDns2, guestCidr, + final DataCenterVO zoneFinal = new DataCenterVO(zoneName, null, dns1, dns2, internalDns1, internalDns2, guestCidr, domain, domainId, zoneType, zoneToken, networkDomain, isSecurityGroupEnabled, isLocalStorageEnabled, ip6Dns1, ip6Dns2); if (allocationStateStr != null && !allocationStateStr.isEmpty()) { Grouping.AllocationState allocationState = Grouping.AllocationState.valueOf(allocationStateStr); - zone.setAllocationState(allocationState); + zoneFinal.setAllocationState(allocationState); } else { // Zone will be disabled since 3.0. 
Admin should enable it after // physical network and providers setup. - zone.setAllocationState(Grouping.AllocationState.Disabled); + zoneFinal.setAllocationState(Grouping.AllocationState.Disabled); } - zone = _zoneDao.persist(zone); + + return Transaction.execute(new TransactionCallback() { + @Override + public DataCenterVO doInTransaction(TransactionStatus status) { + DataCenterVO zone = _zoneDao.persist(zoneFinal); if (domainId != null) { // zone is explicitly dedicated to this domain // create affinity group associated and dedicate the zone. @@ -1838,15 +1843,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati // Create default system networks createDefaultSystemNetworks(zone.getId()); - txn.commit(); + return zone; - } catch (Exception ex) { - txn.rollback(); - s_logger.warn("Exception: ", ex); - throw new CloudRuntimeException("Fail to create a network"); - } finally { - txn.close(); } + }); } private AffinityGroup createDedicatedAffinityGroup(String affinityGroupName, Long domainId, Long accountId) { @@ -2100,7 +2100,12 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if ((offering = _serviceOfferingDao.persist(offering)) != null) { if (details != null) { - _serviceOfferingDetailsDao.persist(offering.getId(), details); + List detailsVO = new ArrayList(); + for (String key : details.keySet()) { + detailsVO.add(new ServiceOfferingDetailsVO(offering.getId(), key, details.get(key))); + } + + _serviceOfferingDetailsDao.saveDetails(detailsVO); } CallContext.current().setEventDetails("Service offering id=" + offering.getId()); return offering; @@ -2311,6 +2316,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati String name = cmd.getDiskOfferingName(); String displayText = cmd.getDisplayText(); Integer sortKey = cmd.getSortKey(); + Boolean displayDiskOffering = cmd.getDisplayOffering(); // Check if diskOffering exists DiskOffering diskOfferingHandle = 
_entityMgr.findById(DiskOffering.class, diskOfferingId); @@ -2319,7 +2325,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Unable to find disk offering by id " + diskOfferingId); } - boolean updateNeeded = (name != null || displayText != null || sortKey != null); + boolean updateNeeded = (name != null || displayText != null || sortKey != null || displayDiskOffering != null); if (!updateNeeded) { return _diskOfferingDao.findById(diskOfferingId); } @@ -2338,6 +2344,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati diskOffering.setSortKey(sortKey); } + if(displayDiskOffering != null){ + diskOffering.setDisplayOffering(displayDiskOffering); + } + // Note: tag editing commented out for now;keeping the code intact, // might need to re-enable in next releases // if (tags != null) @@ -2658,8 +2668,21 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (ipv4) { checkOverlapPrivateIpRange(zoneId, startIP, endIP); } - Transaction txn = Transaction.currentTxn(); - txn.start(); + + return commitVlan(zoneId, podId, startIP, endIP, newVlanGateway, newVlanNetmask, vlanId, + forVirtualNetwork, networkId, physicalNetworkId, startIPv6, endIPv6, ip6Gateway, ip6Cidr, vlanOwner, + network, sameSubnet); + } + + private Vlan commitVlan(final Long zoneId, final Long podId, final String startIP, final String endIP, final String newVlanGatewayFinal, + final String newVlanNetmaskFinal, final String vlanId, final Boolean forVirtualNetwork, final Long networkId, final Long physicalNetworkId, + final String startIPv6, final String endIPv6, final String ip6Gateway, final String ip6Cidr, final Account vlanOwner, final Network network, + final Pair> sameSubnet) { + return Transaction.execute(new TransactionCallback() { + @Override + public Vlan doInTransaction(TransactionStatus status) { + String newVlanNetmask = newVlanNetmaskFinal; + String 
newVlanGateway = newVlanGatewayFinal; if ((sameSubnet == null || sameSubnet.first() == false) && (network.getTrafficType()== TrafficType.Guest) && (network.getGuestType() == GuestType.Shared) && (_vlanDao.listVlansByNetworkId(networkId) != null)) { Map dhcpCapabilities = _networkSvc.getNetworkOfferingServiceCapabilities(_networkOfferingDao.findById(network.getNetworkOfferingId()), Service.Dhcp); @@ -2679,11 +2702,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati endIP, newVlanGateway, newVlanNetmask, vlanId, vlanOwner, startIPv6, endIPv6, ip6Gateway, ip6Cidr); // create an entry in the nic_secondary table. This will be the new // gateway that will be configured on the corresponding routervm. - - txn.commit(); - return vlan; } + }); + } public NetUtils.supersetOrSubset checkIfSubsetOrSuperset(String newVlanGateway, String newVlanNetmask, VlanVO vlan, String startIP, String endIP) { @@ -2886,7 +2908,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } // Make sure the netmask is valid - if (!NetUtils.isValidIp(vlanNetmask)) { + if (!NetUtils.isValidNetmask(vlanNetmask)) { throw new InvalidParameterValueException("Please specify a valid netmask"); } } @@ -2903,6 +2925,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati if (ipv4) { String newCidr = NetUtils.getCidrFromGatewayAndNetmask(vlanGateway, vlanNetmask); + //Make sure start and end ips are with in the range of cidr calculated for this gateway and netmask { + if(!NetUtils.isIpWithtInCidrRange(vlanGateway, newCidr) || !NetUtils.isIpWithtInCidrRange(startIP, newCidr) || !NetUtils.isIpWithtInCidrRange(endIP, newCidr)) { + throw new InvalidParameterValueException("Please specify a valid IP range or valid netmask or valid gateway"); + } + // Check if the new VLAN's subnet conflicts with the guest network // in // the specified zone (guestCidr is null for basic zone) @@ -2918,6 +2945,8 @@ public class 
ConfigurationManagerImpl extends ManagerBase implements Configurati // Check if there are any errors with the IP range checkPublicIpRangeErrors(zoneId, vlanId, vlanGateway, vlanNetmask, startIP, endIP); + checkConflictsWithPortableIpRange(zoneId, vlanId, vlanGateway, vlanNetmask, startIP, endIP); + // Throw an exception if this subnet overlaps with subnet on other VLAN, // if this is ip range extension, gateway, network mask should be same and ip range should not overlap @@ -3010,9 +3039,19 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } // Everything was fine, so persist the VLAN - Transaction txn = Transaction.currentTxn(); - txn.start(); + VlanVO vlan = commitVlanAndIpRange(zoneId, networkId, physicalNetworkId, podId, startIP, endIP, vlanGateway, + vlanNetmask, vlanId, vlanOwner, vlanIp6Gateway, vlanIp6Cidr, ipv4, zone, vlanType, ipv6Range, ipRange); + return vlan; + } + + private VlanVO commitVlanAndIpRange(final long zoneId, final long networkId, final long physicalNetworkId, final Long podId, + final String startIP, final String endIP, final String vlanGateway, final String vlanNetmask, final String vlanId, final Account vlanOwner, + final String vlanIp6Gateway, final String vlanIp6Cidr, final boolean ipv4, final DataCenterVO zone, final VlanType vlanType, + final String ipv6Range, final String ipRange) { + return Transaction.execute(new TransactionCallback() { + @Override + public VlanVO doInTransaction(TransactionStatus status) { VlanVO vlan = new VlanVO(vlanType, vlanId, vlanGateway, vlanNetmask, zone.getId(), ipRange, networkId, physicalNetworkId, vlanIp6Gateway, vlanIp6Cidr, ipv6Range); s_logger.debug("Saving vlan range " + vlan); @@ -3047,15 +3086,15 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati PodVlanMapVO podVlanMapVO = new PodVlanMapVO(podId, vlan.getId()); _podVlanMapDao.persist(podVlanMapVO); } - - txn.commit(); - return vlan; + } + }); + } @Override @DB - public boolean 
deleteVlanAndPublicIpRange(long userId, long vlanDbId, Account caller) { + public boolean deleteVlanAndPublicIpRange(long userId, final long vlanDbId, Account caller) { VlanVO vlanRange = _vlanDao.findById(vlanDbId); if (vlanRange == null) { throw new InvalidParameterValueException("Please specify a valid IP range id."); @@ -3142,12 +3181,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { _publicIpAddressDao.deletePublicIPRange(vlanDbId); _vlanDao.expunge(vlanDbId); - txn.commit(); + } + }); return true; } @@ -3225,15 +3265,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - Transaction txn = Transaction.currentTxn(); - txn.start(); - // Create an AccountVlanMapVO entry AccountVlanMapVO accountVlanMapVO = new AccountVlanMapVO(vlanOwner.getId(), vlan.getId()); _accountVlanMapDao.persist(accountVlanMapVO); - txn.commit(); - // generate usage event for dedication of every ip address in the range for (IPAddressVO ip : ips) { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_IP_ASSIGN, vlanOwner.getId(), ip.getDataCenterId(), @@ -3328,16 +3363,20 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } @DB - protected boolean savePublicIPRange(String startIP, String endIP, long zoneId, long vlanDbId, long sourceNetworkid, - long physicalNetworkId) { - long startIPLong = NetUtils.ip2Long(startIP); - long endIPLong = NetUtils.ip2Long(endIP); - Transaction txn = Transaction.currentTxn(); - txn.start(); + protected boolean savePublicIPRange(String startIP, String endIP, final long zoneId, final long vlanDbId, final long sourceNetworkid, + final long physicalNetworkId) { + final long startIPLong = NetUtils.ip2Long(startIP); + final long endIPLong = 
NetUtils.ip2Long(endIP); + + List problemIps = Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { IPRangeConfig config = new IPRangeConfig(); - List problemIps = config.savePublicIPRange(txn, startIPLong, endIPLong, zoneId, vlanDbId, + return config.savePublicIPRange(TransactionLegacy.currentTxn(), startIPLong, endIPLong, zoneId, vlanDbId, sourceNetworkid, physicalNetworkId); - txn.commit(); + } + }); + return problemIps != null && problemIps.size() == 0; } @@ -3378,6 +3417,26 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } + private void checkConflictsWithPortableIpRange(long zoneId, String vlanId, String vlanGateway, String vlanNetmask, + String startIP, String endIP) { + // check and throw exception if there is portable IP range that overlaps with public ip range being configured + if (checkOverlapPortableIpRange(_regionDao.getRegionId(), startIP, endIP)) { + throw new InvalidParameterValueException("Ip range: " + startIP + "-" + endIP + + " overlaps with a portable" + " IP range already configured in the region " + _regionDao.getRegionId()); + } + + // verify and throw exception if the VLAN Id is used by any portable IP range + List existingPortableIPRanges = _portableIpRangeDao.listByRegionId(_regionDao.getRegionId()); + if (existingPortableIPRanges != null && !existingPortableIPRanges.isEmpty()) { + for (PortableIpRangeVO portableIpRange : existingPortableIPRanges) { + if (portableIpRange.getVlanTag().equalsIgnoreCase(vlanId)) { + throw new InvalidParameterValueException("The VLAN tag " + vlanId + + " is already being used for portable ip range in this region"); + } + } + } + } + private String getCidrAddress(String cidr) { String[] cidrPair = cidr.split("\\/"); return cidrPair[0]; @@ -3615,6 +3674,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati Map detailsStr = cmd.getDetails(); Boolean egressDefaultPolicy = 
cmd.getEgressDefaultPolicy(); Integer maxconn = null; + boolean enableKeepAlive = false; // Verify traffic type for (TrafficType tType : TrafficType.values()) { @@ -3770,12 +3830,15 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } validateLoadBalancerServiceCapabilities(lbServiceCapabilityMap); - if (!serviceProviderMap.containsKey(Service.Lb) && lbServiceCapabilityMap != null && !lbServiceCapabilityMap.isEmpty()) { + if (lbServiceCapabilityMap != null && !lbServiceCapabilityMap.isEmpty()) { maxconn = cmd.getMaxconnections(); if (maxconn == null) { maxconn=Integer.parseInt(_configDao.getValue(Config.NetworkLBHaproxyMaxConn.key())); } } + if(cmd.getKeepAliveEnabled() != null && cmd.getKeepAliveEnabled()) { + enableKeepAlive = true; + } // validate the Source NAT service capabilities specified in the network // offering @@ -3834,7 +3897,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati NetworkOffering offering = createNetworkOffering(name, displayText, trafficType, tags, specifyVlan, availability, networkRate, serviceProviderMap, false, guestType, false, serviceOfferingId, conserveMode, serviceCapabilityMap, - specifyIpRanges, isPersistent, details, egressDefaultPolicy, maxconn); + specifyIpRanges, isPersistent, details, egressDefaultPolicy, maxconn, enableKeepAlive); CallContext.current().setEventDetails(" Id: " + offering.getId() + " Name: " + name); return offering; } @@ -3924,54 +3987,39 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati void validateStaticNatServiceCapablities(Map staticNatServiceCapabilityMap) { if (staticNatServiceCapabilityMap != null && !staticNatServiceCapabilityMap.isEmpty()) { - if (staticNatServiceCapabilityMap.keySet().size() > 2) { - throw new InvalidParameterValueException("Only " + Capability.ElasticIp.getName() + " and " - + Capability.AssociatePublicIP.getName() - + " capabilitiy can be sepcified for static nat service"); - } - 
boolean eipEnabled = false; - boolean eipDisabled = false; boolean associatePublicIP = true; for (Capability capability : staticNatServiceCapabilityMap.keySet()) { - String value = staticNatServiceCapabilityMap.get(capability); + String value = staticNatServiceCapabilityMap.get(capability).toLowerCase(); + if (! (value.contains("true") ^ value.contains("false"))) { + throw new InvalidParameterValueException("Unknown specified value (" + value + ") for " + + capability); + } if (capability == Capability.ElasticIp) { eipEnabled = value.contains("true"); - eipDisabled = value.contains("false"); - if (!eipEnabled && !eipDisabled) { - throw new InvalidParameterValueException("Unknown specified value for " - + Capability.ElasticIp.getName()); - } } else if (capability == Capability.AssociatePublicIP) { - if (value.contains("true")) { - associatePublicIP = true; - } else if (value.contains("false")) { - associatePublicIP = false; - } else { - throw new InvalidParameterValueException("Unknown specified value for " - + Capability.AssociatePublicIP.getName()); - } + associatePublicIP = value.contains("true"); } else { throw new InvalidParameterValueException("Only " + Capability.ElasticIp.getName() + " and " + Capability.AssociatePublicIP.getName() + " capabilitiy can be sepcified for static nat service"); } - if (eipDisabled && associatePublicIP) { + } + if ((! 
eipEnabled) && associatePublicIP) { throw new InvalidParameterValueException("Capability " + Capability.AssociatePublicIP.getName() + " can only be set when capability " + Capability.ElasticIp.getName() + " is true"); } } } - } @Override @DB public NetworkOfferingVO createNetworkOffering(String name, String displayText, TrafficType trafficType, String tags, boolean specifyVlan, Availability availability, Integer networkRate, - Map> serviceProviderMap, boolean isDefault, Network.GuestType type, + final Map> serviceProviderMap, boolean isDefault, Network.GuestType type, boolean systemOnly, Long serviceOfferingId, boolean conserveMode, Map> serviceCapabilityMap, boolean specifyIpRanges, boolean isPersistent, - Map details, boolean egressDefaultPolicy, Integer maxconn) { + final Map details, boolean egressDefaultPolicy, final Integer maxconn, final boolean enableKeepAlive) { String multicastRateStr = _configDao.getValue("multicast.throttling.rate"); int multicastRate = ((multicastRateStr == null) ? 
10 : Integer.parseInt(multicastRateStr)); @@ -4111,13 +4159,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati publicLb = true; } - NetworkOfferingVO offering = new NetworkOfferingVO(name, displayText, trafficType, systemOnly, specifyVlan, + final NetworkOfferingVO offeringFinal = new NetworkOfferingVO(name, displayText, trafficType, systemOnly, specifyVlan, networkRate, multicastRate, isDefault, availability, tags, type, conserveMode, dedicatedLb, sharedSourceNat, redundantRouter, elasticIp, elasticLb, specifyIpRanges, inline, isPersistent, associatePublicIp, publicLb, internalLb, egressDefaultPolicy); if (serviceOfferingId != null) { - offering.setServiceOfferingId(serviceOfferingId); + offeringFinal.setServiceOfferingId(serviceOfferingId); } // validate the details @@ -4125,11 +4173,15 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati validateNtwkOffDetails(details, serviceProviderMap); } - Transaction txn = Transaction.currentTxn(); - txn.start(); + return Transaction.execute(new TransactionCallback() { + @Override + public NetworkOfferingVO doInTransaction(TransactionStatus status) { + NetworkOfferingVO offering = offeringFinal; + // 1) create network offering object s_logger.debug("Adding network offering " + offering); offering.setConcurrentConnections(maxconn); + offering.setKeepAliveEnabled(enableKeepAlive); offering = _networkOfferingDao.persist(offering, details); // 2) populate services and providers if (serviceProviderMap != null) { @@ -4162,10 +4214,10 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati } } - txn.commit(); - return offering; } + }); + } protected void validateNtwkOffDetails(Map details, Map> serviceProviderMap) { for (Detail detail : details.keySet()) { @@ -4691,27 +4743,29 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @Override @DB - public boolean releaseAccountSpecificVirtualRanges(long 
accountId) { - List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(accountId); - boolean result = true; + public boolean releaseAccountSpecificVirtualRanges(final long accountId) { + final List maps = _accountVlanMapDao.listAccountVlanMapsByAccount(accountId); if (maps != null && !maps.isEmpty()) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + try { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { for (AccountVlanMapVO map : maps) { if (!releasePublicIpRange(map.getVlanDbId(), _accountMgr.getSystemUser().getId(), _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM))) { - result = false; + throw new CloudRuntimeException("Failed to release account specific virtual ip ranges for account id=" + accountId); } } - if (result) { - txn.commit(); - } else { - s_logger.error("Failed to release account specific virtual ip ranges for account id=" + accountId); + } + }); + } catch ( CloudRuntimeException e ) { + s_logger.error(e); + return false; } } else { s_logger.trace("Account id=" + accountId + " has no account specific virtual ip ranges, nothing to release"); } - return result; + return true; } @Override @@ -4782,14 +4836,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati @ActionEvent(eventType = EventTypes.EVENT_PORTABLE_IP_RANGE_CREATE, eventDescription = "creating portable ip range", async = false) public PortableIpRange createPortableIpRange(CreatePortableIpRangeCmd cmd) throws ConcurrentOperationException { - Integer regionId = cmd.getRegionId(); - String startIP = cmd.getStartIp(); - String endIP = cmd.getEndIp(); - String gateway = cmd.getGateway(); - String netmask = cmd.getNetmask(); + final Integer regionId = cmd.getRegionId(); + final String startIP = cmd.getStartIp(); + final String endIP = cmd.getEndIp(); + final String gateway = cmd.getGateway(); + final String netmask = cmd.getNetmask(); String vlanId = 
cmd.getVlan(); - RegionVO region = _regionDao.findById(regionId); + final RegionVO region = _regionDao.findById(regionId); if (region == null) { throw new InvalidParameterValueException("Invalid region ID: " + regionId); } @@ -4820,29 +4874,33 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati throw new InvalidParameterValueException("Invalid vlan id " + vlanId); } - // check if there is zone vlan with same id List zones= _zoneDao.listAllZones(); if (zones != null && !zones.isEmpty()) { for (DataCenterVO zone: zones) { + // check if there is zone vlan with same id if (_vlanDao.findByZoneAndVlanId(zone.getId(), vlanId) != null) throw new InvalidParameterValueException("Found a VLAN id " + vlanId + " already existing in" + " zone " + zone.getUuid() + " that conflicts with VLAN id of the portable ip range being configured"); + //check if there is a public ip range that overlaps with portable ip range being created + checkOverlapPublicIpRange(zone.getId(), startIP, endIP); } } } GlobalLock portableIpLock = GlobalLock.getInternLock("PortablePublicIpRange"); portableIpLock.lock(5); - Transaction txn = Transaction.currentTxn(); - txn.start(); - - PortableIpRangeVO portableIpRange = new PortableIpRangeVO(regionId, vlanId, gateway, netmask, startIP, endIP); + try { + final String vlanIdFinal = vlanId; + return Transaction.execute(new TransactionCallback() { + @Override + public PortableIpRangeVO doInTransaction(TransactionStatus status) { + PortableIpRangeVO portableIpRange = new PortableIpRangeVO(regionId, vlanIdFinal, gateway, netmask, startIP, endIP); portableIpRange = _portableIpRangeDao.persist(portableIpRange); long startIpLong = NetUtils.ip2Long(startIP); long endIpLong = NetUtils.ip2Long(endIP); while (startIpLong <= endIpLong) { - PortableIpVO portableIP = new PortableIpVO(regionId, portableIpRange.getId(), vlanId, gateway, netmask, + PortableIpVO portableIP = new PortableIpVO(regionId, portableIpRange.getId(), vlanIdFinal, gateway, 
netmask, NetUtils.long2Ip(startIpLong)); _portableIpDao.persist(portableIP); startIpLong++; @@ -4852,10 +4910,13 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati region.setPortableipEnabled(true); _regionDao.update(region.getId(), region); - txn.commit(); - portableIpLock.unlock(); return portableIpRange; } + }); + } finally { + portableIpLock.unlock(); + } + } @Override @DB @@ -4932,6 +4993,11 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati long newEndIp = NetUtils.ip2Long(newEndIpStr); List existingPortableIPRanges = _portableIpRangeDao.listByRegionId(regionId); + + if (existingPortableIPRanges == null || existingPortableIPRanges.isEmpty()) { + return false; + } + for (PortableIpRangeVO portableIpRange : existingPortableIPRanges) { String ipRangeStr = portableIpRange.getIpRange(); String[] range = ipRangeStr.split("-"); @@ -4949,5 +5015,14 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati return false; } + public List getSecChecker() { + return _secChecker; + } + + @Inject + public void setSecChecker(List secChecker) { + this._secChecker = secChecker; + } + } diff --git a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java index 90273f7588d..e82aabaeddc 100755 --- a/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java +++ b/server/src/com/cloud/consoleproxy/ConsoleProxyManagerImpl.java @@ -117,9 +117,10 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.QueryBuilder; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.db.Transaction; import 
com.cloud.utils.events.SubscriptionMgr; import com.cloud.utils.exception.CloudRuntimeException; @@ -167,7 +168,6 @@ VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter { private int _mgmt_port = 8250; - @Inject private List _consoleProxyAllocators; @Inject @@ -571,10 +571,10 @@ VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter { } catch (ResourceUnavailableException e) { s_logger.warn("Exception while trying to start console proxy", e); return null; - } catch (CloudRuntimeException e) { + } catch (ConcurrentOperationException e) { s_logger.warn("Runtime Exception while trying to start console proxy", e); return null; - } catch (ConcurrentOperationException e) { + } catch (CloudRuntimeException e) { s_logger.warn("Runtime Exception while trying to start console proxy", e); return null; } catch (OperationTimedoutException e) { @@ -1051,25 +1051,24 @@ VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter { @Override @DB - public void setManagementState(ConsoleProxyManagementState state) { - Transaction txn = Transaction.currentTxn(); + public void setManagementState(final ConsoleProxyManagementState state) { try { - txn.start(); - - ConsoleProxyManagementState lastState = getManagementState(); + final ConsoleProxyManagementState lastState = getManagementState(); if (lastState == null) { - txn.commit(); return; } if (lastState != state) { - _configDao.update(Config.ConsoleProxyManagementLastState.key(), Config.ConsoleProxyManagementLastState.getCategory(), lastState.toString()); - _configDao.update(Config.ConsoleProxyManagementState.key(), Config.ConsoleProxyManagementState.getCategory(), state.toString()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _configDao.update(Config.ConsoleProxyManagementLastState.key(), Config.ConsoleProxyManagementLastState.getCategory(), lastState.toString()); + 
_configDao.update(Config.ConsoleProxyManagementState.key(), Config.ConsoleProxyManagementState.getCategory(), state.toString()); + } + }); } - - txn.commit(); } catch (Throwable e) { - txn.rollback(); + s_logger.error("Failed to set managment state", e); } } @@ -1092,23 +1091,18 @@ VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter { @Override @DB public void resumeLastManagementState() { - Transaction txn = Transaction.currentTxn(); try { - txn.start(); ConsoleProxyManagementState state = getManagementState(); ConsoleProxyManagementState lastState = getLastManagementState(); if (lastState == null) { - txn.commit(); return; } if (lastState != state) { _configDao.update(Config.ConsoleProxyManagementState.key(), Config.ConsoleProxyManagementState.getCategory(), lastState.toString()); } - - txn.commit(); } catch (Throwable e) { - txn.rollback(); + s_logger.error("Failed to resume last management state", e); } } @@ -1332,7 +1326,7 @@ VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter { public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { ConsoleProxyVO vm = _consoleProxyDao.findById(profile.getId()); - Map details = _vmDetailsDao.findDetails(vm.getId()); + Map details = _vmDetailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); StringBuilder buf = profile.getBootArgsBuilder(); @@ -1693,9 +1687,9 @@ VirtualMachineGuru, SystemVmLoadScanHandler, ResourceStateAdapter { } protected HostVO findConsoleProxyHostByName(String name) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.ConsoleProxy); - sc.addAnd(sc.getEntity().getName(), Op.EQ, name); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getType(), Op.EQ, Host.Type.ConsoleProxy); + sc.and(sc.entity().getName(), Op.EQ, name); return sc.find(); } @@ -1703,4 +1697,13 @@ VirtualMachineGuru, 
SystemVmLoadScanHandler, ResourceStateAdapter { public void prepareStop(VirtualMachineProfile profile) { } + public List getConsoleProxyAllocators() { + return _consoleProxyAllocators; + } + + @Inject + public void setConsoleProxyAllocators(List consoleProxyAllocators) { + this._consoleProxyAllocators = consoleProxyAllocators; + } + } diff --git a/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java b/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java index 675ff2505bb..ef161338a1c 100755 --- a/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java +++ b/server/src/com/cloud/consoleproxy/StaticConsoleProxyManager.java @@ -70,6 +70,9 @@ public class StaticConsoleProxyManager extends AgentBasedConsoleProxyManager imp private String _ip = null; + public StaticConsoleProxyManager() { + + } @Override protected HostVO findHost(VMInstanceVO vm) { diff --git a/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java b/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java index bc58021a4d4..3902847c4f9 100644 --- a/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java +++ b/server/src/com/cloud/dc/dao/DedicatedResourceDaoImpl.java @@ -23,19 +23,19 @@ import javax.ejb.Local; import org.springframework.stereotype.Component; import com.cloud.dc.DedicatedResourceVO; -import com.cloud.dc.HostPodVO; import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; @Component -@Local(value={DedicatedResourceDao.class}) @DB(txn = false) +@Local(value={DedicatedResourceDao.class}) @DB public class DedicatedResourceDaoImpl extends GenericDaoBase implements DedicatedResourceDao { protected final 
SearchBuilder ZoneSearch; protected final SearchBuilder PodSearch; @@ -339,7 +339,7 @@ public class DedicatedResourceDaoImpl extends GenericDaoBase() { + @Override + public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); @@ -585,13 +587,13 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy return true; } else { s_logger.debug("Cannot use this host for usage: " + resourceUsageRequired - + ", since this host has been reserved for planner usage : " + hostResourceType); + + ", since this host has been reserved for planner usage : " + hostResourceTypeFinal); return false; } } - } finally { - txn.commit(); } + }); + } } @@ -600,7 +602,7 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy } @DB - public boolean checkHostReservationRelease(Long hostId) { + public boolean checkHostReservationRelease(final Long hostId) { if (hostId != null) { PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); @@ -662,12 +664,11 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy s_logger.debug("Host has no VMs associated, releasing the planner reservation for host " + hostId); } - long id = reservationEntry.getId(); - final Transaction txn = Transaction.currentTxn(); - - try { - txn.start(); + final long id = reservationEntry.getId(); + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { final PlannerHostReservationVO lockedEntry = _plannerHostReserveDao.lockRow(id, true); if (lockedEntry == null) { s_logger.error("Unable to lock the host entry for reservation, host: " + hostId); @@ -679,18 +680,19 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy 
_plannerHostReserveDao.persist(lockedEntry); return true; } - } finally { - txn.commit(); + + return false; } + }); } } return false; } - class HostReservationReleaseChecker extends TimerTask { + class HostReservationReleaseChecker extends ManagedContextTimerTask { @Override - public void run() { + protected void runInContext() { try { s_logger.debug("Checking if any host reservation can be released ... "); checkHostReservations(); @@ -932,15 +934,36 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy if (!allocatorAvoidOutput.shouldAvoid(host)) { // there's some host in the cluster that is not yet in avoid set avoidAllHosts = false; + break; } } + // Cluster can be put in avoid set in following scenarios: + // 1. If storage allocators haven't put any pools in avoid set means either no pools in cluster + // or pools not suitable for the allocators to handle. + // 2. If all 'shared' or 'local' pools are in avoid set + if (allocatorAvoidOutput.getPoolsToAvoid() != null && !allocatorAvoidOutput.getPoolsToAvoid().isEmpty()) { + // check shared pools List allPoolsInCluster = _storagePoolDao.findPoolsByTags(clusterVO.getDataCenterId(), clusterVO.getPodId(), clusterVO.getId(), null); for (StoragePoolVO pool : allPoolsInCluster) { if (!allocatorAvoidOutput.shouldAvoid(pool)) { // there's some pool in the cluster that is not yet in avoid set avoidAllPools = false; + break; + } + } + if (avoidAllPools) { + // check local pools + List allLocalPoolsInCluster = _storagePoolDao.findLocalStoragePoolsByTags(clusterVO.getDataCenterId(), + clusterVO.getPodId(), clusterVO.getId(), null); + for (StoragePoolVO pool : allLocalPoolsInCluster) { + if (!allocatorAvoidOutput.shouldAvoid(pool)) { + // there's some pool in the cluster that is not yet in avoid set + avoidAllPools = false; + break; + } + } } } @@ -1230,17 +1253,18 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy @DB @Override - public String 
finalizeReservation(DeployDestination plannedDestination, - VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) + public String finalizeReservation(final DeployDestination plannedDestination, + final VirtualMachineProfile vmProfile, DeploymentPlan plan, ExcludeList avoids) throws InsufficientServerCapacityException, AffinityConflictException { - VirtualMachine vm = vmProfile.getVirtualMachine(); - long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); + final VirtualMachine vm = vmProfile.getVirtualMachine(); + final long vmGroupCount = _affinityGroupVMMapDao.countAffinityGroupsForVm(vm.getId()); + return Transaction.execute(new TransactionCallback() { + @Override + public String doInTransaction(TransactionStatus status) { boolean saveReservation = true; - final Transaction txn = Transaction.currentTxn(); - try { - txn.start(); + if (vmGroupCount > 0) { List groupIds = _affinityGroupVMMapDao.listAffinityGroupIdsByVmId(vm.getId()); SearchCriteria criteria = _affinityGroupDao.createSearchCriteria(); @@ -1270,11 +1294,11 @@ public class DeploymentPlanningManagerImpl extends ManagerBase implements Deploy _reservationDao.persist(vmReservation); return vmReservation.getUuid(); } - } finally { - txn.commit(); - } + return null; } + }); + } @Override public boolean preStateTransitionEvent(State oldState, Event event, State newState, VirtualMachine vo, diff --git a/server/src/com/cloud/event/ActionEventInterceptor.java b/server/src/com/cloud/event/ActionEventInterceptor.java index 83965120883..1820b346fbf 100644 --- a/server/src/com/cloud/event/ActionEventInterceptor.java +++ b/server/src/com/cloud/event/ActionEventInterceptor.java @@ -20,14 +20,45 @@ import java.lang.reflect.Method; import java.util.ArrayList; import java.util.List; +import org.aopalliance.intercept.MethodInterceptor; +import org.aopalliance.intercept.MethodInvocation; import org.apache.cloudstack.context.CallContext; import 
com.cloud.utils.component.ComponentMethodInterceptor; -public class ActionEventInterceptor implements ComponentMethodInterceptor { +public class ActionEventInterceptor implements ComponentMethodInterceptor, MethodInterceptor { public ActionEventInterceptor() { } + + @Override + public Object invoke(MethodInvocation invocation) throws Throwable { + Method m = invocation.getMethod(); + Object target = invocation.getThis(); + + if ( getActionEvents(m).size() == 0 ) { + /* Look for annotation on impl class */ + m = target.getClass().getMethod(m.getName(), m.getParameterTypes()); + } + + Object interceptorData = null; + + boolean success = true; + try { + interceptorData = interceptStart(m, target); + + Object result = invocation.proceed(); + success = true; + + return result; + } finally { + if ( success ) { + interceptComplete(m, target, interceptorData); + } else { + interceptException(m, target, interceptorData); + } + } + } @Override public Object interceptStart(Method method, Object target) { diff --git a/server/src/com/cloud/ha/AbstractInvestigatorImpl.java b/server/src/com/cloud/ha/AbstractInvestigatorImpl.java index 73fab300885..e5513868765 100755 --- a/server/src/com/cloud/ha/AbstractInvestigatorImpl.java +++ b/server/src/com/cloud/ha/AbstractInvestigatorImpl.java @@ -21,7 +21,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; - import javax.inject.Inject; import javax.naming.ConfigurationException; @@ -38,16 +37,15 @@ import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.resource.ResourceManager; import com.cloud.utils.component.AdapterBase; -import com.cloud.utils.db.SearchCriteria2; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteriaService; public abstract class AbstractInvestigatorImpl extends AdapterBase implements Investigator { private static final Logger s_logger = Logger.getLogger(AbstractInvestigatorImpl.class); - 
@Inject private HostDao _hostDao = null; - @Inject private AgentManager _agentMgr = null; - @Inject private ResourceManager _resourceMgr = null; + @Inject private final HostDao _hostDao = null; + @Inject private final AgentManager _agentMgr = null; + @Inject private final ResourceManager _resourceMgr = null; @Override @@ -68,10 +66,10 @@ public abstract class AbstractInvestigatorImpl extends AdapterBase implements In // Host.status is up and Host.type is routing protected List findHostByPod(long podId, Long excludeHostId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Type.Routing); - sc.addAnd(sc.getEntity().getPodId(), Op.EQ, podId); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getType(), Op.EQ, Type.Routing); + sc.and(sc.entity().getPodId(), Op.EQ, podId); + sc.and(sc.entity().getStatus(), Op.EQ, Status.Up); List hosts = sc.list(); List hostIds = new ArrayList(hosts.size()); diff --git a/server/src/com/cloud/ha/HighAvailabilityManagerExtImpl.java b/server/src/com/cloud/ha/HighAvailabilityManagerExtImpl.java index f1e0f3f5dec..801e7dac572 100644 --- a/server/src/com/cloud/ha/HighAvailabilityManagerExtImpl.java +++ b/server/src/com/cloud/ha/HighAvailabilityManagerExtImpl.java @@ -24,14 +24,13 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.springframework.context.annotation.Primary; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.alert.AlertManager; import com.cloud.usage.dao.UsageJobDao; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Local(value={HighAvailabilityManager.class}) public class HighAvailabilityManagerExtImpl extends 
HighAvailabilityManagerImpl { @@ -65,16 +64,16 @@ public class HighAvailabilityManagerExtImpl extends HighAvailabilityManagerImpl return true; } - protected class UsageServerMonitorTask implements Runnable { + protected class UsageServerMonitorTask extends ManagedContextRunnable{ @Override - public void run() { + protected void runInContext() { if (s_logger.isInfoEnabled()) { s_logger.info("checking health of usage server"); } try { boolean isRunning = false; - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try { Date lastHeartbeat = _usageJobDao.getLastHeartbeat(); if (lastHeartbeat != null) { @@ -91,7 +90,7 @@ public class HighAvailabilityManagerExtImpl extends HighAvailabilityManagerImpl txn.close(); // switch back to VMOPS db - Transaction swap = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); swap.close(); } diff --git a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java index 464b9524556..59337c0326d 100755 --- a/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java +++ b/server/src/com/cloud/ha/HighAvailabilityManagerImpl.java @@ -31,10 +31,10 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.apache.log4j.NDC; - -import org.apache.cloudstack.context.ServerContexts; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContext; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -116,6 +116,9 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai @Inject ClusterDetailsDao _clusterDetailsDao; long _serverId; + + @Inject + 
ManagedContext _managedContext; List _investigators; public List getInvestigators() { @@ -773,9 +776,9 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai return true; } - protected class CleanupTask implements Runnable { + protected class CleanupTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { s_logger.info("HA Cleanup Thread Running"); try { @@ -793,71 +796,75 @@ public class HighAvailabilityManagerImpl extends ManagerBase implements HighAvai @Override public void run() { - ServerContexts.registerSystemContext(); - try { - s_logger.info("Starting work"); - while (!_stopped) { - HaWorkVO work = null; - try { - s_logger.trace("Checking the database"); - work = _haDao.take(_serverId); - if (work == null) { - try { - synchronized (this) { - wait(_timeToSleep); - } - continue; - } catch (final InterruptedException e) { - s_logger.info("Interrupted"); - continue; - } - } - - NDC.push("work-" + work.getId()); - s_logger.info("Processing " + work); - - try { - final WorkType wt = work.getWorkType(); - Long nextTime = null; - if (wt == WorkType.Migration) { - nextTime = migrate(work); - } else if (wt == WorkType.HA) { - nextTime = restart(work); - } else if (wt == WorkType.Stop || wt == WorkType.CheckStop || wt == WorkType.ForceStop) { - nextTime = stopVM(work); - } else if (wt == WorkType.Destroy) { - nextTime = destroyVM(work); - } else { - assert false : "How did we get here with " + wt.toString(); - continue; - } - - if (nextTime == null) { - s_logger.info("Completed " + work); - work.setStep(Step.Done); - } else { - s_logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10)); - work.setTimeToTry(nextTime); - work.setServerId(null); - work.setDateTaken(null); - } - } catch (Exception e) { - s_logger.error("Terminating " + work, e); - work.setStep(Step.Error); - } - _haDao.update(work.getId(), work); - } catch (final Throwable th) { - s_logger.error("Caught 
this throwable, ", th); - } finally { - if (work != null) { - NDC.pop(); - } - } + s_logger.info("Starting work"); + while (!_stopped) { + _managedContext.runWithContext(new Runnable() { + @Override + public void run() { + runWithContext(); } - s_logger.info("Time to go home!"); - } finally { - ServerContexts.unregisterSystemContext(); + }); } + s_logger.info("Time to go home!"); + } + + private void runWithContext() { + HaWorkVO work = null; + try { + s_logger.trace("Checking the database"); + work = _haDao.take(_serverId); + if (work == null) { + try { + synchronized (this) { + wait(_timeToSleep); + } + return; + } catch (final InterruptedException e) { + s_logger.info("Interrupted"); + return; + } + } + + NDC.push("work-" + work.getId()); + s_logger.info("Processing " + work); + + try { + final WorkType wt = work.getWorkType(); + Long nextTime = null; + if (wt == WorkType.Migration) { + nextTime = migrate(work); + } else if (wt == WorkType.HA) { + nextTime = restart(work); + } else if (wt == WorkType.Stop || wt == WorkType.CheckStop || wt == WorkType.ForceStop) { + nextTime = stopVM(work); + } else if (wt == WorkType.Destroy) { + nextTime = destroyVM(work); + } else { + assert false : "How did we get here with " + wt.toString(); + return; + } + + if (nextTime == null) { + s_logger.info("Completed " + work); + work.setStep(Step.Done); + } else { + s_logger.info("Rescheduling " + work + " to try again at " + new Date(nextTime << 10)); + work.setTimeToTry(nextTime); + work.setServerId(null); + work.setDateTaken(null); + } + } catch (Exception e) { + s_logger.error("Terminating " + work, e); + work.setStep(Step.Error); + } + _haDao.update(work.getId(), work); + } catch (final Throwable th) { + s_logger.error("Caught this throwable, ", th); + } finally { + if (work != null) { + NDC.pop(); + } + } } public synchronized void wakup() { diff --git a/server/src/com/cloud/ha/ManagementIPSystemVMInvestigator.java 
b/server/src/com/cloud/ha/ManagementIPSystemVMInvestigator.java index 2b6d261261c..aca5574613f 100644 --- a/server/src/com/cloud/ha/ManagementIPSystemVMInvestigator.java +++ b/server/src/com/cloud/ha/ManagementIPSystemVMInvestigator.java @@ -17,11 +17,9 @@ package com.cloud.ha; import java.util.List; -import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.log4j.Logger; @@ -38,7 +36,6 @@ import com.cloud.vm.VirtualMachine; public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { private static final Logger s_logger = Logger.getLogger(ManagementIPSystemVMInvestigator.class); - private String _name = null; @Inject private final HostDao _hostDao = null; @Inject private final NetworkModel _networkMgr = null; @@ -114,18 +111,6 @@ public class ManagementIPSystemVMInvestigator extends AbstractInvestigatorImpl { return null; } - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - - return true; - } - - @Override - public String getName() { - return _name; - } - @Override public boolean start() { return true; diff --git a/server/src/com/cloud/ha/UserVmDomRInvestigator.java b/server/src/com/cloud/ha/UserVmDomRInvestigator.java index 195deff5ab0..90cf7c4e0bf 100644 --- a/server/src/com/cloud/ha/UserVmDomRInvestigator.java +++ b/server/src/com/cloud/ha/UserVmDomRInvestigator.java @@ -18,11 +18,9 @@ package com.cloud.ha; import java.util.ArrayList; import java.util.List; -import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; import org.apache.log4j.Logger; @@ -45,7 +43,6 @@ import com.cloud.vm.dao.UserVmDao; public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { private static final Logger s_logger = Logger.getLogger(UserVmDomRInvestigator.class); - private String _name = null; @Inject private final UserVmDao _userVmDao = null; @Inject 
private final AgentManager _agentMgr = null; @Inject private final NetworkModel _networkMgr = null; @@ -143,18 +140,6 @@ public class UserVmDomRInvestigator extends AbstractInvestigatorImpl { return null; } - @Override - public boolean configure(String name, Map params) throws ConfigurationException { - _name = name; - - return true; - } - - @Override - public String getName() { - return _name; - } - @Override public boolean start() { return true; diff --git a/server/src/com/cloud/ha/dao/HighAvailabilityDaoImpl.java b/server/src/com/cloud/ha/dao/HighAvailabilityDaoImpl.java index 83a71b80299..d800483b472 100644 --- a/server/src/com/cloud/ha/dao/HighAvailabilityDaoImpl.java +++ b/server/src/com/cloud/ha/dao/HighAvailabilityDaoImpl.java @@ -33,6 +33,7 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -130,7 +131,7 @@ public class HighAvailabilityDaoImpl extends GenericDaoBase impl @Override public HaWorkVO take(final long serverId) { - final Transaction txn = Transaction.currentTxn(); + final TransactionLegacy txn = TransactionLegacy.currentTxn(); try { final SearchCriteria sc = TBASearch.create(); sc.setParameters("time", System.currentTimeMillis() >> 10); diff --git a/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java b/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java index 9e4bee029ec..8761b6baba9 100755 --- a/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java +++ b/server/src/com/cloud/hypervisor/CloudZonesStartupProcessor.java @@ -23,11 +23,11 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.ResourceDetail; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.log4j.Logger; 
import org.springframework.stereotype.Component; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; - import com.cloud.agent.AgentManager; import com.cloud.agent.StartupCommandProcessor; import com.cloud.agent.api.StartupCommand; @@ -38,19 +38,16 @@ import com.cloud.configuration.ConfigurationManager; import com.cloud.configuration.ZoneConfig; import com.cloud.dc.ClusterVO; import com.cloud.dc.DataCenterVO; -import com.cloud.dc.DcDetailVO; import com.cloud.dc.HostPodVO; import com.cloud.dc.dao.ClusterDao; import com.cloud.dc.dao.DataCenterDao; -import com.cloud.dc.dao.DcDetailsDao; +import com.cloud.dc.dao.DataCenterDetailsDao; import com.cloud.dc.dao.HostPodDao; import com.cloud.exception.ConnectionException; import com.cloud.host.Host; -import com.cloud.host.Host.Type; import com.cloud.host.HostVO; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.storage.Storage; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.MacAddress; @@ -69,7 +66,7 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo @Inject DataCenterDao _zoneDao = null; @Inject HostDao _hostDao = null; @Inject HostPodDao _podDao = null; - @Inject DcDetailsDao _zoneDetailsDao = null; + @Inject DataCenterDetailsDao _zoneDetailsDao = null; @Inject AgentManager _agentManager = null; @Inject ConfigurationManager _configurationManager = null; @@ -175,7 +172,7 @@ public class CloudZonesStartupProcessor extends AdapterBase implements StartupCo } long zoneId = zone.getId(); - DcDetailVO maxHostsInZone = _zoneDetailsDao.findDetail(zoneId, ZoneConfig.MaxHosts.key()); + ResourceDetail maxHostsInZone = _zoneDetailsDao.findDetail(zoneId, ZoneConfig.MaxHosts.key()); if(maxHostsInZone != null){ long maxHosts = new Long(maxHostsInZone.getValue()).longValue(); long currentCountOfHosts = _hostDao.countRoutingHostsByDataCenter(zoneId); diff 
--git a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java index 2ee6364b231..4e28a6aba66 100644 --- a/server/src/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/com/cloud/hypervisor/HypervisorGuruBase.java @@ -39,12 +39,14 @@ import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.NicSecondaryIpDao; import com.cloud.vm.dao.VMInstanceDao; +import com.cloud.vm.dao.UserVmDetailsDao; public abstract class HypervisorGuruBase extends AdapterBase implements HypervisorGuru { @Inject VMTemplateDetailsDao _templateDetailsDao; @Inject NicDao _nicDao; @Inject VMInstanceDao _virtualMachineDao; + @Inject UserVmDetailsDao _userVmDetailsDao; @Inject NicSecondaryIpDao _nicSecIpDao; @Inject ConfigurationServer _configServer; @@ -113,14 +115,10 @@ public abstract class HypervisorGuruBase extends AdapterBase implements Hypervis to.setArch("x86_64"); } - long templateId = vm.getTemplateId(); - Map details = _templateDetailsDao.findDetails(templateId); - assert(details != null); - Map detailsInVm = vm.getDetails(); + Map detailsInVm = _userVmDetailsDao.listDetailsKeyPairs(vm.getId()); if(detailsInVm != null) { - details.putAll(detailsInVm); + to.setDetails(detailsInVm); } - to.setDetails(details); // Workaround to make sure the TO has the UUID we need for Niciri integration VMInstanceVO vmInstance = _virtualMachineDao.findById(to.getId()); // check if XStools/VMWare tools are present in the VM and dynamic scaling feature is enabled (per zone/global) diff --git a/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java b/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java index 4d1e1b50b40..ace7c9127b5 100644 --- a/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java +++ b/server/src/com/cloud/hypervisor/HypervisorGuruManagerImpl.java @@ -19,6 +19,7 @@ package com.cloud.hypervisor; import java.util.HashMap; import java.util.List; import 
java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import javax.annotation.PostConstruct; import javax.ejb.Local; @@ -26,6 +27,7 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import com.cloud.utils.Pair; + import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -43,8 +45,8 @@ public class HypervisorGuruManagerImpl extends ManagerBase implements Hypervisor @Inject HostDao _hostDao; - @Inject List _hvGuruList; - Map _hvGurus = new HashMap(); + List _hvGuruList; + Map _hvGurus = new ConcurrentHashMap(); @PostConstruct public void init() { @@ -55,7 +57,23 @@ public class HypervisorGuruManagerImpl extends ManagerBase implements Hypervisor @Override public HypervisorGuru getGuru(HypervisorType hypervisorType) { - return _hvGurus.get(hypervisorType); + if (hypervisorType == null) { + return null; + } + + HypervisorGuru result = _hvGurus.get(hypervisorType); + + if ( result == null ) { + for ( HypervisorGuru guru : _hvGuruList ) { + if ( guru.getHypervisorType() == hypervisorType ) { + _hvGurus.put(hypervisorType, guru); + result = guru; + break; + } + } + } + + return result; } @Override @@ -68,4 +86,14 @@ public class HypervisorGuruManagerImpl extends ManagerBase implements Hypervisor } return hostId; } + + public List getHvGuruList() { + return _hvGuruList; + } + + @Inject + public void setHvGuruList(List hvGuruList) { + this._hvGuruList = hvGuruList; + } + } diff --git a/server/src/com/cloud/metadata/ResourceMetaDataManagerImpl.java b/server/src/com/cloud/metadata/ResourceMetaDataManagerImpl.java index 8586589959f..5f7ad4ba9a8 100644 --- a/server/src/com/cloud/metadata/ResourceMetaDataManagerImpl.java +++ b/server/src/com/cloud/metadata/ResourceMetaDataManagerImpl.java @@ -16,7 +16,6 @@ // under the License. 
package com.cloud.metadata; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -25,142 +24,74 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.server.ResourceMetaDataService; -import com.cloud.storage.VolumeDetailVO; -import com.cloud.storage.dao.VolumeDetailsDao; -import com.cloud.vm.NicDetailVO; -import com.cloud.vm.dao.NicDao; -import com.cloud.vm.dao.NicDetailDao; - -import org.apache.cloudstack.api.command.user.tag.ListTagsCmd; -import org.apache.cloudstack.context.CallContext; - +import org.apache.cloudstack.api.ResourceDetail; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; +import org.apache.cloudstack.resourcedetail.dao.FirewallRuleDetailsDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolDetailsDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.api.query.dao.ResourceTagJoinDao; -import com.cloud.api.query.vo.ResourceTagJoinVO; -import com.cloud.domain.Domain; +import com.cloud.dc.dao.DataCenterDetailsDao; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.PermissionDeniedException; -import com.cloud.network.dao.FirewallRulesDao; -import com.cloud.network.dao.IPAddressDao; -import com.cloud.network.dao.LoadBalancerDao; -import com.cloud.network.dao.NetworkDao; -import com.cloud.network.dao.RemoteAccessVpnDao; -import com.cloud.network.rules.dao.PortForwardingRulesDao; -import com.cloud.network.security.dao.SecurityGroupDao; -import com.cloud.network.vpc.dao.StaticRouteDao; -import com.cloud.network.vpc.dao.VpcDao; -import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.projects.dao.ProjectDao; -import com.cloud.server.ResourceTag; -import com.cloud.server.ResourceTag.TaggedResourceType; +import 
com.cloud.network.dao.NetworkDetailsDao; +import com.cloud.server.ResourceMetaDataService; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.server.TaggedResourceService; -import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.VMTemplateDao; -import com.cloud.storage.dao.VolumeDao; -import com.cloud.tags.dao.ResourceTagDao; -import com.cloud.user.Account; -import com.cloud.user.AccountManager; -import com.cloud.user.DomainManager; -import com.cloud.utils.Pair; -import com.cloud.utils.Ternary; -import com.cloud.utils.component.Manager; +import com.cloud.service.dao.ServiceOfferingDetailsDao; +import com.cloud.storage.dao.VMTemplateDetailsDao; +import com.cloud.storage.dao.VolumeDetailsDao; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; -import com.cloud.utils.db.DbUtil; -import com.cloud.utils.db.Filter; -import com.cloud.utils.db.GenericDao; -import com.cloud.utils.db.SearchBuilder; -import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; -import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.uuididentity.dao.IdentityDao; -import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.snapshot.dao.VMSnapshotDao; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.vm.dao.NicDetailsDao; +import com.cloud.vm.dao.UserVmDetailsDao; @Component @Local(value = { ResourceMetaDataService.class, ResourceMetaDataManager.class }) public class ResourceMetaDataManagerImpl extends ManagerBase implements ResourceMetaDataService, ResourceMetaDataManager { public static final Logger s_logger = Logger.getLogger(ResourceMetaDataManagerImpl.class); - - - private static Map> _daoMap= - new HashMap>(); @Inject - AccountManager _accountMgr; + VolumeDetailsDao _volumeDetailDao; @Inject - ResourceTagDao _resourceTagDao; + NicDetailsDao _nicDetailDao; @Inject - ResourceTagJoinDao _resourceTagJoinDao; + UserVmDetailsDao 
_userVmDetailDao; @Inject - IdentityDao _identityDao; + DataCenterDetailsDao _dcDetailsDao; @Inject - DomainManager _domainMgr; - @Inject - UserVmDao _userVmDao; - @Inject - VolumeDao _volumeDao; - @Inject - VMTemplateDao _templateDao; - @Inject - SnapshotDao _snapshotDao; - @Inject - NetworkDao _networkDao; - @Inject - LoadBalancerDao _lbDao; - @Inject - PortForwardingRulesDao _pfDao; - @Inject - FirewallRulesDao _firewallDao; - @Inject - SecurityGroupDao _securityGroupDao; - @Inject - RemoteAccessVpnDao _vpnDao; - @Inject - IPAddressDao _publicIpDao; - @Inject - ProjectDao _projectDao; - @Inject - VpcDao _vpcDao; - @Inject - StaticRouteDao _staticRouteDao; - @Inject - VMSnapshotDao _vmSnapshotDao; - @Inject - protected VolumeDetailsDao _volumeDetailDao; - @Inject - NicDetailDao _nicDetailDao; - @Inject - NicDao _nicDao; + NetworkDetailsDao _networkDetailsDao; @Inject TaggedResourceService _taggedResourceMgr; + @Inject + VMTemplateDetailsDao _templateDetailsDao; + @Inject + ServiceOfferingDetailsDao _serviceOfferingDetailsDao; + @Inject + StoragePoolDetailsDao _storageDetailsDao; + @Inject + FirewallRuleDetailsDao _firewallRuleDetailsDao; + + private static Map> _daoMap= + new HashMap>(); + @Override public boolean configure(String name, Map params) throws ConfigurationException { - - _daoMap.put(TaggedResourceType.UserVm, _userVmDao); - _daoMap.put(TaggedResourceType.Volume, _volumeDao); - _daoMap.put(TaggedResourceType.Template, _templateDao); - _daoMap.put(TaggedResourceType.ISO, _templateDao); - _daoMap.put(TaggedResourceType.Snapshot, _snapshotDao); - _daoMap.put(TaggedResourceType.Network, _networkDao); - _daoMap.put(TaggedResourceType.LoadBalancer, _lbDao); - _daoMap.put(TaggedResourceType.PortForwardingRule, _pfDao); - _daoMap.put(TaggedResourceType.FirewallRule, _firewallDao); - _daoMap.put(TaggedResourceType.SecurityGroup, _securityGroupDao); - _daoMap.put(TaggedResourceType.PublicIpAddress, _publicIpDao); - _daoMap.put(TaggedResourceType.Project, 
_projectDao); - _daoMap.put(TaggedResourceType.Vpc, _vpcDao); - _daoMap.put(TaggedResourceType.NetworkACL, _firewallDao); - _daoMap.put(TaggedResourceType.Nic, _nicDao); - _daoMap.put(TaggedResourceType.StaticRoute, _staticRouteDao); - _daoMap.put(TaggedResourceType.VMSnapshot, _vmSnapshotDao); - _daoMap.put(TaggedResourceType.RemoteAccessVpn, _vpnDao); + _daoMap.put(ResourceObjectType.UserVm, _userVmDetailDao); + _daoMap.put(ResourceObjectType.Volume, _volumeDetailDao); + _daoMap.put(ResourceObjectType.Template, _templateDetailsDao); + _daoMap.put(ResourceObjectType.Network, _networkDetailsDao); + _daoMap.put(ResourceObjectType.Nic, _nicDetailDao); + _daoMap.put(ResourceObjectType.ServiceOffering, _serviceOfferingDetailsDao); + _daoMap.put(ResourceObjectType.Zone, _dcDetailsDao); + _daoMap.put(ResourceObjectType.Storage, _storageDetailsDao); + _daoMap.put(ResourceObjectType.FirewallRule, _firewallRuleDetailsDao); + return true; } @@ -173,78 +104,104 @@ public class ResourceMetaDataManagerImpl extends ManagerBase implements Resource public boolean stop() { return true; } - - - - - @Override - public TaggedResourceType getResourceType(String resourceTypeStr) { - - for (TaggedResourceType type : ResourceTag.TaggedResourceType.values()) { - if (type.toString().equalsIgnoreCase(resourceTypeStr)) { - return type; - } - } - throw new InvalidParameterValueException("Invalid resource type " + resourceTypeStr); - } + @Override @DB @ActionEvent(eventType = EventTypes.EVENT_RESOURCE_DETAILS_CREATE, eventDescription = "creating resource meta data") - public boolean addResourceMetaData(String resourceId, TaggedResourceType resourceType, Map details){ + public boolean addResourceMetaData(final String resourceId, final ResourceObjectType resourceType, final Map details){ + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + for (String key : details.keySet()) { + String value = details.get(key); - 
Transaction txn = Transaction.currentTxn(); - txn.start(); + if (value == null || value.isEmpty()) { + throw new InvalidParameterValueException("Value for the key " + key + " is either null or empty"); + } - for (String key : details.keySet()) { - Long id = _taggedResourceMgr.getResourceId(resourceId, resourceType); - - //check if object exists - if (_daoMap.get(resourceType).findById(id) == null) { - throw new InvalidParameterValueException("Unable to find resource by id " + resourceId + - " and type " + resourceType); + DetailDaoHelper newDetailDaoHelper = new DetailDaoHelper(resourceType); + newDetailDaoHelper.addDetail( _taggedResourceMgr.getResourceId(resourceId, resourceType), key, value); } - - String value = details.get(key); - - if (value == null || value.isEmpty()) { - throw new InvalidParameterValueException("Value for the key " + key + " is either null or empty"); - } - - // TODO - Have a better design here. - if(resourceType == TaggedResourceType.Volume){ - VolumeDetailVO v = new VolumeDetailVO(id, key, value); - _volumeDetailDao.persist(v); - }else if (resourceType == TaggedResourceType.Nic){ - NicDetailVO n = new NicDetailVO(id, key, value); - _nicDetailDao.persist(n); - }else{ - throw new InvalidParameterValueException("The resource type " + resourceType + " is not supported by the API yet"); - } - - } - - txn.commit(); - - return true; + + return true; + } + }); } @Override @DB @ActionEvent(eventType = EventTypes.EVENT_RESOURCE_DETAILS_DELETE, eventDescription = "deleting resource meta data") - public boolean deleteResourceMetaData(String resourceId, TaggedResourceType resourceType, String key){ - - Long id = _taggedResourceMgr.getResourceId(resourceId, resourceType); - // TODO - Have a better design here. 
- if(resourceType == TaggedResourceType.Volume){ - _volumeDetailDao.removeDetails(id, key); - } else { - _nicDetailDao.removeDetails(id, key); - } + public boolean deleteResourceMetaData(String resourceId, ResourceObjectType resourceType, String key){ + long id = _taggedResourceMgr.getResourceId(resourceId, resourceType); + + DetailDaoHelper newDetailDaoHelper = new DetailDaoHelper(resourceType); + newDetailDaoHelper.removeDetail(id, key); return true; } - + private class DetailDaoHelper { + private ResourceObjectType resourceType; + private ResourceDetailsDao dao; + + private DetailDaoHelper(ResourceObjectType resourceType) { + if (!resourceType.resourceMetadataSupport()) { + throw new UnsupportedOperationException("ResourceType " + resourceType + " doesn't support metadata"); + } + this.resourceType = resourceType; + ResourceDetailsDao dao = _daoMap.get(resourceType); + if (dao == null) { + throw new UnsupportedOperationException("ResourceType " + resourceType + " doesn't support metadata"); + } + this.dao = (ResourceDetailsDao)_daoMap.get(resourceType); + } + + private void removeDetail(long resourceId, String key) { + dao.removeDetail(resourceId, key); + } + + private ResourceDetail getDetail(long resourceId, String key) { + return dao.findDetail(resourceId, key); + } + + private void addDetail(long resourceId, String key, String value) { + dao.addDetail(resourceId, key, value); + } + + private Map getDetailsMap(long resourceId, Boolean forDisplay) { + if (forDisplay == null) { + return dao.listDetailsKeyPairs(resourceId); + } else { + return dao.listDetailsKeyPairs(resourceId, forDisplay); + } + } + + private List getDetailsList(long resourceId, Boolean forDisplay) { + if (forDisplay == null) { + return dao.listDetails(resourceId); + } else { + return dao.listDetails(resourceId, forDisplay); + } + } + } + + @Override + public List getDetailsList(long resourceId, ResourceObjectType resourceType, Boolean forDisplay) { + DetailDaoHelper newDetailDaoHelper = new 
DetailDaoHelper(resourceType); + return newDetailDaoHelper.getDetailsList(resourceId, forDisplay); + } + + @Override + public ResourceDetail getDetail(long resourceId, ResourceObjectType resourceType, String key) { + DetailDaoHelper newDetailDaoHelper = new DetailDaoHelper(resourceType); + return newDetailDaoHelper.getDetail(resourceId, key); + } + + @Override + public Map getDetailsMap(long resourceId, ResourceObjectType resourceType, Boolean forDisplay) { + DetailDaoHelper newDetailDaoHelper = new DetailDaoHelper(resourceType); + return newDetailDaoHelper.getDetailsMap(resourceId, forDisplay); + } } diff --git a/server/src/com/cloud/network/ExternalDeviceUsageManagerImpl.java b/server/src/com/cloud/network/ExternalDeviceUsageManagerImpl.java index f5c6eeca18d..cd3532e837d 100644 --- a/server/src/com/cloud/network/ExternalDeviceUsageManagerImpl.java +++ b/server/src/com/cloud/network/ExternalDeviceUsageManagerImpl.java @@ -31,8 +31,8 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.agent.AgentManager; import com.cloud.agent.api.ExternalNetworkResourceUsageAnswer; @@ -81,8 +81,9 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.exception.ExecutionException; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicVO; import com.cloud.vm.dao.DomainRouterDao; @@ -288,12 +289,19 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter newCurrentBytesReceived += bytesSentAndReceived[1]; } - 
UserStatisticsVO userStats; - final Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - userStats = _userStatsDao.lock(accountId, zone.getId(), networkId, publicIp, externalLoadBalancer.getId(), externalLoadBalancer.getType().toString()); + commitStats(networkId, externalLoadBalancer, accountId, publicIp, zone, statsEntryIdentifier, + newCurrentBytesSent, newCurrentBytesReceived); + } + } + private void commitStats(final long networkId, final HostVO externalLoadBalancer, final long accountId, final String publicIp, + final DataCenterVO zone, final String statsEntryIdentifier, final long newCurrentBytesSent, final long newCurrentBytesReceived) { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + UserStatisticsVO userStats; + userStats = _userStatsDao.lock(accountId, zone.getId(), networkId, publicIp, externalLoadBalancer.getId(), externalLoadBalancer.getType().toString()); + if (userStats != null) { long oldNetBytesSent = userStats.getNetBytesSent(); long oldNetBytesReceived = userStats.getNetBytesReceived(); @@ -301,19 +309,19 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter long oldCurrentBytesReceived = userStats.getCurrentBytesReceived(); String warning = "Received an external network stats byte count that was less than the stored value. 
Zone ID: " + userStats.getDataCenterId() + ", account ID: " + userStats.getAccountId() + "."; - + userStats.setCurrentBytesSent(newCurrentBytesSent); if (oldCurrentBytesSent > newCurrentBytesSent) { s_logger.warn(warning + "Stored bytes sent: " + oldCurrentBytesSent + ", new bytes sent: " + newCurrentBytesSent + "."); userStats.setNetBytesSent(oldNetBytesSent + oldCurrentBytesSent); } - + userStats.setCurrentBytesReceived(newCurrentBytesReceived); if (oldCurrentBytesReceived > newCurrentBytesReceived) { s_logger.warn(warning + "Stored bytes received: " + oldCurrentBytesReceived + ", new bytes received: " + newCurrentBytesReceived + "."); userStats.setNetBytesReceived(oldNetBytesReceived + oldCurrentBytesReceived); } - + if (_userStatsDao.update(userStats.getId(), userStats)) { s_logger.debug("Successfully updated stats for " + statsEntryIdentifier); } else { @@ -322,23 +330,18 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter } else { s_logger.warn("Unable to find user stats entry for " + statsEntryIdentifier); } - - txn.commit(); - } catch (final Exception e) { - txn.rollback(); - throw new CloudRuntimeException("Problem getting stats after reboot/stop ", e); } - } + }); } - protected class ExternalDeviceNetworkUsageTask implements Runnable { + protected class ExternalDeviceNetworkUsageTask extends ManagedContextRunnable { public ExternalDeviceNetworkUsageTask() { } @Override - public void run() { + protected void runInContext() { GlobalLock scanLock = GlobalLock.getInternLock("ExternalDeviceNetworkUsageManagerImpl"); try { if (scanLock.lock(20)) { @@ -601,78 +604,78 @@ public class ExternalDeviceUsageManagerImpl extends ManagerBase implements Exter * Stats entries are created for source NAT IP addresses, static NAT rules, port forwarding rules, and load * balancing rules */ - private boolean manageStatsEntries(boolean create, long accountId, long zoneId, Network network, - HostVO externalFirewall, 
ExternalNetworkResourceUsageAnswer firewallAnswer, - HostVO externalLoadBalancer, ExternalNetworkResourceUsageAnswer lbAnswer) { - String accountErrorMsg = "Failed to update external network stats entry. Details: account ID = " + accountId; - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + private boolean manageStatsEntries(final boolean create, final long accountId, final long zoneId, final Network network, + final HostVO externalFirewall, final ExternalNetworkResourceUsageAnswer firewallAnswer, + final HostVO externalLoadBalancer, final ExternalNetworkResourceUsageAnswer lbAnswer) { + final String accountErrorMsg = "Failed to update external network stats entry. Details: account ID = " + accountId; try { - txn.start(); - String networkErrorMsg = accountErrorMsg + ", network ID = " + network.getId(); - - boolean sharedSourceNat = false; - Map sourceNatCapabilities = _networkModel.getNetworkServiceCapabilities(network.getId(), Network.Service.SourceNat); - if (sourceNatCapabilities != null) { - String supportedSourceNatTypes = sourceNatCapabilities.get(Network.Capability.SupportedSourceNatTypes).toLowerCase(); - if (supportedSourceNatTypes.contains("zone")) { - sharedSourceNat = true; - } - } - - if (externalFirewall != null && firewallAnswer != null) { - if (!sharedSourceNat) { - // Manage the entry for this network's source NAT IP address - List sourceNatIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), true); - if (sourceNatIps.size() == 1) { - String publicIp = sourceNatIps.get(0).getAddress().addr(); - if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { - throw new ExecutionException(networkErrorMsg + ", source NAT IP = " + publicIp); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + String networkErrorMsg = accountErrorMsg + ", network ID = " + network.getId(); + 
+ boolean sharedSourceNat = false; + Map sourceNatCapabilities = _networkModel.getNetworkServiceCapabilities(network.getId(), Network.Service.SourceNat); + if (sourceNatCapabilities != null) { + String supportedSourceNatTypes = sourceNatCapabilities.get(Network.Capability.SupportedSourceNatTypes).toLowerCase(); + if (supportedSourceNatTypes.contains("zone")) { + sharedSourceNat = true; } } - - // Manage one entry for each static NAT rule in this network - List staticNatIps = _ipAddressDao.listStaticNatPublicIps(network.getId()); - for (IPAddressVO staticNatIp : staticNatIps) { - String publicIp = staticNatIp.getAddress().addr(); - if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { - throw new ExecutionException(networkErrorMsg + ", static NAT rule public IP = " + publicIp); + + if (externalFirewall != null && firewallAnswer != null) { + if (!sharedSourceNat) { + // Manage the entry for this network's source NAT IP address + List sourceNatIps = _ipAddressDao.listByAssociatedNetwork(network.getId(), true); + if (sourceNatIps.size() == 1) { + String publicIp = sourceNatIps.get(0).getAddress().addr(); + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { + throw new CloudRuntimeException(networkErrorMsg + ", source NAT IP = " + publicIp); + } + } + + // Manage one entry for each static NAT rule in this network + List staticNatIps = _ipAddressDao.listStaticNatPublicIps(network.getId()); + for (IPAddressVO staticNatIp : staticNatIps) { + String publicIp = staticNatIp.getAddress().addr(); + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { + throw new CloudRuntimeException(networkErrorMsg + ", static NAT rule public IP = " + publicIp); + } + } + + // Manage one entry for each port forwarding rule in this network + List 
portForwardingRules = _portForwardingRulesDao.listByNetwork(network.getId()); + for (PortForwardingRuleVO portForwardingRule : portForwardingRules) { + String publicIp = _networkModel.getIp(portForwardingRule.getSourceIpAddressId()).getAddress().addr(); + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { + throw new CloudRuntimeException(networkErrorMsg + ", port forwarding rule public IP = " + publicIp); + } + } + } else { + // Manage the account-wide entry for the external firewall + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), null, externalFirewall.getId(), firewallAnswer, false)) { + throw new CloudRuntimeException(networkErrorMsg); + } } } - - // Manage one entry for each port forwarding rule in this network - List portForwardingRules = _portForwardingRulesDao.listByNetwork(network.getId()); - for (PortForwardingRuleVO portForwardingRule : portForwardingRules) { - String publicIp = _networkModel.getIp(portForwardingRule.getSourceIpAddressId()).getAddress().addr(); - if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalFirewall.getId(), firewallAnswer, false)) { - throw new ExecutionException(networkErrorMsg + ", port forwarding rule public IP = " + publicIp); + + // If an external load balancer is added, manage one entry for each load balancing rule in this network + if (externalLoadBalancer != null && lbAnswer != null) { + boolean inline = _networkModel.isNetworkInlineMode(network); + List loadBalancers = _loadBalancerDao.listByNetworkIdAndScheme(network.getId(), Scheme.Public); + for (LoadBalancerVO loadBalancer : loadBalancers) { + String publicIp = _networkModel.getIp(loadBalancer.getSourceIpAddressId()).getAddress().addr(); + if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalLoadBalancer.getId(), lbAnswer, inline)) { + throw new 
CloudRuntimeException(networkErrorMsg + ", load balancing rule public IP = " + publicIp); + } } } - } else { - // Manage the account-wide entry for the external firewall - if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), null, externalFirewall.getId(), firewallAnswer, false)) { - throw new ExecutionException(networkErrorMsg); - } } - } - - // If an external load balancer is added, manage one entry for each load balancing rule in this network - if (externalLoadBalancer != null && lbAnswer != null) { - boolean inline = _networkModel.isNetworkInlineMode(network); - List loadBalancers = _loadBalancerDao.listByNetworkIdAndScheme(network.getId(), Scheme.Public); - for (LoadBalancerVO loadBalancer : loadBalancers) { - String publicIp = _networkModel.getIp(loadBalancer.getSourceIpAddressId()).getAddress().addr(); - if (!createOrUpdateStatsEntry(create, accountId, zoneId, network.getId(), publicIp, externalLoadBalancer.getId(), lbAnswer, inline)) { - throw new ExecutionException(networkErrorMsg + ", load balancing rule public IP = " + publicIp); - } - } - } - return txn.commit(); + }); + return true; } catch (Exception e) { s_logger.warn("Exception: ", e); - txn.rollback(); return false; - } finally { - txn.close(); } } } diff --git a/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java index 629bef5eca1..b0e1b39b870 100644 --- a/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalFirewallDeviceManagerImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.ExternalFirewallResponse; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -108,6 +107,8 @@ import com.cloud.utils.component.AdapterBase; import 
com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.net.UrlUtil; @@ -159,7 +160,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl @Override @DB - public ExternalFirewallDeviceVO addExternalFirewall(long physicalNetworkId, String url, String username, String password, String deviceName, ServerResource resource) { + public ExternalFirewallDeviceVO addExternalFirewall(long physicalNetworkId, String url, String username, String password, final String deviceName, ServerResource resource) { String guid; PhysicalNetworkVO pNetwork = null; NetworkDevice ntwkDevice = NetworkDevice.getNetworkDevice(deviceName); @@ -176,7 +177,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } zoneId = pNetwork.getDataCenterId(); - PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); + final PhysicalNetworkServiceProviderVO ntwkSvcProvider = _physicalNetworkServiceProviderDao.findByServiceProvider(pNetwork.getId(), ntwkDevice.getNetworkServiceProvder()); if (ntwkSvcProvider == null ) { throw new CloudRuntimeException("Network Service Provider: " + ntwkDevice.getNetworkServiceProvder() + " is not enabled in the physical network: " + physicalNetworkId + "to add this device" ); @@ -204,7 +205,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl hostDetails.put("username", username); hostDetails.put("password", password); hostDetails.put("deviceName", deviceName); - Map configParams = new HashMap(); + final Map configParams = new HashMap(); UrlUtil.parseQueryParameters(uri.getQuery(), false, configParams); 
hostDetails.putAll(configParams); @@ -215,27 +216,29 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl throw new CloudRuntimeException(e.getMessage()); } - Host externalFirewall = _resourceMgr.addHost(zoneId, resource, Host.Type.ExternalFirewall, hostDetails); + final Host externalFirewall = _resourceMgr.addHost(zoneId, resource, Host.Type.ExternalFirewall, hostDetails); if (externalFirewall != null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + final PhysicalNetworkVO pNetworkFinal = pNetwork; + return Transaction.execute(new TransactionCallback() { + @Override + public ExternalFirewallDeviceVO doInTransaction(TransactionStatus status) { + boolean dedicatedUse = (configParams.get(ApiConstants.FIREWALL_DEVICE_DEDICATED) != null) ? Boolean.parseBoolean(configParams.get(ApiConstants.FIREWALL_DEVICE_DEDICATED)) : false; + long capacity = NumbersUtil.parseLong(configParams.get(ApiConstants.FIREWALL_DEVICE_CAPACITY), 0); + if (capacity == 0) { + capacity = _defaultFwCapacity; + } - boolean dedicatedUse = (configParams.get(ApiConstants.FIREWALL_DEVICE_DEDICATED) != null) ? 
Boolean.parseBoolean(configParams.get(ApiConstants.FIREWALL_DEVICE_DEDICATED)) : false; - long capacity = NumbersUtil.parseLong(configParams.get(ApiConstants.FIREWALL_DEVICE_CAPACITY), 0); - if (capacity == 0) { - capacity = _defaultFwCapacity; - } + ExternalFirewallDeviceVO fwDevice = new ExternalFirewallDeviceVO(externalFirewall.getId(), pNetworkFinal.getId(), ntwkSvcProvider.getProviderName(), + deviceName, capacity, dedicatedUse); - ExternalFirewallDeviceVO fwDevice = new ExternalFirewallDeviceVO(externalFirewall.getId(), pNetwork.getId(), ntwkSvcProvider.getProviderName(), - deviceName, capacity, dedicatedUse); + _externalFirewallDeviceDao.persist(fwDevice); - _externalFirewallDeviceDao.persist(fwDevice); + DetailVO hostDetail = new DetailVO(externalFirewall.getId(), ApiConstants.FIREWALL_DEVICE_ID, String.valueOf(fwDevice.getId())); + _hostDetailDao.persist(hostDetail); - DetailVO hostDetail = new DetailVO(externalFirewall.getId(), ApiConstants.FIREWALL_DEVICE_ID, String.valueOf(fwDevice.getId())); - _hostDetailDao.persist(hostDetail); - - txn.commit(); - return fwDevice; + return fwDevice; + } + }); } else { return null; } @@ -343,7 +346,6 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl @DB protected boolean freeFirewallForNetwork(Network network) { - Transaction txn = Transaction.currentTxn(); GlobalLock deviceMapLock = GlobalLock.getInternLock("NetworkFirewallDeviceMap"); try { if (deviceMapLock.lock(120)) { @@ -353,7 +355,6 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl _networkExternalFirewallDao.remove(fwDeviceForNetwork.getId()); } } catch (Exception exception) { - txn.rollback(); s_logger.error("Failed to release firewall device for the network" + network.getId() + " due to " + exception.getMessage()); return false; } finally { @@ -363,7 +364,6 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl } finally { deviceMapLock.releaseRef(); } - 
txn.commit(); return true; } @@ -434,17 +434,20 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl IPAddressVO sourceNatIp = null; if (!sharedSourceNat) { - // Get the source NAT IP address for this account + // Get the source NAT IP address for this network List sourceNatIps = _networkModel.listPublicIpsAssignedToAccount(network.getAccountId(), zoneId, true); - if (sourceNatIps.size() != 1) { - String errorMsg = "External firewall was unable to find the source NAT IP address for account " - + account.getAccountName(); + for (IpAddress ipAddress : sourceNatIps) { + if (ipAddress.getAssociatedWithNetworkId().longValue() == network.getId()) { + sourceNatIp = _ipAddressDao.findById(ipAddress.getId()); + break; + } + } + if (sourceNatIp == null) { + String errorMsg = "External firewall was unable to find the source NAT IP address for network " + network.getName(); s_logger.error(errorMsg); return true; - } else { - sourceNatIp = _ipAddressDao.findById(sourceNatIps.get(0).getId()); } } @@ -669,7 +672,7 @@ public abstract class ExternalFirewallDeviceManagerImpl extends AdapterBase impl String maskedIpRange = ipRange[0] + "-" + ipRange[1]; - RemoteAccessVpnCfgCommand createVpnCmd = new RemoteAccessVpnCfgCommand(create, ip.getAddress().addr(), vpn.getLocalIp(), maskedIpRange, vpn.getIpsecPresharedKey()); + RemoteAccessVpnCfgCommand createVpnCmd = new RemoteAccessVpnCfgCommand(create, ip.getAddress().addr(), vpn.getLocalIp(), maskedIpRange, vpn.getIpsecPresharedKey(), false); createVpnCmd.setAccessDetail(NetworkElementCommand.ACCOUNT_ID, String.valueOf(network.getAccountId())); createVpnCmd.setAccessDetail(NetworkElementCommand.GUEST_NETWORK_CIDR, network.getCidr()); Answer answer = _agentMgr.easySend(externalFirewall.getId(), createVpnCmd); diff --git a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java index dd4893030fe..6ca40c01e98 100644 --- 
a/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java +++ b/server/src/com/cloud/network/ExternalLoadBalancerDeviceManagerImpl.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.response.ExternalLoadBalancerResponse; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -118,6 +117,9 @@ import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.net.UrlUtil; @@ -196,11 +198,11 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase @Override @DB public ExternalLoadBalancerDeviceVO addExternalLoadBalancer(long physicalNetworkId, String url, - String username, String password, String deviceName, ServerResource resource, boolean gslbProvider, - String gslbSitePublicIp, String gslbSitePrivateIp) { + String username, String password, final String deviceName, ServerResource resource, final boolean gslbProvider, + final String gslbSitePublicIp, final String gslbSitePrivateIp) { PhysicalNetworkVO pNetwork = null; - NetworkDevice ntwkDevice = NetworkDevice.getNetworkDevice(deviceName); + final NetworkDevice ntwkDevice = NetworkDevice.getNetworkDevice(deviceName); long zoneId; if ((ntwkDevice == null) || (url == null) || (username == null) || (resource == null) || (password == null)) { @@ -258,39 +260,41 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase UrlUtil.parseQueryParameters(uri.getQuery(), false, configParams); 
hostDetails.putAll(configParams); - Transaction txn = Transaction.currentTxn(); try { resource.configure(hostName, hostDetails); - Host host = _resourceMgr.addHost(zoneId, resource, Host.Type.ExternalLoadBalancer, hostDetails); + final Host host = _resourceMgr.addHost(zoneId, resource, Host.Type.ExternalLoadBalancer, hostDetails); if (host != null) { - boolean dedicatedUse = (configParams.get(ApiConstants.LOAD_BALANCER_DEVICE_DEDICATED) != null) ? Boolean.parseBoolean(configParams + final boolean dedicatedUse = (configParams.get(ApiConstants.LOAD_BALANCER_DEVICE_DEDICATED) != null) ? Boolean.parseBoolean(configParams .get(ApiConstants.LOAD_BALANCER_DEVICE_DEDICATED)) : false; long capacity = NumbersUtil.parseLong(configParams.get(ApiConstants.LOAD_BALANCER_DEVICE_CAPACITY), 0); if (capacity == 0) { capacity = _defaultLbCapacity; } - ExternalLoadBalancerDeviceVO lbDeviceVO; - txn.start(); - lbDeviceVO = new ExternalLoadBalancerDeviceVO(host.getId(), pNetwork.getId(), ntwkDevice.getNetworkServiceProvder(), - deviceName, capacity, dedicatedUse, gslbProvider); - if (gslbProvider) { - lbDeviceVO.setGslbSitePublicIP(gslbSitePublicIp); - lbDeviceVO.setGslbSitePrivateIP(gslbSitePrivateIp); - } - _externalLoadBalancerDeviceDao.persist(lbDeviceVO); - DetailVO hostDetail = new DetailVO(host.getId(), ApiConstants.LOAD_BALANCER_DEVICE_ID, String.valueOf(lbDeviceVO.getId())); - _hostDetailDao.persist(hostDetail); + final long capacityFinal = capacity; + final PhysicalNetworkVO pNetworkFinal = pNetwork; + return Transaction.execute(new TransactionCallback() { + @Override + public ExternalLoadBalancerDeviceVO doInTransaction(TransactionStatus status) { + ExternalLoadBalancerDeviceVO lbDeviceVO = new ExternalLoadBalancerDeviceVO(host.getId(), pNetworkFinal.getId(), ntwkDevice.getNetworkServiceProvder(), + deviceName, capacityFinal, dedicatedUse, gslbProvider); + if (gslbProvider) { + lbDeviceVO.setGslbSitePublicIP(gslbSitePublicIp); + 
lbDeviceVO.setGslbSitePrivateIP(gslbSitePrivateIp); + } + _externalLoadBalancerDeviceDao.persist(lbDeviceVO); + DetailVO hostDetail = new DetailVO(host.getId(), ApiConstants.LOAD_BALANCER_DEVICE_ID, String.valueOf(lbDeviceVO.getId())); + _hostDetailDao.persist(hostDetail); - txn.commit(); - return lbDeviceVO; + return lbDeviceVO; + } + }); } else { throw new CloudRuntimeException("Failed to add load balancer device due to internal error."); } } catch (ConfigurationException e) { - txn.rollback(); throw new CloudRuntimeException(e.getMessage()); } } @@ -402,7 +406,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase } @DB - protected ExternalLoadBalancerDeviceVO allocateLoadBalancerForNetwork(Network guestConfig) throws InsufficientCapacityException { + protected ExternalLoadBalancerDeviceVO allocateLoadBalancerForNetwork(final Network guestConfig) throws InsufficientCapacityException { boolean retry = true; boolean tryLbProvisioning = false; ExternalLoadBalancerDeviceVO lbDevice = null; @@ -412,34 +416,33 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase while (retry) { GlobalLock deviceMapLock = GlobalLock.getInternLock("LoadBalancerAllocLock"); - Transaction txn = Transaction.currentTxn(); try { if (deviceMapLock.lock(120)) { try { - boolean dedicatedLB = offering.getDedicatedLB(); // does network offering supports a dedicated -// load balancer? - long lbDeviceId; + final boolean dedicatedLB = offering.getDedicatedLB(); // does network offering supports a dedicated load balancer? - txn.start(); try { - // FIXME: should the device allocation be done during network implement phase or do a - // lazy allocation when first rule for the network is configured?? 
- - // find a load balancer device for this network as per the network offering - lbDevice = findSuitableLoadBalancerForNetwork(guestConfig, dedicatedLB); - lbDeviceId = lbDevice.getId(); - - // persist the load balancer device id that will be used for this network. Once a network - // is implemented on a LB device then later on all rules will be programmed on to same -// device - NetworkExternalLoadBalancerVO networkLB = new NetworkExternalLoadBalancerVO(guestConfig.getId(), lbDeviceId); - _networkExternalLBDao.persist(networkLB); - - // mark device to be either dedicated or shared use - lbDevice.setAllocationState(dedicatedLB ? LBDeviceAllocationState.Dedicated : LBDeviceAllocationState.Shared); - _externalLoadBalancerDeviceDao.update(lbDeviceId, lbDevice); - - txn.commit(); + lbDevice = Transaction.execute(new TransactionCallbackWithException() { + @Override + public ExternalLoadBalancerDeviceVO doInTransaction(TransactionStatus status) throws InsufficientCapacityException { + // FIXME: should the device allocation be done during network implement phase or do a + // lazy allocation when first rule for the network is configured?? + + // find a load balancer device for this network as per the network offering + ExternalLoadBalancerDeviceVO lbDevice = findSuitableLoadBalancerForNetwork(guestConfig, dedicatedLB); + long lbDeviceId = lbDevice.getId(); + + // persist the load balancer device id that will be used for this network. Once a network + // is implemented on a LB device then later on all rules will be programmed on to same device + NetworkExternalLoadBalancerVO networkLB = new NetworkExternalLoadBalancerVO(guestConfig.getId(), lbDeviceId); + _networkExternalLBDao.persist(networkLB); + + // mark device to be either dedicated or shared use + lbDevice.setAllocationState(dedicatedLB ? 
LBDeviceAllocationState.Dedicated : LBDeviceAllocationState.Shared); + _externalLoadBalancerDeviceDao.update(lbDeviceId, lbDevice); + return lbDevice; + } + }); // allocated load balancer for the network, so skip retry tryLbProvisioning = false; @@ -448,28 +451,22 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase // if already attempted to provision load balancer then throw out of capacity exception, if (tryLbProvisioning) { retry = false; - // TODO: throwing warning instead of error for now as its possible another provider can -// service this network + // TODO: throwing warning instead of error for now as its possible another provider can service this network s_logger.warn("There are no load balancer device with the capacity for implementing this network"); throw exception; } else { - tryLbProvisioning = true; // if possible provision a LB appliance in to the physical -// network + tryLbProvisioning = true; // if possible provision a LB appliance in to the physical network } } } finally { deviceMapLock.unlock(); - if (lbDevice == null) { - txn.rollback(); - } } } } finally { deviceMapLock.releaseRef(); } - // there are no LB devices or there is no free capacity on the devices in the physical network so provision -// a new LB appliance + // there are no LB devices or there is no free capacity on the devices in the physical network so provision a new LB appliance if (tryLbProvisioning) { // check if LB appliance can be dynamically provisioned List providerLbDevices = _externalLoadBalancerDeviceDao.listByProviderAndDeviceAllocationState(physicalNetworkId, provider, @@ -477,8 +474,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if ((providerLbDevices != null) && (!providerLbDevices.isEmpty())) { for (ExternalLoadBalancerDeviceVO lbProviderDevice : providerLbDevices) { if (lbProviderDevice.getState() == LBDeviceState.Enabled) { - // acquire a private IP from the data center which will be used as 
management IP of -// provisioned LB appliance, + // acquire a private IP from the data center which will be used as management IP of provisioned LB appliance, DataCenterIpAddressVO dcPrivateIp = _dcDao.allocatePrivateIpAddress(guestConfig.getDataCenterId(), lbProviderDevice.getUuid()); if (dcPrivateIp == null) { throw new InsufficientNetworkCapacityException("failed to acquire a priavate IP in the zone " + guestConfig.getDataCenterId() + @@ -509,13 +505,11 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase String publicIf = createLbAnswer.getPublicInterface(); String privateIf = createLbAnswer.getPrivateInterface(); - // we have provisioned load balancer so add the appliance as cloudstack provisioned external -// load balancer + // we have provisioned load balancer so add the appliance as cloudstack provisioned external load balancer String dedicatedLb = offering.getDedicatedLB() ? "true" : "false"; String capacity = Long.toString(lbProviderDevice.getCapacity()); - // acquire a public IP to associate with lb appliance (used as subnet IP to make the -// appliance part of private network) + // acquire a public IP to associate with lb appliance (used as subnet IP to make the appliance part of private network) PublicIp publicIp = _ipAddrMgr.assignPublicIpAddress(guestConfig.getDataCenterId(), null, _accountMgr.getSystemAccount(), @@ -542,8 +536,7 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase } if (lbAppliance != null) { - // mark the load balancer as cloudstack managed and set parent host id on which lb -// appliance is provisioned + // mark the load balancer as cloudstack managed and set parent host id on which lb appliance is provisioned ExternalLoadBalancerDeviceVO managedLb = _externalLoadBalancerDeviceDao.findById(lbAppliance.getId()); managedLb.setIsManagedDevice(true); managedLb.setParentHostId(lbProviderDevice.getHostId()); @@ -557,8 +550,7 @@ public abstract class 
ExternalLoadBalancerDeviceManagerImpl extends AdapterBase if (answer == null || !answer.getResult()) { s_logger.warn("Failed to destroy load balancer appliance created"); } else { - // release the public & private IP back to dc pool, as the load balancer -// appliance is now destroyed + // release the public & private IP back to dc pool, as the load balancer appliance is now destroyed _dcDao.releasePrivateIpAddress(lbIP, guestConfig.getDataCenterId(), null); _ipAddrMgr.disassociatePublicIpAddress(publicIp.getId(), _accountMgr.getSystemUser().getId(), _accountMgr.getSystemAccount()); } @@ -656,34 +648,40 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase } @DB - protected boolean freeLoadBalancerForNetwork(Network guestConfig) { - Transaction txn = Transaction.currentTxn(); + protected boolean freeLoadBalancerForNetwork(final Network guestConfig) { GlobalLock deviceMapLock = GlobalLock.getInternLock("LoadBalancerAllocLock"); try { if (deviceMapLock.lock(120)) { - txn.start(); - // since network is shutdown remove the network mapping to the load balancer device - NetworkExternalLoadBalancerVO networkLBDevice = _networkExternalLBDao.findByNetworkId(guestConfig.getId()); - long lbDeviceId = networkLBDevice.getExternalLBDeviceId(); - _networkExternalLBDao.remove(networkLBDevice.getId()); + ExternalLoadBalancerDeviceVO lbDevice = Transaction.execute(new TransactionCallback() { + @Override + public ExternalLoadBalancerDeviceVO doInTransaction(TransactionStatus status) { + // since network is shutdown remove the network mapping to the load balancer device + NetworkExternalLoadBalancerVO networkLBDevice = _networkExternalLBDao.findByNetworkId(guestConfig.getId()); + long lbDeviceId = networkLBDevice.getExternalLBDeviceId(); + _networkExternalLBDao.remove(networkLBDevice.getId()); - List ntwksMapped = _networkExternalLBDao.listByLoadBalancerDeviceId(networkLBDevice.getExternalLBDeviceId()); - ExternalLoadBalancerDeviceVO lbDevice = 
_externalLoadBalancerDeviceDao.findById(lbDeviceId); - boolean lbInUse = !(ntwksMapped == null || ntwksMapped.isEmpty()); - boolean lbCloudManaged = lbDevice.getIsManagedDevice(); + List ntwksMapped = _networkExternalLBDao.listByLoadBalancerDeviceId(networkLBDevice.getExternalLBDeviceId()); + ExternalLoadBalancerDeviceVO lbDevice = _externalLoadBalancerDeviceDao.findById(lbDeviceId); + boolean lbInUse = !(ntwksMapped == null || ntwksMapped.isEmpty()); + boolean lbCloudManaged = lbDevice.getIsManagedDevice(); - if (!lbInUse && !lbCloudManaged) { - // this is the last network mapped to the load balancer device so set device allocation state to be -// free - lbDevice.setAllocationState(LBDeviceAllocationState.Free); - _externalLoadBalancerDeviceDao.update(lbDevice.getId(), lbDevice); - } + if (!lbInUse && !lbCloudManaged) { + // this is the last network mapped to the load balancer device so set device allocation state to be free + lbDevice.setAllocationState(LBDeviceAllocationState.Free); + _externalLoadBalancerDeviceDao.update(lbDevice.getId(), lbDevice); + } - // commit the changes before sending agent command to destroy cloudstack managed LB - txn.commit(); + // commit the changes before sending agent command to destroy cloudstack managed LB + if (!lbInUse && lbCloudManaged) { + return lbDevice; + } else { + return null; + } + } + }); - if (!lbInUse && lbCloudManaged) { + if (lbDevice != null) { // send DestroyLoadBalancerApplianceCommand to the host where load balancer appliance is provisioned Host lbHost = _hostDao.findById(lbDevice.getHostId()); String lbIP = lbHost.getPrivateIpAddress(); @@ -723,7 +721,6 @@ public abstract class ExternalLoadBalancerDeviceManagerImpl extends AdapterBase return false; } } catch (Exception exception) { - txn.rollback(); s_logger.error("Failed to release load balancer device for the network" + guestConfig.getId() + " due to " + exception.getMessage()); } finally { deviceMapLock.releaseRef(); diff --git 
a/server/src/com/cloud/network/IpAddressManagerImpl.java b/server/src/com/cloud/network/IpAddressManagerImpl.java index 4c49247c4e6..2347a6eb504 100644 --- a/server/src/com/cloud/network/IpAddressManagerImpl.java +++ b/server/src/com/cloud/network/IpAddressManagerImpl.java @@ -29,7 +29,6 @@ import java.util.UUID; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.context.CallContext; @@ -133,17 +132,24 @@ import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserDao; import com.cloud.utils.Journal; import com.cloud.utils.Pair; +import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExceptionUtil; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; import com.cloud.vm.Nic; @@ -609,12 +615,13 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB @Override - public boolean releasePortableIpAddress(long addrId) { - Transaction txn = Transaction.currentTxn(); - GlobalLock portableIpLock = GlobalLock.getInternLock("PortablePublicIpRange"); + public boolean releasePortableIpAddress(final long addrId) { + final 
GlobalLock portableIpLock = GlobalLock.getInternLock("PortablePublicIpRange"); - txn.start(); try { + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { portableIpLock.lock(5); IPAddressVO ip = _ipAddressDao.findById(addrId); @@ -629,8 +636,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // remove the provisioned public ip address _ipAddressDao.remove(ip.getId()); - txn.commit(); return true; + } + }); } finally { portableIpLock.releaseRef(); } @@ -649,15 +657,16 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } @DB - public PublicIp fetchNewPublicIp(long dcId, Long podId, List vlanDbIds, Account owner, VlanType vlanUse, Long guestNetworkId, boolean sourceNat, boolean assign, - String requestedIp, boolean isSystem, Long vpcId) throws InsufficientAddressCapacityException { + public PublicIp fetchNewPublicIp(final long dcId, final Long podId, final List vlanDbIds, final Account owner, final VlanType vlanUse, final Long guestNetworkId, final boolean sourceNat, final boolean assign, + final String requestedIp, final boolean isSystem, final Long vpcId) throws InsufficientAddressCapacityException { + IPAddressVO addr = Transaction.execute(new TransactionCallbackWithException() { + @Override + public IPAddressVO doInTransaction(TransactionStatus status) throws InsufficientAddressCapacityException { StringBuilder errorMessage = new StringBuilder("Unable to get ip adress in "); boolean fetchFromDedicatedRange = false; List dedicatedVlanDbIds = new ArrayList(); List nonDedicatedVlanDbIds = new ArrayList(); - Transaction txn = Transaction.currentTxn(); - txn.start(); SearchCriteria sc = null; if (podId != null) { sc = AssignIpAddressFromPodVlanSearch.create(); @@ -777,7 +786,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage _ipAddressDao.update(addr.getId(), addr); - txn.commit(); + return 
addr; + } + }); if (vlanUse == VlanType.VirtualNetwork) { _firewallMgr.addSystemFirewallRules(addr, owner); @@ -788,16 +799,15 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB @Override - public void markPublicIpAsAllocated(IPAddressVO addr) { + public void markPublicIpAsAllocated(final IPAddressVO addr) { assert (addr.getState() == IpAddress.State.Allocating || addr.getState() == IpAddress.State.Free) : "Unable to transition from state " + addr.getState() + " to " + IpAddress.State.Allocated; - - Transaction txn = Transaction.currentTxn(); - + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { Account owner = _accountMgr.getAccount(addr.getAllocatedToAccountId()); - txn.start(); addr.setState(IpAddress.State.Allocated); _ipAddressDao.update(addr.getId(), addr); @@ -819,13 +829,13 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage addr.getClass().getName(), addr.getUuid()); } - // don't increment resource count for direct and dedicated ip addresses - if ((addr.getAssociatedWithNetworkId() != null || addr.getVpcId() != null) && !isIpDedicated(addr)) { + + if (updateIpResourceCount(addr)) { _resourceLimitMgr.incrementResourceCount(owner.getId(), ResourceType.public_ip); } } - - txn.commit(); + } + }); } private boolean isIpDedicated(IPAddressVO addr) { @@ -855,17 +865,17 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB @Override - public PublicIp assignDedicateIpAddress(Account owner, Long guestNtwkId, Long vpcId, long dcId, boolean isSourceNat) throws ConcurrentOperationException, + public PublicIp assignDedicateIpAddress(Account owner, final Long guestNtwkId, final Long vpcId, final long dcId, final boolean isSourceNat) throws ConcurrentOperationException, InsufficientAddressCapacityException { - long ownerId = owner.getId(); + final long ownerId = owner.getId(); 
PublicIp ip = null; - Transaction txn = Transaction.currentTxn(); try { - txn.start(); - - owner = _accountDao.acquireInLockTable(ownerId); + ip = Transaction.execute(new TransactionCallbackWithException() { + @Override + public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAddressCapacityException { + Account owner = _accountDao.acquireInLockTable(ownerId); if (owner == null) { // this ownerId comes from owner or type Account. See the class "AccountVO" and the annotations in that class @@ -877,13 +887,16 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage s_logger.debug("lock account " + ownerId + " is acquired"); } - ip = fetchNewPublicIp(dcId, null, null, owner, VlanType.VirtualNetwork, guestNtwkId, isSourceNat, false, null, false, vpcId); + PublicIp ip = fetchNewPublicIp(dcId, null, null, owner, VlanType.VirtualNetwork, guestNtwkId, isSourceNat, false, null, false, vpcId); IPAddressVO publicIp = ip.ip(); markPublicIpAsAllocated(publicIp); _ipAddressDao.update(publicIp.getId(), publicIp); - txn.commit(); + return ip; + } + }); + return ip; } finally { if (owner != null) { @@ -894,7 +907,6 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage _accountDao.releaseFromLockTable(ownerId); } if (ip == null) { - txn.rollback(); s_logger.error("Unable to get source nat ip address for account " + ownerId); } } @@ -977,11 +989,11 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB @Override - public IpAddress allocateIp(Account ipOwner, boolean isSystem, Account caller, long callerUserId, DataCenter zone) throws ConcurrentOperationException, + public IpAddress allocateIp(final Account ipOwner, final boolean isSystem, Account caller, long callerUserId, final DataCenter zone) throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException { - VlanType vlanType = VlanType.VirtualNetwork; - boolean assign = false; + 
final VlanType vlanType = VlanType.VirtualNetwork; + final boolean assign = false; if (Grouping.AllocationState.Disabled == zone.getAllocationState() && !_accountMgr.isRootAdmin(caller.getId())) { // zone is of type DataCenter. See DataCenterVO.java. @@ -992,7 +1004,6 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage PublicIp ip = null; - Transaction txn = Transaction.currentTxn(); Account accountToLock = null; try { if (s_logger.isDebugEnabled()) { @@ -1008,9 +1019,10 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage s_logger.debug("Associate IP address lock acquired"); } - txn.start(); - - ip = fetchNewPublicIp(zone.getId(), null, null, ipOwner, vlanType, null, false, assign, null, isSystem, null); + ip = Transaction.execute(new TransactionCallbackWithException() { + @Override + public PublicIp doInTransaction(TransactionStatus status) throws InsufficientAddressCapacityException { + PublicIp ip = fetchNewPublicIp(zone.getId(), null, null, ipOwner, vlanType, null, false, assign, null, isSystem, null); if (ip == null) { InsufficientAddressCapacityException ex = new InsufficientAddressCapacityException("Unable to find available public IP addresses", DataCenter.class, zone.getId()); @@ -1022,7 +1034,10 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage s_logger.debug("Got " + ipAddress + " to assign for account " + ipOwner.getId() + " in zone " + zone.getId()); - txn.commit(); + return ip; + } + }); + } finally { if (accountToLock != null) { if (s_logger.isDebugEnabled()) { @@ -1037,18 +1052,19 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @Override @DB - public IpAddress allocatePortableIp(Account ipOwner, Account caller, long dcId, Long networkId, Long vpcID) throws ConcurrentOperationException, ResourceAllocationException, + public IpAddress allocatePortableIp(final Account ipOwner, Account caller, final long dcId, final Long 
networkId, final Long vpcID) throws ConcurrentOperationException, ResourceAllocationException, InsufficientAddressCapacityException { - Transaction txn = Transaction.currentTxn(); GlobalLock portableIpLock = GlobalLock.getInternLock("PortablePublicIpRange"); - PortableIpVO allocatedPortableIp; IPAddressVO ipaddr; try { portableIpLock.lock(5); - txn.start(); + ipaddr = Transaction.execute(new TransactionCallbackWithException() { + @Override + public IPAddressVO doInTransaction(TransactionStatus status) throws InsufficientAddressCapacityException { + PortableIpVO allocatedPortableIp; List portableIpVOs = _portableIpDao.listByRegionIdAndState(1, PortableIp.State.Free); if (portableIpVOs == null || portableIpVOs.isEmpty()) { @@ -1086,7 +1102,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage vlan = _vlanDao.persist(vlan); // provision the portable IP in to user_ip_address table - ipaddr = new IPAddressVO(new Ip(allocatedPortableIp.getAddress()), dcId, networkId, vpcID, physicalNetworkId, network.getId(), vlan.getId(), true); + IPAddressVO ipaddr = new IPAddressVO(new Ip(allocatedPortableIp.getAddress()), dcId, networkId, vpcID, physicalNetworkId, network.getId(), vlan.getId(), true); ipaddr.setState(State.Allocated); ipaddr.setAllocatedTime(new Date()); ipaddr.setAllocatedInDomainId(ipOwner.getDomainId()); @@ -1104,8 +1120,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage ipaddr.getClass().getName(), ipaddr.getUuid()); - txn.commit(); - + return ipaddr; + } + }); } finally { portableIpLock.unlock(); } @@ -1198,12 +1215,23 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } } - // In Advance zone only allow to do IP assoc - // - for Isolated networks with source nat service enabled - // - for shared networks with source nat service enabled - if (zone.getNetworkType() == NetworkType.Advanced && !(_networkModel.areServicesSupportedInNetwork(network.getId(), 
Service.SourceNat))) { - throw new InvalidParameterValueException("In zone of type " + NetworkType.Advanced + " ip address can be associated only to the network of guest type " + - GuestType.Isolated + " with the " + Service.SourceNat.getName() + " enabled"); + if (zone.getNetworkType() == NetworkType.Advanced) { + // In Advance zone allow to do IP assoc only for Isolated networks with source nat service enabled + if (network.getGuestType() == GuestType.Isolated && + !(_networkModel.areServicesSupportedInNetwork(network.getId(), Service.SourceNat))) { + throw new InvalidParameterValueException("In zone of type " + NetworkType.Advanced + + " ip address can be associated only to the network of guest type " + GuestType.Isolated + + " with the " + Service.SourceNat.getName() + " enabled"); + } + + // In Advance zone allow to do IP assoc only for shared networks with source nat/static nat/lb/pf services enabled + if (network.getGuestType() == GuestType.Shared && + !isSharedNetworkOfferingWithServices(network.getNetworkOfferingId())) { + throw new InvalidParameterValueException("In zone of type " + NetworkType.Advanced + + " ip address can be associated with network of guest type " + GuestType.Shared + "only if at " + + "least one of the services " + Service.SourceNat.getName() + "/" + Service.StaticNat.getName() + "/" + + Service.Lb.getName() + "/" + Service.PortForwarding.getName() + " is enabled"); + } } NetworkOffering offering = _networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -1381,7 +1409,7 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB @Override - public void transferPortableIP(long ipAddrId, long currentNetworkId, long newNetworkId) throws ResourceAllocationException, ResourceUnavailableException, + public void transferPortableIP(final long ipAddrId, long currentNetworkId, long newNetworkId) throws ResourceAllocationException, ResourceUnavailableException, InsufficientAddressCapacityException, 
ConcurrentOperationException { Network srcNetwork = _networksDao.findById(currentNetworkId); @@ -1389,18 +1417,16 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage throw new InvalidParameterValueException("Invalid source network id " + currentNetworkId + " is given"); } - Network dstNetwork = _networksDao.findById(newNetworkId); + final Network dstNetwork = _networksDao.findById(newNetworkId); if (dstNetwork == null) { throw new InvalidParameterValueException("Invalid source network id " + newNetworkId + " is given"); } - IPAddressVO ip = _ipAddressDao.findById(ipAddrId); + final IPAddressVO ip = _ipAddressDao.findById(ipAddrId); if (ip == null) { throw new InvalidParameterValueException("Invalid portable ip address id is given"); } - Transaction txn = Transaction.currentTxn(); - assert (isPortableIpTransferableFromNetwork(ipAddrId, currentNetworkId)); // disassociate portable IP with current network/VPC network @@ -1413,8 +1439,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage // If portable IP need to be transferred across the zones, then mark the entry corresponding to portable ip // in user_ip_address and vlan tables so as to emulate portable IP as provisioned in destination data center if (srcNetwork.getDataCenterId() != dstNetwork.getDataCenterId()) { - txn.start(); - + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { long physicalNetworkId = _networkModel.getDefaultPhysicalNetworkByZoneAndTrafficType(dstNetwork.getDataCenterId(), TrafficType.Public).getId(); long publicNetworkId = _networkModel.getSystemNetworkByZoneAndTrafficType(dstNetwork.getDataCenterId(), TrafficType.Public).getId(); @@ -1428,15 +1455,16 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage vlan.setNetworkId(publicNetworkId); vlan.setDataCenterId(dstNetwork.getDataCenterId()); 
_vlanDao.update(ip.getVlanId(), vlan); - - txn.commit(); + } + }); } // associate portable IP with new network/VPC network associatePortableIPToGuestNetwork(ipAddrId, newNetworkId, false); - txn.start(); - + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { if (dstNetwork.getVpcId() != null) { ip.setVpcId(dstNetwork.getVpcId()); } else { @@ -1444,8 +1472,9 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } _ipAddressDao.update(ipAddrId, ip); + } + }); - txn.commit(); // trigger an action event for the transfer of portable IP across the networks, so that external entities // monitoring for this event can initiate the route advertisement for the availability of IP from the zoe @@ -1463,17 +1492,21 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @Override @DB - public boolean associateIpAddressListToAccount(long userId, long accountId, long zoneId, Long vlanId, Network guestNetwork) throws InsufficientCapacityException, + public boolean associateIpAddressListToAccount(long userId, final long accountId, final long zoneId, final Long vlanId, final Network guestNetworkFinal) throws InsufficientCapacityException, ConcurrentOperationException, ResourceUnavailableException, ResourceAllocationException { - Account owner = _accountMgr.getActiveAccountById(accountId); - boolean createNetwork = false; + final Account owner = _accountMgr.getActiveAccountById(accountId); - if (guestNetwork != null && guestNetwork.getTrafficType() != TrafficType.Guest) { - throw new InvalidParameterValueException("Network " + guestNetwork + " is not of a type " + TrafficType.Guest); + if (guestNetworkFinal != null && guestNetworkFinal.getTrafficType() != TrafficType.Guest) { + throw new InvalidParameterValueException("Network " + guestNetworkFinal + " is not of a type " + TrafficType.Guest); } - Transaction txn = Transaction.currentTxn(); - 
txn.start(); + Ternary, Network> pair = null; + try { + pair = Transaction.execute(new TransactionCallbackWithException, Network>,Exception>() { + @Override + public Ternary, Network> doInTransaction(TransactionStatus status) throws InsufficientCapacityException, ResourceAllocationException { + boolean createNetwork = false; + Network guestNetwork = guestNetworkFinal; if (guestNetwork == null) { List networks = getIsolatedNetworksWithSourceNATOwnedByAccountInZone(zoneId, owner); @@ -1560,8 +1593,19 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage markPublicIpAsAllocated(addr); } } + return new Ternary, Network>(createNetwork, requiredOfferings, guestNetwork); + } + }); + } catch (Exception e1) { + ExceptionUtil.rethrowRuntime(e1); + ExceptionUtil.rethrow(e1, InsufficientCapacityException.class); + ExceptionUtil.rethrow(e1, ResourceAllocationException.class); + throw new IllegalStateException(e1); + } - txn.commit(); + boolean createNetwork = pair.first(); + List requiredOfferings = pair.second(); + Network guestNetwork = pair.third(); // if the network offering has persistent set to true, implement the network if (createNetwork && requiredOfferings.get(0).getIsPersistent()) { @@ -1591,10 +1635,8 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @DB @Override - public IPAddressVO markIpAsUnavailable(long addrId) { - Transaction txn = Transaction.currentTxn(); - - IPAddressVO ip = _ipAddressDao.findById(addrId); + public IPAddressVO markIpAsUnavailable(final long addrId) { + final IPAddressVO ip = _ipAddressDao.findById(addrId); if (ip.getAllocatedToAccountId() == null && ip.getAllocatedTime() == null) { s_logger.trace("Ip address id=" + addrId + " is already released"); @@ -1602,10 +1644,10 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } if (ip.getState() != State.Releasing) { - txn.start(); - - // don't decrement resource count for direct and dedicated ips - 
if (ip.getAssociatedWithNetworkId() != null && !isIpDedicated(ip)) { + return Transaction.execute(new TransactionCallback() { + @Override + public IPAddressVO doInTransaction(TransactionStatus status) { + if (updateIpResourceCount(ip)) { _resourceLimitMgr.decrementResourceCount(_ipAddressDao.findById(addrId).getAllocatedToAccountId(), ResourceType.public_ip); } @@ -1629,14 +1671,19 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage } } - ip = _ipAddressDao.markAsUnavailable(addrId); - - txn.commit(); + return _ipAddressDao.markAsUnavailable(addrId); + } + }); } return ip; } + protected boolean updateIpResourceCount(IPAddressVO ip) { + // don't increment resource count for direct and dedicated ip addresses + return (ip.getAssociatedWithNetworkId() != null || ip.getVpcId() != null) && !isIpDedicated(ip); + } + @Override @DB public String acquireGuestIpAddress(Network network, String requestedIp) { @@ -1811,14 +1858,14 @@ public class IpAddressManagerImpl extends ManagerBase implements IpAddressManage @Override @DB - public void allocateDirectIp(NicProfile nic, DataCenter dc, VirtualMachineProfile vm, Network network, String requestedIpv4, String requestedIpv6) + public void allocateDirectIp(final NicProfile nic, final DataCenter dc, final VirtualMachineProfile vm, final Network network, final String requestedIpv4, final String requestedIpv6) throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws InsufficientAddressCapacityException { //This method allocates direct ip for the Shared network in Advance zones boolean ipv4 = false; - Transaction txn = Transaction.currentTxn(); - txn.start(); - if (network.getGateway() != null) { if (nic.getIp4Address() == null) { ipv4 = true; @@ -1876,8 +1923,8 @@ public class IpAddressManagerImpl extends 
ManagerBase implements IpAddressManage nic.setIp6Dns1(dc.getIp6Dns1()); nic.setIp6Dns2(dc.getIp6Dns2()); } - - txn.commit(); + } + }); } @Override diff --git a/server/src/com/cloud/network/NetworkServiceImpl.java b/server/src/com/cloud/network/NetworkServiceImpl.java index aa14a1da0a8..690365f3aaa 100755 --- a/server/src/com/cloud/network/NetworkServiceImpl.java +++ b/server/src/com/cloud/network/NetworkServiceImpl.java @@ -41,7 +41,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.command.admin.network.DedicateGuestVlanRangeCmd; @@ -97,7 +96,7 @@ import com.cloud.network.Network.Service; import com.cloud.network.Networks.BroadcastDomainType; import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetwork.BroadcastDomainRange; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.AccountGuestVlanMapDao; import com.cloud.network.dao.AccountGuestVlanMapVO; @@ -137,7 +136,7 @@ import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.org.Grouping; import com.cloud.projects.Project; import com.cloud.projects.ProjectManager; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; @@ -159,9 +158,15 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import 
com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.exception.ExceptionUtil; import com.cloud.utils.net.NetUtils; import com.cloud.vm.Nic; import com.cloud.vm.NicSecondaryIp; @@ -190,6 +195,10 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { private static final long MAX_VLAN_ID = 4095L; // 2^12 - 1 private static final long MIN_GRE_KEY = 0L; private static final long MAX_GRE_KEY = 4294967295L; // 2^32 -1 + private static final long MIN_VXLAN_VNI = 0L; + private static final long MAX_VXLAN_VNI = 16777214L; // 2^24 -2 + // MAX_VXLAN_VNI should be 16777215L (2^24-1), but Linux vxlan interface doesn't accept VNI:2^24-1 now. + // It seems a bug. @Inject DataCenterDao _dcDao = null; @@ -228,7 +237,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Inject UsageEventDao _usageEventDao; - @Inject List _networkGurus; + List _networkGurus; @Inject NetworkDomainDao _networkDomainDao; @@ -633,11 +642,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Override - public NicSecondaryIp allocateSecondaryGuestIP (Account ipOwner, long zoneId, Long nicId, Long networkId, String requestedIp) throws InsufficientAddressCapacityException { + public NicSecondaryIp allocateSecondaryGuestIP (Account ipOwner, long zoneId, final Long nicId, final Long networkId, String requestedIp) throws InsufficientAddressCapacityException { - Long accountId = null; - Long domainId = null; - Long vmId = null; String ipaddr = null; if (networkId == null) { @@ -647,7 +653,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { Account caller = CallContext.current().getCallingAccount(); //check whether the nic belongs to user 
vm. - NicVO nicVO = _nicDao.findById(nicId); + final NicVO nicVO = _nicDao.findById(nicId); if (nicVO == null) { throw new InvalidParameterValueException("There is no nic for the " + nicId); } @@ -668,8 +674,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (network == null) { throw new InvalidParameterValueException("Invalid network id is given"); } - accountId = ipOwner.getAccountId(); - domainId = ipOwner.getDomainId(); + final Long accountId = ipOwner.getAccountId(); + final Long domainId = ipOwner.getDomainId(); // Validate network offering NetworkOfferingVO ntwkOff = _networkOfferingDao.findById(network.getNetworkOfferingId()); @@ -714,12 +720,12 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { return null; } - NicSecondaryIpVO secondaryIpVO; if (ipaddr != null) { // we got the ip addr so up the nics table and secodary ip - Transaction txn = Transaction.currentTxn(); - txn.start(); - + final String addrFinal = ipaddr; + long id = Transaction.execute(new TransactionCallback() { + @Override + public Long doInTransaction(TransactionStatus status) { boolean nicSecondaryIpSet = nicVO.getSecondaryIp(); if (!nicSecondaryIpSet) { nicVO.setSecondaryIp(true); @@ -729,11 +735,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } s_logger.debug("Setting nic_secondary_ip table ..."); - vmId = nicVO.getInstanceId(); - secondaryIpVO = new NicSecondaryIpVO(nicId, ipaddr, vmId, accountId, domainId, networkId); + Long vmId = nicVO.getInstanceId(); + NicSecondaryIpVO secondaryIpVO = new NicSecondaryIpVO(nicId, addrFinal, vmId, accountId, domainId, networkId); _nicSecondaryIpDao.persist(secondaryIpVO); - txn.commit(); - return getNicSecondaryIp(secondaryIpVO.getId()); + return secondaryIpVO.getId(); + } + }); + + return getNicSecondaryIp(id); } else { return null; } @@ -803,13 +812,15 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw 
new InvalidParameterValueException("Can' remove the ip " + secondaryIp + "is associate with static NAT rule public IP address id " + publicIpVO.getId()); } } else if (dc.getNetworkType() == NetworkType.Basic || ntwkOff.getGuestType() == Network.GuestType.Shared) { - IPAddressVO ip = _ipAddressDao.findByIpAndNetworkId(secIpVO.getNetworkId(), secIpVO.getIp4Address()); + final IPAddressVO ip = _ipAddressDao.findByIpAndNetworkId(secIpVO.getNetworkId(), secIpVO.getIp4Address()); if (ip != null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { _ipAddrMgr.markIpAsUnavailable(ip.getId()); _ipAddressDao.unassignIpAddress(ip.getId()); - txn.commit(); + } + }); } } else { throw new InvalidParameterValueException("Not supported for this network now"); @@ -819,13 +830,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { return success; } - boolean removeNicSecondaryIP(NicSecondaryIpVO ipVO, boolean lastIp) { - Transaction txn = Transaction.currentTxn(); - long nicId = ipVO.getNicId(); - NicVO nic = _nicDao.findById(nicId); - - txn.start(); + boolean removeNicSecondaryIP(final NicSecondaryIpVO ipVO, final boolean lastIp) { + final long nicId = ipVO.getNicId(); + final NicVO nic = _nicDao.findById(nicId); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { if (lastIp) { nic.setSecondaryIp(false); s_logger.debug("Setting nics secondary ip to false ..."); @@ -834,7 +845,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { s_logger.debug("Revoving nic secondary ip entry ..."); _nicSecondaryIpDao.remove(ipVO.getId()); - txn.commit(); + } + }); + return true; } @@ -1253,8 +1266,51 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw ex; } - 
Transaction txn = Transaction.currentTxn(); - txn.start(); + Network network = commitNetwork(networkOfferingId, gateway, startIP, endIP, netmask, networkDomain, vlanId, + name, displayText, caller, physicalNetworkId, zoneId, domainId, isDomainSpecific, subdomainAccess, + vpcId, startIPv6, endIPv6, ip6Gateway, ip6Cidr, displayNetwork, aclId, isolatedPvlan, ntwkOff, pNtwk, + aclType, owner, cidr, createVlan); + + // if the network offering has persistent set to true, implement the network + if ( ntwkOff.getIsPersistent() ) { + try { + if ( network.getState() == Network.State.Setup ) { + s_logger.debug("Network id=" + network.getId() + " is already provisioned"); + return network; + } + DeployDestination dest = new DeployDestination(zone, null, null, null); + UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); + Journal journal = new Journal.LogJournal("Implementing " + network, s_logger); + ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, caller); + s_logger.debug("Implementing network " + network + " as a part of network provision for persistent network"); + Pair implementedNetwork = _networkMgr.implementNetwork(network.getId(), dest, context); + if (implementedNetwork.first() == null) { + s_logger.warn("Failed to provision the network " + network); + } + network = implementedNetwork.second(); + } catch (ResourceUnavailableException ex) { + s_logger.warn("Failed to implement persistent guest network " + network + "due to ", ex); + CloudRuntimeException e = new CloudRuntimeException("Failed to implement persistent guest network"); + e.addProxyObject(network.getUuid(), "networkId"); + throw e; + } + } + return network; + } + + private Network commitNetwork(final Long networkOfferingId, final String gateway, final String startIP, final String endIP, final String netmask, + final String networkDomain, final String vlanId, final String name, final String displayText, final Account 
caller, + final Long physicalNetworkId, final Long zoneId, final Long domainId, final boolean isDomainSpecific, final Boolean subdomainAccessFinal, + final Long vpcId, final String startIPv6, final String endIPv6, final String ip6Gateway, final String ip6Cidr, final Boolean displayNetwork, + final Long aclId, final String isolatedPvlan, final NetworkOfferingVO ntwkOff, final PhysicalNetwork pNtwk, final ACLType aclType, + final Account ownerFinal, final String cidr, final boolean createVlan) throws InsufficientCapacityException, + ResourceAllocationException { + try { + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public Network doInTransaction(TransactionStatus status) throws InsufficientCapacityException, ResourceAllocationException { + Account owner = ownerFinal; + Boolean subdomainAccess = subdomainAccessFinal; Long sharedDomainId = null; if (isDomainSpecific) { @@ -1312,34 +1368,15 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { _configMgr.createVlanAndPublicIpRange(pNtwk.getDataCenterId(), network.getId(), physicalNetworkId, false, null, startIP, endIP, gateway, netmask, vlanId, null, startIPv6, endIPv6, ip6Gateway, ip6Cidr); } - - txn.commit(); - - // if the network offering has persistent set to true, implement the network - if ( ntwkOff.getIsPersistent() ) { - try { - if ( network.getState() == Network.State.Setup ) { - s_logger.debug("Network id=" + network.getId() + " is already provisioned"); return network; } - DeployDestination dest = new DeployDestination(zone, null, null, null); - UserVO callerUser = _userDao.findById(CallContext.current().getCallingUserId()); - Journal journal = new Journal.LogJournal("Implementing " + network, s_logger); - ReservationContext context = new ReservationContextImpl(UUID.randomUUID().toString(), journal, callerUser, caller); - s_logger.debug("Implementing network " + network + " as a part of network provision for persistent network"); - Pair 
implementedNetwork = _networkMgr.implementNetwork(network.getId(), dest, context); - if (implementedNetwork.first() == null) { - s_logger.warn("Failed to provision the network " + network); - } - network = implementedNetwork.second(); - } catch (ResourceUnavailableException ex) { - s_logger.warn("Failed to implement persistent guest network " + network + "due to ", ex); - CloudRuntimeException e = new CloudRuntimeException("Failed to implement persistent guest network"); - e.addProxyObject(network.getUuid(), "networkId"); - throw e; - } + }); + } catch (Exception e) { + ExceptionUtil.rethrowRuntime(e); + ExceptionUtil.rethrow(e, InsufficientCapacityException.class); + ExceptionUtil.rethrow(e, ResourceAllocationException.class); + throw new IllegalStateException(e); } - return network; } @Override @@ -1636,7 +1673,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.Network.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.Network.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -1913,13 +1950,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Override @DB @ActionEvent(eventType = EventTypes.EVENT_NETWORK_UPDATE, eventDescription = "updating network", async = true) - public Network updateGuestNetwork(long networkId, String name, String displayText, Account callerAccount, - User callerUser, String domainSuffix, Long networkOfferingId, Boolean changeCidr, String guestVmCidr, Boolean displayNetwork) { + public Network updateGuestNetwork(final long networkId, String name, String displayText, Account callerAccount, + User callerUser, String domainSuffix, final Long networkOfferingId, Boolean 
changeCidr, String guestVmCidr, Boolean displayNetwork) { boolean restartNetwork = false; // verify input parameters - NetworkVO network = _networksDao.findById(networkId); + final NetworkVO network = _networksDao.findById(networkId); if (network == null) { // see NetworkVO.java InvalidParameterValueException ex = new InvalidParameterValueException("Specified network id doesn't exist in the system"); @@ -1973,7 +2010,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { boolean networkOfferingChanged = false; - long oldNetworkOfferingId = network.getNetworkOfferingId(); + final long oldNetworkOfferingId = network.getNetworkOfferingId(); NetworkOffering oldNtwkOff = _networkOfferingDao.findByIdIncludingRemoved(oldNetworkOfferingId); NetworkOfferingVO networkOffering = _networkOfferingDao.findById(networkOfferingId); if (networkOfferingId != null) { @@ -2022,10 +2059,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } } - Map newSvcProviders = new HashMap(); - if (networkOfferingChanged) { - newSvcProviders = _networkMgr.finalizeServicesAndProvidersForNetwork(_entityMgr.findById(NetworkOffering.class, networkOfferingId), network.getPhysicalNetworkId()); - } + final Map newSvcProviders = networkOfferingChanged ? 
_networkMgr.finalizeServicesAndProvidersForNetwork(_entityMgr.findById(NetworkOffering.class, networkOfferingId), network.getPhysicalNetworkId()) + : new HashMap(); // don't allow to modify network domain if the service is not supported if (domainSuffix != null) { @@ -2199,8 +2234,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (networkOfferingId != null) { if (networkOfferingChanged) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { network.setNetworkOfferingId(networkOfferingId); _networksDao.update(networkId, network, newSvcProviders); // get all nics using this network @@ -2221,7 +2257,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NETWORK_OFFERING_ASSIGN, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), nicIdString, networkOfferingId, null, isDefault, VirtualMachine.class.getName(), vm.getUuid()); } - txn.commit(); + } + }); } else { network.setNetworkOfferingId(networkOfferingId); _networksDao.update(networkId, network, _networkMgr.finalizeServicesAndProvidersForNetwork(_entityMgr.findById(NetworkOffering.class, networkOfferingId), network.getPhysicalNetworkId())); @@ -2377,8 +2414,8 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Override @DB @ActionEvent(eventType = EventTypes.EVENT_PHYSICAL_NETWORK_CREATE, eventDescription = "Creating Physical Network", create = true) - public PhysicalNetwork createPhysicalNetwork(Long zoneId, String vnetRange, String networkSpeed, List - isolationMethods, String broadcastDomainRangeStr, Long domainId, List tags, String name) { + public PhysicalNetwork createPhysicalNetwork(final Long zoneId, final String vnetRange, final String networkSpeed, final List + isolationMethods, String broadcastDomainRangeStr, 
final Long domainId, final List tags, final String name) { // Check if zone exists if (zoneId == null) { @@ -2443,12 +2480,14 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } } - Transaction txn = Transaction.currentTxn(); try { - txn.start(); + final BroadcastDomainRange broadcastDomainRangeFinal = broadcastDomainRange; + return Transaction.execute(new TransactionCallback() { + @Override + public PhysicalNetworkVO doInTransaction(TransactionStatus status) { // Create the new physical network in the database long id = _physicalNetworkDao.getNextInSequence(Long.class, "id"); - PhysicalNetworkVO pNetwork = new PhysicalNetworkVO(id, zoneId, vnetRange, networkSpeed, domainId, broadcastDomainRange, name); + PhysicalNetworkVO pNetwork = new PhysicalNetworkVO(id, zoneId, vnetRange, networkSpeed, domainId, broadcastDomainRangeFinal, name); pNetwork.setTags(tags); pNetwork.setIsolationMethods(isolationMethods); @@ -2474,8 +2513,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { //Add Internal Load Balancer element as a default network service provider addDefaultInternalLbProviderToPhysicalNetwork(pNetwork.getId()); - txn.commit(); return pNetwork; + } + }); } catch (Exception ex) { s_logger.warn("Exception: ", ex); throw new CloudRuntimeException("Fail to create a physical network"); @@ -2566,7 +2606,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } @DB - public void addOrRemoveVnets(String [] listOfRanges, PhysicalNetworkVO network) { + public void addOrRemoveVnets(String [] listOfRanges, final PhysicalNetworkVO network) { List addVnets = null; List removeVnets =null; HashSet tempVnets = new HashSet(); @@ -2609,22 +2649,27 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } network.setVnet(comaSeperatedStingOfVnetRanges); - Transaction txn = Transaction.currentTxn(); - txn.start(); - if (addVnets != null) { - s_logger.debug("Adding 
vnet range " + addVnets.toString()+ " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + final List addVnetsFinal = addVnets; + final List removeVnetsFinal = removeVnets; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + if (addVnetsFinal != null) { + s_logger.debug("Adding vnet range " + addVnetsFinal.toString()+ " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + " as a part of updatePhysicalNetwork call"); //add vnet takes a list of strings to be added. each string is a vnet. - _dcDao.addVnet(network.getDataCenterId(), network.getId(), addVnets); + _dcDao.addVnet(network.getDataCenterId(), network.getId(), addVnetsFinal); } - if (removeVnets != null) { - s_logger.debug("removing vnet range " + removeVnets.toString()+ " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + if (removeVnetsFinal != null) { + s_logger.debug("removing vnet range " + removeVnetsFinal.toString()+ " for the physicalNetwork id= " + network.getId() + " and zone id=" + network.getDataCenterId() + " as a part of updatePhysicalNetwork call"); //deleteVnets takes a list of strings to be removed. each string is a vnet. 
- _datacneter_vnet.deleteVnets(txn, network.getDataCenterId(), network.getId(), removeVnets); + _datacneter_vnet.deleteVnets(TransactionLegacy.currentTxn(), network.getDataCenterId(), network.getId(), removeVnetsFinal); } _physicalNetworkDao.update(network.getId(), network); - txn.commit(); + } + }); + _physicalNetworkDao.releaseFromLockTable(network.getId()); } } @@ -2648,6 +2693,23 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { if (network.getIsolationMethods().contains("GRE")) { minVnet = MIN_GRE_KEY; maxVnet = MAX_GRE_KEY; + } else if (network.getIsolationMethods().contains("VXLAN")) { + minVnet = MIN_VXLAN_VNI; + maxVnet = MAX_VXLAN_VNI; + // fail if zone already contains VNI, need to be unique per zone. + // since adding a range adds each VNI to the database, need only check min/max + for(String vnet : VnetRange) { + s_logger.debug("Looking to see if VNI " + vnet + " already exists on another network in zone " + network.getDataCenterId()); + List vnis = _datacneter_vnet.findVnet(network.getDataCenterId(), vnet); + if (vnis != null && ! 
vnis.isEmpty()) { + for (DataCenterVnetVO vni : vnis) { + if (vni.getPhysicalNetworkId() != network.getId()) { + s_logger.debug("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); + throw new InvalidParameterValueException("VNI " + vnet + " already exists on another network in zone, please specify a unique range"); + } + } + } + } } String rangeMessage = " between " + minVnet + " and " + maxVnet; if (VnetRange.length == 1 && VnetRange[0].equals("")) { @@ -2766,7 +2828,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Override @ActionEvent(eventType = EventTypes.EVENT_PHYSICAL_NETWORK_DELETE, eventDescription = "deleting physical network", async = true) @DB - public boolean deletePhysicalNetwork(Long physicalNetworkId) { + public boolean deletePhysicalNetwork(final Long physicalNetworkId) { // verify input parameters PhysicalNetworkVO pNetwork = _physicalNetworkDao.findById(physicalNetworkId); @@ -2778,8 +2840,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { checkIfPhysicalNetworkIsDeletable(physicalNetworkId); - Transaction txn = Transaction.currentTxn(); - txn.start(); + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { // delete vlans for this zone List vlans = _vlanDao.listVlansByPhysicalNetworkId(physicalNetworkId); for (VlanVO vlan : vlans) { @@ -2815,11 +2878,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { // delete traffic types _pNTrafficTypeDao.deleteTrafficTypes(physicalNetworkId); - boolean success = _physicalNetworkDao.remove(physicalNetworkId); - - txn.commit(); - - return success; + return _physicalNetworkDao.remove(physicalNetworkId); + } + }); } @DB @@ -2877,7 +2938,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { selectSql += " AND taken IS NOT NULL"; } - Transaction txn = 
Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); stmt.setLong(1, physicalNetworkId); @@ -3040,12 +3101,9 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { accountGuestVlanMapVO.setGuestVlanRange(updatedVlanRange); _accountGuestVlanMapDao.update(guestVlanMapId, accountGuestVlanMapVO); } else { - Transaction txn = Transaction.currentTxn(); - txn.start(); accountGuestVlanMapVO = new AccountGuestVlanMapVO(vlanOwner.getAccountId(), physicalNetworkId); accountGuestVlanMapVO.setGuestVlanRange(startVlan + "-" + endVlan); _accountGuestVlanMapDao.persist(accountGuestVlanMapVO); - txn.commit(); } // For every guest vlan set the corresponding account guest vlan map id List finaVlanTokens = getVlanFromRange(accountGuestVlanMapVO.getGuestVlanRange()); @@ -3271,9 +3329,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { services = new ArrayList(element.getCapabilities().keySet()); } - Transaction txn = Transaction.currentTxn(); try { - txn.start(); // Create the new physical network in the database PhysicalNetworkServiceProviderVO nsp = new PhysicalNetworkServiceProviderVO(physicalNetworkId, providerName); // set enabled services @@ -3284,7 +3340,6 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } nsp = _pNSPDao.persist(nsp); - txn.commit(); return nsp; } catch (Exception ex) { s_logger.warn("Exception: ", ex); @@ -3532,9 +3587,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } } - Transaction txn = Transaction.currentTxn(); try { - txn.start(); // Create the new traffic type in the database if (xenLabel == null) { xenLabel = getDefaultXenNetworkLabel(trafficType); @@ -3542,7 +3595,6 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { PhysicalNetworkTrafficTypeVO pNetworktrafficType = new 
PhysicalNetworkTrafficTypeVO(physicalNetworkId, trafficType, xenLabel, kvmLabel, vmwareLabel, simulatorLabel, vlan); pNetworktrafficType = _pNTrafficTypeDao.persist(pNetworktrafficType); - txn.commit(); return pNetworktrafficType; } catch (Exception ex) { s_logger.warn("Exception: ", ex); @@ -3680,7 +3732,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } VirtualRouterElement element = (VirtualRouterElement)networkElement; - element.addElement(nsp.getId(), VirtualRouterProviderType.VirtualRouter); + element.addElement(nsp.getId(), Type.VirtualRouter); return nsp; } @@ -3696,7 +3748,7 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { } VpcVirtualRouterElement element = (VpcVirtualRouterElement)networkElement; - element.addElement(nsp.getId(), VirtualRouterProviderType.VPCVirtualRouter); + element.addElement(nsp.getId(), Type.VPCVirtualRouter); return nsp; } @@ -3804,11 +3856,11 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { @Override @DB - public Network createPrivateNetwork(String networkName, String displayText, long physicalNetworkId, - String broadcastUriString, String startIp, String endIp, String gateway, String netmask, long networkOwnerId, Long vpcId, Boolean sourceNat, Long networkOfferingId) + public Network createPrivateNetwork(final String networkName, final String displayText, long physicalNetworkId, + String broadcastUriString, final String startIp, String endIp, final String gateway, String netmask, final long networkOwnerId, final Long vpcId, final Boolean sourceNat, final Long networkOfferingId) throws ResourceAllocationException, ConcurrentOperationException, InsufficientCapacityException { - Account owner = _accountMgr.getAccount(networkOwnerId); + final Account owner = _accountMgr.getAccount(networkOwnerId); // Get system network offering NetworkOfferingVO ntwkOff = null; @@ -3822,7 +3874,7 @@ public class NetworkServiceImpl extends 
ManagerBase implements NetworkService { } // Validate physical network - PhysicalNetwork pNtwk = _physicalNetworkDao.findById(physicalNetworkId); + final PhysicalNetwork pNtwk = _physicalNetworkDao.findById(physicalNetworkId); if (pNtwk == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find a physical network" + " having the given id"); @@ -3841,7 +3893,6 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Invalid format for the endIp address parameter"); } - String cidr = null; if (!NetUtils.isValidIp(gateway)) { throw new InvalidParameterValueException("Invalid gateway"); } @@ -3849,10 +3900,10 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("Invalid netmask"); } - cidr = NetUtils.ipAndNetMaskToCidr(gateway, netmask); + final String cidr = NetUtils.ipAndNetMaskToCidr(gateway, netmask); URI uri = BroadcastDomainType.fromString(broadcastUriString); - String uriString = uri.toString(); + final String uriString = uri.toString(); BroadcastDomainType tiep = BroadcastDomainType.getSchemeValue(uri); // numeric vlan or vlan uri are ok for now // TODO make a test for any supported scheme @@ -3861,18 +3912,20 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { throw new InvalidParameterValueException("unsupported type of broadcastUri specified: " + broadcastUriString); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - + final NetworkOfferingVO ntwkOffFinal = ntwkOff; + try { + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public Network doInTransaction(TransactionStatus status) throws ResourceAllocationException, InsufficientCapacityException { //lock datacenter as we need to get mac address seq from there DataCenterVO dc = _dcDao.lockRow(pNtwk.getDataCenterId(), true); //check if we need to create guest 
network Network privateNetwork = _networksDao.getPrivateNetwork(uriString, cidr, - networkOwnerId, pNtwk.getDataCenterId(), null); + networkOwnerId, pNtwk.getDataCenterId(), networkOfferingId); if (privateNetwork == null) { //create Guest network - privateNetwork = _networkMgr.createGuestNetwork(ntwkOff.getId(), networkName, displayText, gateway, cidr, uriString, + privateNetwork = _networkMgr.createGuestNetwork(ntwkOffFinal.getId(), networkName, displayText, gateway, cidr, uriString, null, owner, null, pNtwk, pNtwk.getDataCenterId(), ACLType.Account, null, vpcId, null, null, true, null); s_logger.debug("Created private network " + privateNetwork); } else { @@ -3900,11 +3953,18 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { _dcDao.update(dc.getId(), dc); - txn.commit(); s_logger.debug("Private network " + privateNetwork + " is created"); return privateNetwork; } + }); + } catch (Exception e) { + ExceptionUtil.rethrowRuntime(e); + ExceptionUtil.rethrow(e, ResourceAllocationException.class); + ExceptionUtil.rethrow(e, InsufficientCapacityException.class); + throw new IllegalStateException(e); + } + } private NetworkOfferingVO findSystemNetworkOffering(String offeringName) { @@ -3939,4 +3999,13 @@ public class NetworkServiceImpl extends ManagerBase implements NetworkService { return _networkMgr.listVmNics(vmId, nicId); } + public List getNetworkGurus() { + return _networkGurus; + } + + @Inject + public void setNetworkGurus(List networkGurus) { + this._networkGurus = networkGurus; + } + } diff --git a/server/src/com/cloud/network/NetworkUsageManagerImpl.java b/server/src/com/cloud/network/NetworkUsageManagerImpl.java index 41c1bc2912b..0f2feb07b91 100755 --- a/server/src/com/cloud/network/NetworkUsageManagerImpl.java +++ b/server/src/com/cloud/network/NetworkUsageManagerImpl.java @@ -88,6 +88,8 @@ import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import 
com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; @@ -290,11 +292,11 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage return false; } - private boolean collectDirectNetworkUsage(HostVO host){ + private boolean collectDirectNetworkUsage(final HostVO host){ s_logger.debug("Direct Network Usage stats collector is running..."); - long zoneId = host.getDataCenterId(); - DetailVO lastCollectDetail = _detailsDao.findDetail(host.getId(),"last_collection"); + final long zoneId = host.getDataCenterId(); + final DetailVO lastCollectDetail = _detailsDao.findDetail(host.getId(),"last_collection"); if(lastCollectDetail == null){ s_logger.warn("Last collection time not available. Skipping direct usage collection for Traffic Monitor: "+host.getId()); return false; @@ -309,7 +311,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage // This coule be made configurable rightNow.add(Calendar.HOUR_OF_DAY, -2); - Date now = rightNow.getTime(); + final Date now = rightNow.getTime(); if(lastCollection.after(now)){ s_logger.debug("Current time is less than 2 hours after last collection time : " + lastCollection.toString() + ". 
Skipping direct network usage collection"); @@ -361,7 +363,7 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage } - List collectedStats = new ArrayList(); + final List collectedStats = new ArrayList(); //Get usage for Ips which were assigned for the entire duration if(fullDurationIpUsage.size() > 0){ @@ -431,28 +433,26 @@ public class NetworkUsageManagerImpl extends ManagerBase implements NetworkUsage return false; } //Persist all the stats and last_collection time in a single transaction - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - try { - txn.start(); - for(UserStatisticsVO stat : collectedStats){ - UserStatisticsVO stats = _statsDao.lock(stat.getAccountId(), stat.getDataCenterId(), 0L, null, host.getId(), "DirectNetwork"); - if (stats == null) { - stats = new UserStatisticsVO(stat.getAccountId(), zoneId, null, host.getId(), "DirectNetwork", 0L); - stats.setCurrentBytesSent(stat.getCurrentBytesSent()); - stats.setCurrentBytesReceived(stat.getCurrentBytesReceived()); - _statsDao.persist(stats); - } else { - stats.setCurrentBytesSent(stats.getCurrentBytesSent() + stat.getCurrentBytesSent()); - stats.setCurrentBytesReceived(stats.getCurrentBytesReceived() + stat.getCurrentBytesReceived()); - _statsDao.update(stats.getId(), stats); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for(UserStatisticsVO stat : collectedStats){ + UserStatisticsVO stats = _statsDao.lock(stat.getAccountId(), stat.getDataCenterId(), 0L, null, host.getId(), "DirectNetwork"); + if (stats == null) { + stats = new UserStatisticsVO(stat.getAccountId(), zoneId, null, host.getId(), "DirectNetwork", 0L); + stats.setCurrentBytesSent(stat.getCurrentBytesSent()); + stats.setCurrentBytesReceived(stat.getCurrentBytesReceived()); + _statsDao.persist(stats); + } else { + stats.setCurrentBytesSent(stats.getCurrentBytesSent() + stat.getCurrentBytesSent()); + 
stats.setCurrentBytesReceived(stats.getCurrentBytesReceived() + stat.getCurrentBytesReceived()); + _statsDao.update(stats.getId(), stats); + } } + lastCollectDetail.setValue(""+now.getTime()); + _detailsDao.update(lastCollectDetail.getId(), lastCollectDetail); } - lastCollectDetail.setValue(""+now.getTime()); - _detailsDao.update(lastCollectDetail.getId(), lastCollectDetail); - txn.commit(); - } finally { - txn.close(); - } + }); return true; } diff --git a/server/src/com/cloud/network/PortProfileManagerImpl.java b/server/src/com/cloud/network/PortProfileManagerImpl.java index f17ee6f45b6..c731597925f 100644 --- a/server/src/com/cloud/network/PortProfileManagerImpl.java +++ b/server/src/com/cloud/network/PortProfileManagerImpl.java @@ -55,18 +55,7 @@ public class PortProfileManagerImpl { // Else, go ahead and create the port profile. PortProfileVO portProfileObj = new PortProfileVO(portProfName, vsmId, vlanId, pType, bType); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - _portProfileDao.persist(portProfileObj); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - throw new CloudRuntimeException(e.getMessage()); - } - - // Return the PortProfileVO object created. - return portProfileObj; + return _portProfileDao.persist(portProfileObj); } @DB @@ -95,18 +84,7 @@ public class PortProfileManagerImpl { // Else, go ahead and create the port profile. portProfileObj = new PortProfileVO(portProfName, vsmId, lowVlanId, highVlanId, pType, bType); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - _portProfileDao.persist(portProfileObj); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - throw new CloudRuntimeException(e.getMessage()); - } - - // Return the PortProfileVO object created. 
- return portProfileObj; + return _portProfileDao.persist(portProfileObj); } @DB @@ -121,16 +99,7 @@ public class PortProfileManagerImpl { // TODO: Should we be putting any checks here before removing // the port profile record from the db? - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - // Remove the VSM entry in CiscoNexusVSMDeviceVO's table. - _portProfileDao.remove(portProfileId); - txn.commit(); - } catch (Exception e) { - s_logger.info("Caught exception when trying to delete Port Profile record.." + e.getMessage()); - throw new CloudRuntimeException("Failed to delete Port Profile"); - } - return true; + // Remove the VSM entry in CiscoNexusVSMDeviceVO's table. + return _portProfileDao.remove(portProfileId); } } \ No newline at end of file diff --git a/server/src/com/cloud/network/StorageNetworkManagerImpl.java b/server/src/com/cloud/network/StorageNetworkManagerImpl.java index 901e2041490..4719a42e8be 100755 --- a/server/src/com/cloud/network/StorageNetworkManagerImpl.java +++ b/server/src/com/cloud/network/StorageNetworkManagerImpl.java @@ -20,18 +20,16 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; import java.util.List; -import java.util.Map; import javax.ejb.Local; import javax.inject.Inject; -import javax.naming.ConfigurationException; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import org.apache.cloudstack.api.command.admin.network.CreateStorageNetworkIpRangeCmd; import org.apache.cloudstack.api.command.admin.network.DeleteStorageNetworkIpRangeCmd; import org.apache.cloudstack.api.command.admin.network.ListStorageNetworkIpRangeCmd; import org.apache.cloudstack.api.command.admin.network.UpdateStorageNetworkIpRangeCmd; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; import com.cloud.dc.HostPodVO; import com.cloud.dc.StorageNetworkIpAddressVO; @@ -46,10 +44,13 @@ import com.cloud.network.dao.NetworkDao; import 
com.cloud.network.dao.NetworkVO; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; +import com.cloud.utils.db.QueryBuilder; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.SecondaryStorageVmVO; @@ -95,7 +96,7 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet } } - private void createStorageIpEntires(Transaction txn, long rangeId, String startIp, String endIp, long zoneId) throws SQLException { + private void createStorageIpEntires(TransactionLegacy txn, long rangeId, String startIp, String endIp, long zoneId) throws SQLException { long startIPLong = NetUtils.ip2Long(startIp); long endIPLong = NetUtils.ip2Long(endIp); String insertSql = "INSERT INTO `cloud`.`op_dc_storage_network_ip_address` (range_id, ip_address, mac_address, taken) VALUES (?, ?, (select mac_address from `cloud`.`data_center` where id=?), ?)"; @@ -122,11 +123,11 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet @Override @DB public StorageNetworkIpRange updateIpRange(UpdateStorageNetworkIpRangeCmd cmd) { - Integer vlan = cmd.getVlan(); - Long rangeId = cmd.getId(); + final Integer vlan = cmd.getVlan(); + final Long rangeId = cmd.getId(); String startIp = cmd.getStartIp(); String endIp = cmd.getEndIp(); - String netmask = cmd.getNetmask(); + final String netmask = cmd.getNetmask(); if (netmask != null && !NetUtils.isValidNetmask(netmask)) { throw new CloudRuntimeException("Invalid netmask:" + netmask); @@ -149,45 +150,50 @@ public class StorageNetworkManagerImpl 
extends ManagerBase implements StorageNet checkOverlapStorageIpRange(podId, startIp, endIp); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - try { - range = _sNwIpRangeDao.acquireInLockTable(range.getId()); - if (range == null) { - throw new CloudRuntimeException("Cannot acquire lock on storage ip range " + rangeId); + final String startIpFinal = startIp; + final String endIpFinal = endIp; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + StorageNetworkIpRangeVO range = null; + try { + range = _sNwIpRangeDao.acquireInLockTable(rangeId); + if (range == null) { + throw new CloudRuntimeException("Cannot acquire lock on storage ip range " + rangeId); + } + StorageNetworkIpRangeVO vo = _sNwIpRangeDao.createForUpdate(); + if (vlan != null) { + vo.setVlan(vlan); + } + if (startIpFinal != null) { + vo.setStartIp(startIpFinal); + } + if (endIpFinal != null) { + vo.setEndIp(endIpFinal); + } + if (netmask != null) { + vo.setNetmask(netmask); + } + _sNwIpRangeDao.update(rangeId, vo); + } finally { + if (range != null) { + _sNwIpRangeDao.releaseFromLockTable(range.getId()); + } + } } - StorageNetworkIpRangeVO vo = _sNwIpRangeDao.createForUpdate(); - if (vlan != null) { - vo.setVlan(vlan); - } - if (startIp != null) { - vo.setStartIp(startIp); - } - if (endIp != null) { - vo.setEndIp(endIp); - } - if (netmask != null) { - vo.setNetmask(netmask); - } - _sNwIpRangeDao.update(rangeId, vo); - } finally { - if (range != null) { - _sNwIpRangeDao.releaseFromLockTable(range.getId()); - } - } - txn.commit(); + }); return _sNwIpRangeDao.findById(rangeId); } @Override @DB - public StorageNetworkIpRange createIpRange(CreateStorageNetworkIpRangeCmd cmd) throws SQLException { - Long podId = cmd.getPodId(); - String startIp = cmd.getStartIp(); + public StorageNetworkIpRange createIpRange(final CreateStorageNetworkIpRangeCmd cmd) throws SQLException { + final Long podId = 
cmd.getPodId(); + final String startIp = cmd.getStartIp(); String endIp = cmd.getEndIp(); - Integer vlan = cmd.getVlan(); - String netmask = cmd.getNetmask(); + final Integer vlan = cmd.getVlan(); + final String netmask = cmd.getNetmask(); if (endIp == null) { endIp = startIp; @@ -201,7 +207,7 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet if (pod == null) { throw new CloudRuntimeException("Cannot find pod " + podId); } - Long zoneId = pod.getDataCenterId(); + final Long zoneId = pod.getDataCenterId(); List nws = _networkDao.listByZoneAndTrafficType(zoneId, TrafficType.Storage); if (nws.size() == 0) { @@ -210,34 +216,35 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet if (nws.size() > 1) { throw new CloudRuntimeException("Find more than one storage network in zone " + zoneId + "," + nws.size() + " found"); } - NetworkVO nw = nws.get(0); + final NetworkVO nw = nws.get(0); checkOverlapPrivateIpRange(podId, startIp, endIp); checkOverlapStorageIpRange(podId, startIp, endIp); - Transaction txn = Transaction.currentTxn(); StorageNetworkIpRangeVO range = null; - txn.start(); - range = new StorageNetworkIpRangeVO(zoneId, podId, nw.getId(), startIp, endIp, vlan, netmask, cmd.getGateWay()); - _sNwIpRangeDao.persist(range); - try { - createStorageIpEntires(txn, range.getId(), startIp, endIp, zoneId); - } catch (SQLException e) { - txn.rollback(); - StringBuilder err = new StringBuilder(); - err.append("Create storage network range failed."); - err.append("startIp=" + startIp); - err.append("endIp=" + endIp); - err.append("netmask=" + netmask); - err.append("zoneId=" + zoneId); - s_logger.debug(err.toString(), e); - throw e; - } + final String endIpFinal = endIp; + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public StorageNetworkIpRangeVO doInTransaction(TransactionStatus status) throws SQLException { + StorageNetworkIpRangeVO range = new 
StorageNetworkIpRangeVO(zoneId, podId, nw.getId(), startIp, endIpFinal, vlan, netmask, cmd.getGateWay()); + _sNwIpRangeDao.persist(range); + try { + createStorageIpEntires(TransactionLegacy.currentTxn(), range.getId(), startIp, endIpFinal, zoneId); + } catch (SQLException e) { + StringBuilder err = new StringBuilder(); + err.append("Create storage network range failed."); + err.append("startIp=" + startIp); + err.append("endIp=" + endIpFinal); + err.append("netmask=" + netmask); + err.append("zoneId=" + zoneId); + s_logger.debug(err.toString(), e); + throw e; + } - txn.commit(); - - return range; + return range; + } + }); } private String getInUseIpAddress(long rangeId) { @@ -253,7 +260,7 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet @Override @DB public void deleteIpRange(DeleteStorageNetworkIpRangeCmd cmd) { - long rangeId = cmd.getId(); + final long rangeId = cmd.getId(); StorageNetworkIpRangeVO range = _sNwIpRangeDao.findById(rangeId); if (range == null) { throw new CloudRuntimeException("Can not find storage network ip range " + rangeId); @@ -263,26 +270,30 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet throw new CloudRuntimeException(getInUseIpAddress(rangeId)); } - final Transaction txn = Transaction.currentTxn(); - txn.start(); - try { - range = _sNwIpRangeDao.acquireInLockTable(rangeId); - if (range == null) { - String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed"; - s_logger.warn(msg); - throw new CloudRuntimeException(msg); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + StorageNetworkIpRangeVO range = null; + try { + range = _sNwIpRangeDao.acquireInLockTable(rangeId); + if (range == null) { + String msg = "Unable to acquire lock on storage network ip range id=" + rangeId + ", delete failed"; + s_logger.warn(msg); + throw new 
CloudRuntimeException(msg); + } + /* + * entries in op_dc_storage_network_ip_address will be deleted automatically due to + * fk_storage_ip_address__range_id constraint key + */ + _sNwIpRangeDao.remove(rangeId); + } finally { + if (range != null) { + _sNwIpRangeDao.releaseFromLockTable(rangeId); + } + } } - /* - * entries in op_dc_storage_network_ip_address will be deleted automatically due to - * fk_storage_ip_address__range_id constraint key - */ - _sNwIpRangeDao.remove(rangeId); - } finally { - if (range != null) { - _sNwIpRangeDao.releaseFromLockTable(rangeId); - } - } - txn.commit(); + }); + } @Override @@ -339,8 +350,8 @@ public class StorageNetworkManagerImpl extends ManagerBase implements StorageNet @Override public boolean isStorageIpRangeAvailable(long zoneId) { - SearchCriteriaService sc = SearchCriteria2.create(StorageNetworkIpRangeVO.class); - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, zoneId); + QueryBuilder sc = QueryBuilder.create(StorageNetworkIpRangeVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ, zoneId); List entries = sc.list(); return entries.size() > 0; } diff --git a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java index 859211bd572..58a90bb1116 100644 --- a/server/src/com/cloud/network/as/AutoScaleManagerImpl.java +++ b/server/src/com/cloud/network/as/AutoScaleManagerImpl.java @@ -95,8 +95,10 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallback; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.net.NetUtils; @Local(value = { AutoScaleService.class, AutoScaleManager.class }) @@ -428,9 +430,9 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } @DB - protected 
AutoScalePolicyVO checkValidityAndPersist(AutoScalePolicyVO autoScalePolicyVO, List conditionIds) { - int duration = autoScalePolicyVO.getDuration(); - int quietTime = autoScalePolicyVO.getQuietTime(); + protected AutoScalePolicyVO checkValidityAndPersist(final AutoScalePolicyVO autoScalePolicyVOFinal, final List conditionIds) { + final int duration = autoScalePolicyVOFinal.getDuration(); + final int quietTime = autoScalePolicyVOFinal.getQuietTime(); if (duration < 0) { throw new InvalidParameterValueException("duration is an invalid value: " + duration); @@ -440,48 +442,49 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale throw new InvalidParameterValueException("quiettime is an invalid value: " + quietTime); } - final Transaction txn = Transaction.currentTxn(); - txn.start(); - - autoScalePolicyVO = _autoScalePolicyDao.persist(autoScalePolicyVO); - - if (conditionIds != null) { - SearchBuilder conditionsSearch = _conditionDao.createSearchBuilder(); - conditionsSearch.and("ids", conditionsSearch.entity().getId(), Op.IN); - conditionsSearch.done(); - SearchCriteria sc = conditionsSearch.create(); - - sc.setParameters("ids", conditionIds.toArray(new Object[0])); - List conditions = _conditionDao.search(sc, null); - - ControlledEntity[] sameOwnerEntities = conditions.toArray(new ControlledEntity[conditions.size() + 1]); - sameOwnerEntities[sameOwnerEntities.length - 1] = autoScalePolicyVO; - _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, sameOwnerEntities); - - if (conditionIds.size() != conditions.size()) { - // TODO report the condition id which could not be found - throw new InvalidParameterValueException("Unable to find the condition specified"); - } - - ArrayList counterIds = new ArrayList(); - for (ConditionVO condition : conditions) { - if (counterIds.contains(condition.getCounterid())) { - throw new InvalidParameterValueException("atleast two conditions in the conditionids have the same counter. 
It is not right to apply two different conditions for the same counter"); + return Transaction.execute(new TransactionCallback() { + @Override + public AutoScalePolicyVO doInTransaction(TransactionStatus status) { + AutoScalePolicyVO autoScalePolicyVO = _autoScalePolicyDao.persist(autoScalePolicyVOFinal); + + if (conditionIds != null) { + SearchBuilder conditionsSearch = _conditionDao.createSearchBuilder(); + conditionsSearch.and("ids", conditionsSearch.entity().getId(), Op.IN); + conditionsSearch.done(); + SearchCriteria sc = conditionsSearch.create(); + + sc.setParameters("ids", conditionIds.toArray(new Object[0])); + List conditions = _conditionDao.search(sc, null); + + ControlledEntity[] sameOwnerEntities = conditions.toArray(new ControlledEntity[conditions.size() + 1]); + sameOwnerEntities[sameOwnerEntities.length - 1] = autoScalePolicyVO; + _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, sameOwnerEntities); + + if (conditionIds.size() != conditions.size()) { + // TODO report the condition id which could not be found + throw new InvalidParameterValueException("Unable to find the condition specified"); + } + + ArrayList counterIds = new ArrayList(); + for (ConditionVO condition : conditions) { + if (counterIds.contains(condition.getCounterid())) { + throw new InvalidParameterValueException("atleast two conditions in the conditionids have the same counter. 
It is not right to apply two different conditions for the same counter"); + } + counterIds.add(condition.getCounterid()); + } + + /* For update case remove the existing mappings and create fresh ones */ + _autoScalePolicyConditionMapDao.removeByAutoScalePolicyId(autoScalePolicyVO.getId()); + + for (Long conditionId : conditionIds) { + AutoScalePolicyConditionMapVO policyConditionMapVO = new AutoScalePolicyConditionMapVO(autoScalePolicyVO.getId(), conditionId); + _autoScalePolicyConditionMapDao.persist(policyConditionMapVO); + } } - counterIds.add(condition.getCounterid()); + + return autoScalePolicyVO; } - - /* For update case remove the existing mappings and create fresh ones */ - _autoScalePolicyConditionMapDao.removeByAutoScalePolicyId(autoScalePolicyVO.getId()); - - for (Long conditionId : conditionIds) { - AutoScalePolicyConditionMapVO policyConditionMapVO = new AutoScalePolicyConditionMapVO(autoScalePolicyVO.getId(), conditionId); - _autoScalePolicyConditionMapDao.persist(policyConditionMapVO); - } - } - - txn.commit(); - return autoScalePolicyVO; + }); } @Override @@ -511,7 +514,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale @Override @DB @ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEPOLICY_DELETE, eventDescription = "deleting autoscale policy") - public boolean deleteAutoScalePolicy(long id) { + public boolean deleteAutoScalePolicy(final long id) { /* Check if entity is in database */ getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Policy", id, _autoScalePolicyDao); @@ -519,23 +522,25 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale throw new InvalidParameterValueException("Cannot delete AutoScale Policy when it is in use by one or more AutoScale Vm Groups"); } - Transaction txn = Transaction.currentTxn(); - txn.start(); + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + boolean success 
= true; + success = _autoScalePolicyDao.remove(id); + if (!success) { + s_logger.warn("Failed to remove AutoScale Policy db object"); + return false; + } + success = _autoScalePolicyConditionMapDao.removeByAutoScalePolicyId(id); + if (!success) { + s_logger.warn("Failed to remove AutoScale Policy Condition mappings"); + return false; + } + s_logger.info("Successfully deleted autoscale policy id : " + id); - boolean success = true; - success = _autoScalePolicyDao.remove(id); - if (!success) { - s_logger.warn("Failed to remove AutoScale Policy db object"); - return false; - } - success = _autoScalePolicyConditionMapDao.removeByAutoScalePolicyId(id); - if (!success) { - s_logger.warn("Failed to remove AutoScale Policy Condition mappings"); - return false; - } - txn.commit(); - s_logger.info("Successfully deleted autoscale policy id : " + id); - return true; // successful + return success; + } + }); } public void checkCallerAccess(String accountName, Long domainId) @@ -745,7 +750,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale @Override @DB @ActionEvent(eventType = EventTypes.EVENT_AUTOSCALEVMGROUP_DELETE, eventDescription = "deleting autoscale vm group") - public boolean deleteAutoScaleVmGroup(long id) { + public boolean deleteAutoScaleVmGroup(final long id) { AutoScaleVmGroupVO autoScaleVmGroupVO = getEntityInDatabase(CallContext.current().getCallingAccount(), "AutoScale Vm Group", id, _autoScaleVmGroupDao); if (autoScaleVmGroupVO.getState().equals(AutoScaleVmGroup.State_New)) { @@ -769,24 +774,27 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } } - Transaction txn = Transaction.currentTxn(); - txn.start(); - success = _autoScaleVmGroupDao.remove(id); + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + boolean success = _autoScaleVmGroupDao.remove(id); + + if (!success) { + s_logger.warn("Failed to remove AutoScale Group db 
object"); + return false; + } + + success = _autoScaleVmGroupPolicyMapDao.removeByGroupId(id); + if (!success) { + s_logger.warn("Failed to remove AutoScale Group Policy mappings"); + return false; + } + + s_logger.info("Successfully deleted autoscale vm group id : " + id); + return success; // Successfull + } + }); - if (!success) { - s_logger.warn("Failed to remove AutoScale Group db object"); - return false; - } - - success = _autoScaleVmGroupPolicyMapDao.removeByGroupId(id); - if (!success) { - s_logger.warn("Failed to remove AutoScale Group Policy mappings"); - return false; - } - - txn.commit(); - s_logger.info("Successfully deleted autoscale vm group id : " + id); - return success; // Successfull } @Override @@ -831,13 +839,13 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale } @DB - protected AutoScaleVmGroupVO checkValidityAndPersist(AutoScaleVmGroupVO vmGroup, List passedScaleUpPolicyIds, List passedScaleDownPolicyIds) { + protected AutoScaleVmGroupVO checkValidityAndPersist(final AutoScaleVmGroupVO vmGroup, final List passedScaleUpPolicyIds, final List passedScaleDownPolicyIds) { int minMembers = vmGroup.getMinMembers(); int maxMembers = vmGroup.getMaxMembers(); int interval = vmGroup.getInterval(); List counters = new ArrayList(); List policies = new ArrayList(); - List policyIds = new ArrayList(); + final List policyIds = new ArrayList(); List currentScaleUpPolicyIds = new ArrayList(); List currentScaleDownPolicyIds = new ArrayList(); if (vmGroup.getCreated() != null) { @@ -887,20 +895,23 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScale sameOwnerEntities[sameOwnerEntities.length - 1] = profileVO; _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, sameOwnerEntities); - final Transaction txn = Transaction.currentTxn(); - txn.start(); - vmGroup = _autoScaleVmGroupDao.persist(vmGroup); + return Transaction.execute(new TransactionCallback() { + @Override + public 
AutoScaleVmGroupVO doInTransaction(TransactionStatus status) { + AutoScaleVmGroupVO vmGroupNew = _autoScaleVmGroupDao.persist(vmGroup); + + if (passedScaleUpPolicyIds != null || passedScaleDownPolicyIds != null) { + _autoScaleVmGroupPolicyMapDao.removeByGroupId(vmGroupNew.getId()); + + for (Long policyId : policyIds) { + _autoScaleVmGroupPolicyMapDao.persist(new AutoScaleVmGroupPolicyMapVO(vmGroupNew.getId(), policyId)); + } + } - if (passedScaleUpPolicyIds != null || passedScaleDownPolicyIds != null) { - _autoScaleVmGroupPolicyMapDao.removeByGroupId(vmGroup.getId()); - - for (Long policyId : policyIds) { - _autoScaleVmGroupPolicyMapDao.persist(new AutoScaleVmGroupPolicyMapVO(vmGroup.getId(), policyId)); + return vmGroupNew; } - } - txn.commit(); + }); - return vmGroup; } @Override diff --git a/server/src/com/cloud/network/element/VirtualRouterElement.java b/server/src/com/cloud/network/element/VirtualRouterElement.java index 3607284dbe4..28bfb6fee5a 100755 --- a/server/src/com/cloud/network/element/VirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VirtualRouterElement.java @@ -5,7 +5,7 @@ // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. 
You may obtain a copy of the License at -// +// // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, @@ -25,13 +25,15 @@ import java.util.Set; import javax.ejb.Local; import javax.inject.Inject; +import org.apache.log4j.Logger; + +import com.google.gson.Gson; + import org.apache.cloudstack.api.command.admin.router.ConfigureVirtualRouterElementCmd; import org.apache.cloudstack.api.command.admin.router.CreateVirtualRouterElementCmd; import org.apache.cloudstack.api.command.admin.router.ListVirtualRouterElementsCmd; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.log4j.Logger; - import com.cloud.agent.api.to.LoadBalancerTO; import com.cloud.configuration.ConfigurationManager; import com.cloud.dc.DataCenter; @@ -55,7 +57,7 @@ import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.PublicIpAddress; import com.cloud.network.RemoteAccessVpn; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.VpnUser; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.LoadBalancerDao; @@ -76,13 +78,12 @@ import com.cloud.network.rules.StaticNat; import com.cloud.offering.NetworkOffering; import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; +import com.cloud.user.Account; import com.cloud.user.AccountManager; -import com.cloud.uservm.UserVm; import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicProfile; @@ -91,19 +92,16 @@ import 
com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachine.State; -import com.cloud.vm.VirtualMachine.Type; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.DomainRouterDao; import com.cloud.vm.dao.UserVmDao; -import com.google.gson.Gson; - -@Local(value = {NetworkElement.class, FirewallServiceProvider.class, - DhcpServiceProvider.class, UserDataServiceProvider.class, +@Local(value = {NetworkElement.class, FirewallServiceProvider.class, + DhcpServiceProvider.class, UserDataServiceProvider.class, StaticNatServiceProvider.class, LoadBalancingServiceProvider.class, PortForwardingServiceProvider.class, IpDeployer.class, RemoteAccessVPNServiceProvider.class, NetworkMigrationResponder.class} ) -public class VirtualRouterElement extends AdapterBase implements VirtualRouterElementService, DhcpServiceProvider, +public class VirtualRouterElement extends AdapterBase implements VirtualRouterElementService, DhcpServiceProvider, UserDataServiceProvider, SourceNatServiceProvider, StaticNatServiceProvider, FirewallServiceProvider, LoadBalancingServiceProvider, PortForwardingServiceProvider, RemoteAccessVPNServiceProvider, IpDeployer, NetworkMigrationResponder { @@ -166,7 +164,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } } else { if (!_networkMgr.isProviderSupportServiceInNetwork(network.getId(), service, getProvider())) { - s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + s_logger.trace("Element " + getProvider().getName() + " doesn't support service " + service.getName() + " in the network " + network); return false; } @@ -187,19 +185,19 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl Map params = new HashMap(1); params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); - List routers = _routerMgr.deployVirtualRouterInGuestNetwork(network, dest, - 
_accountMgr.getAccount(network.getAccountId()), params, + List routers = _routerMgr.deployVirtualRouterInGuestNetwork(network, dest, + _accountMgr.getAccount(network.getAccountId()), params, offering.getRedundantRouter()); if ((routers == null) || (routers.size() == 0)) { throw new ResourceUnavailableException("Can't find at least one running router!", DataCenter.class, network.getDataCenterId()); } - return true; + return true; } @Override - public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm, + public boolean prepare(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { if (vm.getType() != VirtualMachine.Type.User || vm.getHypervisorType() == HypervisorType.BareMetal) { @@ -219,15 +217,15 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @SuppressWarnings("unchecked") - VirtualMachineProfile uservm = (VirtualMachineProfile) vm; - List routers = _routerMgr.deployVirtualRouterInGuestNetwork(network, dest, + VirtualMachineProfile uservm = vm; + List routers = _routerMgr.deployVirtualRouterInGuestNetwork(network, dest, _accountMgr.getAccount(network.getAccountId()), uservm.getParameters(), offering.getRedundantRouter()); if ((routers == null) || (routers.size() == 0)) { throw new ResourceUnavailableException("Can't find at least one running router!", DataCenter.class, network.getDataCenterId()); } - return true; + return true; } @Override @@ -306,11 +304,11 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl expire = value; } if ((expire != null) && !containsOnlyNumbers(expire, timeEndChar)) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + + throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: expire is not in timeformat: 
" + expire); } if ((tablesize != null) && !containsOnlyNumbers(tablesize, "kmg")) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + + throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: tablesize is not in size format: " + tablesize); } @@ -337,11 +335,11 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } if ((length != null) && (!containsOnlyNumbers(length, null))) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + + throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: length is not a number: " + length); } if ((holdTime != null) && (!containsOnlyNumbers(holdTime, timeEndChar) && !containsOnlyNumbers(holdTime, null))) { - throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + + throw new InvalidParameterValueException("Failed LB in validation rule id: " + rule.getId() + " Cause: holdtime is not in timeformat: " + holdTime); } } @@ -389,8 +387,11 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl @Override public String[] applyVpnUsers(RemoteAccessVpn vpn, List users) throws ResourceUnavailableException { - Network network = _networksDao.findById(vpn.getNetworkId()); + if (vpn.getNetworkId() == null) { + return null; + } + Network network = _networksDao.findById(vpn.getNetworkId()); if (canHandle(network, Service.Vpn)) { List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { @@ -400,13 +401,18 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } return _routerMgr.applyVpnUsers(network, users, routers); } else { - s_logger.debug("Element " + this.getName() + " doesn't handle applyVpnUsers command"); + s_logger.debug("Element " + getName() + " doesn't handle 
applyVpnUsers command"); return null; } } @Override - public boolean startVpn(Network network, RemoteAccessVpn vpn) throws ResourceUnavailableException { + public boolean startVpn(RemoteAccessVpn vpn) throws ResourceUnavailableException { + if (vpn.getNetworkId() == null) { + return false; + } + + Network network = _networksDao.findById(vpn.getNetworkId()); if (canHandle(network, Service.Vpn)) { List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { @@ -416,13 +422,18 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } return _routerMgr.startRemoteAccessVpn(network, vpn, routers); } else { - s_logger.debug("Element " + this.getName() + " doesn't handle createVpn command"); + s_logger.debug("Element " + getName() + " doesn't handle createVpn command"); return false; } } @Override - public boolean stopVpn(Network network, RemoteAccessVpn vpn) throws ResourceUnavailableException { + public boolean stopVpn(RemoteAccessVpn vpn) throws ResourceUnavailableException { + if (vpn.getNetworkId() == null) { + return false; + } + + Network network = _networksDao.findById(vpn.getNetworkId()); if (canHandle(network, Service.Vpn)) { List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { @@ -432,13 +443,13 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } return _routerMgr.deleteRemoteAccessVpn(network, vpn, routers); } else { - s_logger.debug("Element " + this.getName() + " doesn't handle removeVpn command"); + s_logger.debug("Element " + getName() + " doesn't handle removeVpn command"); return false; } } @Override - public boolean applyIps(Network network, List ipAddress, Set services) + public boolean applyIps(Network network, List ipAddress, Set services) throws ResourceUnavailableException { boolean canHandle = true; for (Service service : services) { @@ -636,7 
+647,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @Override - public boolean shutdown(Network network, ReservationContext context, boolean cleanup) + public boolean shutdown(Network network, ReservationContext context, boolean cleanup) throws ConcurrentOperationException, ResourceUnavailableException { List routers = _routerDao.listByNetworkAndRole(network.getId(), Role.VIRTUAL_ROUTER); if (routers == null || routers.isEmpty()) { @@ -665,14 +676,17 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl return true; } boolean result = true; + // NOTE that we need to pass caller account to destroyRouter, otherwise it will fail permission check there. Context passed in from deleteNetwork is the network account, + // not caller account + Account callerAccount = _accountMgr.getAccount(context.getCaller().getAccountId()); for (DomainRouterVO router : routers) { - result = result && (_routerMgr.destroyRouter(router.getId(), context.getAccount(), context.getCaller().getId()) != null); + result = result && (_routerMgr.destroyRouter(router.getId(), callerAccount, context.getCaller().getId()) != null); } return result; } @Override - public boolean savePassword(Network network, NicProfile nic, VirtualMachineProfile vm) + public boolean savePassword(Network network, NicProfile nic, VirtualMachineProfile vm) throws ResourceUnavailableException { if (!canHandle(network, null)) { return false; @@ -684,7 +698,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @SuppressWarnings("unchecked") - VirtualMachineProfile uservm = (VirtualMachineProfile) vm; + VirtualMachineProfile uservm = vm; return _routerMgr.savePasswordToRouter(network, nic, uservm, routers); } @@ -702,7 +716,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @SuppressWarnings("unchecked") - VirtualMachineProfile uservm = (VirtualMachineProfile) vm; + VirtualMachineProfile 
uservm = vm; return _routerMgr.saveSSHPublicKeyToRouter(network, nic, uservm, routers, SSHPublicKey); } @@ -720,7 +734,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @SuppressWarnings("unchecked") - VirtualMachineProfile uservm = (VirtualMachineProfile) vm; + VirtualMachineProfile uservm = vm; return _routerMgr.saveUserDataToRouter(network, nic, uservm, routers); } @@ -737,7 +751,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl @Override public VirtualRouterProvider configure(ConfigureVirtualRouterElementCmd cmd) { VirtualRouterProviderVO element = _vrProviderDao.findById(cmd.getId()); - if (element == null || !(element.getType() == VirtualRouterProviderType.VirtualRouter || element.getType() == VirtualRouterProviderType.VPCVirtualRouter)) { + if (element == null || !(element.getType() == Type.VirtualRouter || element.getType() == Type.VPCVirtualRouter)) { s_logger.debug("Can't find Virtual Router element with network service provider id " + cmd.getId()); return null; } @@ -749,10 +763,10 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @Override - public VirtualRouterProvider addElement(Long nspId, VirtualRouterProviderType providerType) { - if (!(providerType == VirtualRouterProviderType.VirtualRouter || providerType == VirtualRouterProviderType.VPCVirtualRouter)) { - throw new InvalidParameterValueException("Element " + this.getName() + " supports only providerTypes: " + - VirtualRouterProviderType.VirtualRouter.toString() + " and " + VirtualRouterProviderType.VPCVirtualRouter); + public VirtualRouterProvider addElement(Long nspId, Type providerType) { + if (!(providerType == Type.VirtualRouter || providerType == Type.VPCVirtualRouter)) { + throw new InvalidParameterValueException("Element " + getName() + " supports only providerTypes: " + + Type.VirtualRouter.toString() + " and " + Type.VPCVirtualRouter); } VirtualRouterProviderVO element = 
_vrProviderDao.findByNspIdAndType(nspId, providerType); if (element != null) { @@ -786,7 +800,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl @Override public boolean isReady(PhysicalNetworkServiceProvider provider) { - VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(provider.getId(), + VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(provider.getId(), getVirtualRouterProvider()); if (element == null) { return false; @@ -795,10 +809,10 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @Override - public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) + public boolean shutdownProviderInstances(PhysicalNetworkServiceProvider provider, ReservationContext context) throws ConcurrentOperationException, ResourceUnavailableException { - VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(provider.getId(), + VirtualRouterProviderVO element = _vrProviderDao.findByNspIdAndType(provider.getId(), getVirtualRouterProvider()); if (element == null) { return true; @@ -821,14 +835,14 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } public Long getIdByNspId(Long nspId) { - VirtualRouterProviderVO vr = _vrProviderDao.findByNspIdAndType(nspId, VirtualRouterProviderType.VirtualRouter); + VirtualRouterProviderVO vr = _vrProviderDao.findByNspIdAndType(nspId, Type.VirtualRouter); return vr.getId(); } @Override public VirtualRouterProvider getCreatedElement(long id) { VirtualRouterProvider provider = _vrProviderDao.findById(id); - if (!(provider.getType() == VirtualRouterProviderType.VirtualRouter || provider.getType() == VirtualRouterProviderType.VPCVirtualRouter)) { + if (!(provider.getType() == Type.VirtualRouter || provider.getType() == Type.VPCVirtualRouter)) { throw new InvalidParameterValueException("Unable to find provider by id"); } return provider; @@ -849,7 
+863,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl return false; } @SuppressWarnings("unchecked") - VirtualMachineProfile uservm = (VirtualMachineProfile) vm; + VirtualMachineProfile uservm = vm; List routers = getRouters(network, dest); @@ -880,7 +894,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @Override - public boolean addDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vm, + public boolean addDhcpEntry(Network network, NicProfile nic, VirtualMachineProfile vm, DeployDestination dest, ReservationContext context) throws ConcurrentOperationException, InsufficientCapacityException, ResourceUnavailableException { if (canHandle(network, Service.Dhcp)) { @@ -889,7 +903,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @SuppressWarnings("unchecked") - VirtualMachineProfile uservm = (VirtualMachineProfile) vm; + VirtualMachineProfile uservm = vm; List routers = getRouters(network, dest); @@ -917,7 +931,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl } @SuppressWarnings("unchecked") - VirtualMachineProfile uservm = (VirtualMachineProfile) vm; + VirtualMachineProfile uservm = vm; List routers = getRouters(network, dest); @@ -935,7 +949,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl if (_networkMgr.isProviderSupportServiceInNetwork(network.getId(), Service.SourceNat, getProvider())) { publicNetwork = true; } - boolean isPodBased = (dest.getDataCenter().getNetworkType() == NetworkType.Basic + boolean isPodBased = (dest.getDataCenter().getNetworkType() == NetworkType.Basic || _networkMgr.isSecurityGroupSupportedInNetwork(network)) && network.getTrafficType() == TrafficType.Guest; @@ -969,19 +983,19 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl Long nspId = cmd.getNspId(); Boolean enabled = cmd.getEnabled(); - 
SearchCriteriaService sc = SearchCriteria2.create(VirtualRouterProviderVO.class); + QueryBuilder sc = QueryBuilder.create(VirtualRouterProviderVO.class); if (id != null) { - sc.addAnd(sc.getEntity().getId(), Op.EQ, id); + sc.and(sc.entity().getId(), Op.EQ, id); } if (nspId != null) { - sc.addAnd(sc.getEntity().getNspId(), Op.EQ, nspId); + sc.and(sc.entity().getNspId(), Op.EQ, nspId); } if (enabled != null) { - sc.addAnd(sc.getEntity().isEnabled(), Op.EQ, enabled); + sc.and(sc.entity().isEnabled(), Op.EQ, enabled); } //return only VR and VPC VR - sc.addAnd(sc.getEntity().getType(), Op.IN, VirtualRouterProvider.VirtualRouterProviderType.VPCVirtualRouter, VirtualRouterProvider.VirtualRouterProviderType.VirtualRouter); + sc.and(sc.entity().getType(), Op.IN, VirtualRouterProvider.Type.VPCVirtualRouter, VirtualRouterProvider.Type.VirtualRouter); return sc.list(); } @@ -989,7 +1003,7 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl @Override public boolean verifyServicesCombination(Set services) { if (!services.contains(Service.SourceNat)) { - if (services.contains(Service.StaticNat) || services.contains(Service.Firewall) || services.contains(Service.Lb) || + if (services.contains(Service.StaticNat) || services.contains(Service.Firewall) || services.contains(Service.Lb) || services.contains(Service.PortForwarding) || services.contains(Service.Vpn)) { String servicesList = "["; for (Service service : services) { @@ -1008,8 +1022,8 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl return this; } - protected VirtualRouterProviderType getVirtualRouterProvider() { - return VirtualRouterProviderType.VirtualRouter; + protected VirtualRouterProvider.Type getVirtualRouterProvider() { + return VirtualRouterProvider.Type.VirtualRouter; } @Override @@ -1019,13 +1033,13 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl return null; } private boolean canHandleLbRules(List rules) { - 
Map lbCaps = this.getCapabilities().get(Service.Lb); + Map lbCaps = getCapabilities().get(Service.Lb); if (!lbCaps.isEmpty()) { String schemeCaps = lbCaps.get(Capability.LbSchemes); if (schemeCaps != null) { for (LoadBalancingRule rule : rules) { if (!schemeCaps.contains(rule.getScheme().toString())) { - s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + this.getName()); + s_logger.debug("Scheme " + rules.get(0).getScheme() + " is not supported by the provider " + getName()); return false; } } @@ -1041,11 +1055,11 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl if (nic.getBroadcastType() != Networks.BroadcastDomainType.Pvlan) { return true; } - if (vm.getType() == Type.DomainRouter) { + if (vm.getType() == VirtualMachine.Type.DomainRouter) { assert vm instanceof DomainRouterVO; DomainRouterVO router = (DomainRouterVO)vm.getVirtualMachine(); _routerMgr.setupDhcpForPvlan(false, router, router.getHostId(), nic); - } else if (vm.getType() == Type.User){ + } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; UserVmVO userVm = (UserVmVO)vm.getVirtualMachine(); _userVmMgr.setupVmForPvlan(false, userVm.getHostId(), nic); @@ -1060,11 +1074,11 @@ public class VirtualRouterElement extends AdapterBase implements VirtualRouterEl if (nic.getBroadcastType() != Networks.BroadcastDomainType.Pvlan) { return; } - if (vm.getType() == Type.DomainRouter) { + if (vm.getType() == VirtualMachine.Type.DomainRouter) { assert vm instanceof DomainRouterVO; DomainRouterVO router = (DomainRouterVO)vm.getVirtualMachine(); _routerMgr.setupDhcpForPvlan(true, router, router.getHostId(), nic); - } else if (vm.getType() == Type.User){ + } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; UserVmVO userVm = (UserVmVO)vm.getVirtualMachine(); _userVmMgr.setupVmForPvlan(true, userVm.getHostId(), nic); @@ -1078,11 +1092,11 @@ public class VirtualRouterElement 
extends AdapterBase implements VirtualRouterEl if (nic.getBroadcastType() != Networks.BroadcastDomainType.Pvlan) { return; } - if (vm.getType() == Type.DomainRouter) { + if (vm.getType() == VirtualMachine.Type.DomainRouter) { assert vm instanceof DomainRouterVO; DomainRouterVO router = (DomainRouterVO)vm.getVirtualMachine(); _routerMgr.setupDhcpForPvlan(true, router, router.getHostId(), nic); - } else if (vm.getType() == Type.User){ + } else if (vm.getType() == VirtualMachine.Type.User) { assert vm instanceof UserVmVO; UserVmVO userVm = (UserVmVO)vm.getVirtualMachine(); _userVmMgr.setupVmForPvlan(true, userVm.getHostId(), nic); diff --git a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java index c512dda0058..1f5846b0cb8 100644 --- a/server/src/com/cloud/network/element/VpcVirtualRouterElement.java +++ b/server/src/com/cloud/network/element/VpcVirtualRouterElement.java @@ -39,9 +39,11 @@ import com.cloud.network.Network.Provider; import com.cloud.network.Network.Service; import com.cloud.network.NetworkModel; import com.cloud.network.PublicIpAddress; +import com.cloud.network.RemoteAccessVpn; import com.cloud.network.Site2SiteVpnConnection; import com.cloud.network.Site2SiteVpnGateway; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; +import com.cloud.network.VpnUser; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.Site2SiteVpnGatewayDao; @@ -63,7 +65,7 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.NicProfile; import com.cloud.vm.ReservationContext; -import com.cloud.vm.VirtualMachine.Type; +import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineProfile; @Local(value = {NetworkElement.class, FirewallServiceProvider.class, @@ -210,7 +212,7 @@ public class 
VpcVirtualRouterElement extends VirtualRouterElement implements Vpc return false; } - if (vm.getType() == Type.User) { + if (vm.getType() == VirtualMachine.Type.User) { Map params = new HashMap(1); params.put(VirtualMachineProfile.Param.ReProgramGuestNetworks, true); List routers = _vpcRouterMgr.deployVirtualRouterInVpc(vpc, dest, @@ -434,8 +436,8 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc } @Override - protected VirtualRouterProviderType getVirtualRouterProvider() { - return VirtualRouterProviderType.VPCVirtualRouter; + protected Type getVirtualRouterProvider() { + return Type.VPCVirtualRouter; } @Override @@ -531,4 +533,47 @@ public class VpcVirtualRouterElement extends VirtualRouterElement implements Vpc return _vpcRouterMgr.stopSite2SiteVpn(conn, routers.get(0)); } + + @Override + public String[] applyVpnUsers(RemoteAccessVpn vpn, List users) throws ResourceUnavailableException { + if (vpn.getVpcId() == null) { + return null; + } + + List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); + if (routers == null || routers.size() != 1) { + s_logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + return null; + } + return _vpcRouterMgr.applyVpnUsers(vpn, users, routers.get(0)); + } + + @Override + public boolean startVpn(RemoteAccessVpn vpn) throws ResourceUnavailableException { + if (vpn.getVpcId() == null) { + return false; + } + + List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); + if (routers == null || routers.size() != 1) { + s_logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + return true; + } + return _vpcRouterMgr.startRemoteAccessVpn(vpn, routers.get(0)); + } + + @Override + public boolean stopVpn(RemoteAccessVpn vpn) throws ResourceUnavailableException { + if (vpn.getVpcId() == null) { + return false; + } + + List routers = _vpcRouterMgr.getVpcRouters(vpn.getVpcId()); 
+ if (routers == null || routers.size() != 1) { + s_logger.debug("Cannot apply vpn users on the backend; virtual router doesn't exist in the network " + vpn.getVpcId()); + return true; + } + return _vpcRouterMgr.stopRemoteAccessVpn(vpn, routers.get(0)); + } + } diff --git a/server/src/com/cloud/network/firewall/FirewallManagerImpl.java b/server/src/com/cloud/network/firewall/FirewallManagerImpl.java index 195f93ef267..89f24c6109d 100644 --- a/server/src/com/cloud/network/firewall/FirewallManagerImpl.java +++ b/server/src/com/cloud/network/firewall/FirewallManagerImpl.java @@ -29,7 +29,6 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.api.command.user.firewall.ListEgressFirewallRulesCmd; import org.apache.cloudstack.api.command.user.firewall.ListFirewallRulesCmd; import org.apache.cloudstack.context.CallContext; @@ -75,7 +74,7 @@ import com.cloud.network.rules.StaticNat; import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.network.vpc.VpcManager; import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; @@ -89,6 +88,9 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; @@ -133,13 +135,13 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, NetworkDao _networkDao; @Inject 
VpcManager _vpcMgr; - @Inject List _firewallElements; + List _firewallElements; - @Inject List _pfElements; + List _pfElements; - @Inject List _staticNatElements; + List _staticNatElements; - @Inject List _networkAclElements; + List _networkAclElements; @Inject IpAddressManager _ipAddrMgr; @@ -150,10 +152,15 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, _name = name; String elbEnabledString = _configDao.getValue(Config.ElasticLoadBalancerEnabled.key()); _elbEnabled = Boolean.parseBoolean(elbEnabledString); - s_logger.info("Firewall provider list is " + _firewallElements.iterator().next()); return true; } + @Override + public boolean start() { + s_logger.info("Firewall provider list is " + _firewallElements.iterator().next()); + return super.start(); + } + @Override @ActionEvent(eventType = EventTypes.EVENT_FIREWALL_OPEN, eventDescription = "creating firewall rule", create = true) public FirewallRule createEgressFirewallRule(FirewallRule rule) throws NetworkRuleConflictException { @@ -181,9 +188,9 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } @DB - protected FirewallRule createFirewallRule(Long ipAddrId, Account caller, String xId, Integer portStart, - Integer portEnd, String protocol, List sourceCidrList, Integer icmpCode, Integer icmpType, - Long relatedRuleId, FirewallRule.FirewallRuleType type, Long networkId, FirewallRule.TrafficType trafficType) throws NetworkRuleConflictException { + protected FirewallRule createFirewallRule(final Long ipAddrId, Account caller, final String xId, final Integer portStart, + final Integer portEnd, final String protocol, final List sourceCidrList, final Integer icmpCode, final Integer icmpType, + final Long relatedRuleId, final FirewallRule.FirewallRuleType type, final Long networkId, final FirewallRule.TrafficType trafficType) throws NetworkRuleConflictException { IPAddressVO ipAddress = null; if (ipAddrId != null){ @@ -222,11 +229,13 @@ public class 
FirewallManagerImpl extends ManagerBase implements FirewallService, domainId = network.getDomainId(); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - + final Long accountIdFinal = accountId; + final Long domainIdFinal = domainId; + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public FirewallRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { FirewallRuleVO newRule = new FirewallRuleVO(xId, ipAddrId, portStart, portEnd, protocol.toLowerCase(), networkId, - accountId, domainId, Purpose.Firewall, sourceCidrList, icmpCode, icmpType, relatedRuleId, trafficType); + accountIdFinal, domainIdFinal, Purpose.Firewall, sourceCidrList, icmpCode, icmpType, relatedRuleId, trafficType); newRule.setType(type); newRule = _firewallDao.persist(newRule); @@ -238,10 +247,10 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, } CallContext.current().setEventDetails("Rule Id: " + newRule.getId()); - txn.commit(); - return newRule; } + }); + } @Override public Pair, Integer> listFirewallRules(ListFirewallRulesCmd cmd) { @@ -304,7 +313,7 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.FirewallRule.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.FirewallRule.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -553,6 +562,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, throws ResourceUnavailableException { boolean handled = false; switch (purpose){ + /* StaticNatRule would be applied by Firewall provider, since the incompatible of two object */ + case StaticNat: case Firewall: for 
(FirewallServiceProvider fwElement: _firewallElements) { Network.Provider provider = fwElement.getProvider(); @@ -577,18 +588,6 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, break; } break; - case StaticNat: - for (StaticNatServiceProvider element: _staticNatElements) { - Network.Provider provider = element.getProvider(); - boolean isSnatProvider = _networkModel.isProviderSupportServiceInNetwork(network.getId(), Service.StaticNat, provider); - if (!isSnatProvider) { - continue; - } - handled = element.applyStaticNats(network, (List) rules); - if (handled) - break; - } - break; /* case NetworkACL: for (NetworkACLServiceProvider element: _networkAclElements) { Network.Provider provider = element.getProvider(); @@ -734,15 +733,16 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, @Override @DB - public void revokeRule(FirewallRuleVO rule, Account caller, long userId, boolean needUsageEvent) { + public void revokeRule(final FirewallRuleVO rule, Account caller, long userId, final boolean needUsageEvent) { if (caller != null) { _accountMgr.checkAccess(caller, null, true, rule); } - Transaction txn = Transaction.currentTxn(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { boolean generateUsageEvent = false; - txn.start(); if (rule.getState() == State.Staged) { if (s_logger.isDebugEnabled()) { s_logger.debug("Found a rule that is still in stage state so just removing it: " + rule); @@ -759,8 +759,8 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_RULE_DELETE, rule.getAccountId(), 0, rule.getId(), null, rule.getClass().getName(), rule.getUuid()); } - - txn.commit(); + } + }); } @Override @@ -936,4 +936,40 @@ public class FirewallManagerImpl extends ManagerBase implements FirewallService, return true; } + public List 
getFirewallElements() { + return _firewallElements; + } + + @Inject + public void setFirewallElements(List firewallElements) { + this._firewallElements = firewallElements; + } + + public List getPfElements() { + return _pfElements; + } + + @Inject + public void setPfElements(List pfElements) { + this._pfElements = pfElements; + } + + public List getStaticNatElements() { + return _staticNatElements; + } + + @Inject + public void setStaticNatElements(List staticNatElements) { + this._staticNatElements = staticNatElements; + } + + public List getNetworkAclElements() { + return _networkAclElements; + } + + @Inject + public void setNetworkAclElements(List networkAclElements) { + this._networkAclElements = networkAclElements; + } + } diff --git a/server/src/com/cloud/network/guru/DirectNetworkGuru.java b/server/src/com/cloud/network/guru/DirectNetworkGuru.java index 55da113a148..801d252ef81 100755 --- a/server/src/com/cloud/network/guru/DirectNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectNetworkGuru.java @@ -22,7 +22,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import com.cloud.dc.DataCenter; @@ -33,6 +32,7 @@ import com.cloud.deploy.DeployDestination; import com.cloud.deploy.DeploymentPlan; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientAddressCapacityException; +import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientVirtualNetworkCapcityException; import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.IpAddressManager; @@ -56,6 +56,11 @@ import com.cloud.user.Account; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import 
com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; +import com.cloud.utils.exception.ExceptionUtil; import com.cloud.vm.Nic; import com.cloud.vm.Nic.ReservationStrategy; import com.cloud.vm.NicProfile; @@ -228,22 +233,30 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { } @DB - protected void allocateDirectIp(NicProfile nic, Network network, VirtualMachineProfile vm, DataCenter dc, String requestedIp4Addr, String requestedIp6Addr) + protected void allocateDirectIp(final NicProfile nic, final Network network, final VirtualMachineProfile vm, final DataCenter dc, final String requestedIp4Addr, final String requestedIp6Addr) throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException { - - Transaction txn = Transaction.currentTxn(); - txn.start(); - _ipAddrMgr.allocateDirectIp(nic, dc, vm, network, requestedIp4Addr, requestedIp6Addr); - //save the placeholder nic if the vm is the Virtual router - if (vm.getType() == VirtualMachine.Type.DomainRouter) { - Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); - if (placeholderNic == null) { - s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIp4Address() + " and ipv6 address " + nic.getIp6Address() + " for the network " + network); - _networkMgr.savePlaceholderNic(network, nic.getIp4Address(), nic.getIp6Address(), VirtualMachine.Type.DomainRouter); - } + + try { + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException { + _ipAddrMgr.allocateDirectIp(nic, dc, vm, network, requestedIp4Addr, requestedIp6Addr); + //save the placeholder nic if the vm is the Virtual router + if (vm.getType() == VirtualMachine.Type.DomainRouter) { + Nic placeholderNic = 
_networkModel.getPlaceholderNicForRouter(network, null); + if (placeholderNic == null) { + s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIp4Address() + " and ipv6 address " + nic.getIp6Address() + " for the network " + network); + _networkMgr.savePlaceholderNic(network, nic.getIp4Address(), nic.getIp6Address(), VirtualMachine.Type.DomainRouter); + } + } + } + }); + } catch (InsufficientCapacityException e) { + ExceptionUtil.rethrow(e, InsufficientVirtualNetworkCapcityException.class); + ExceptionUtil.rethrow(e, InsufficientAddressCapacityException.class); + throw new IllegalStateException(e); } - txn.commit(); } @Override @@ -257,37 +270,37 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { } @Override @DB - public void deallocate(Network network, NicProfile nic, VirtualMachineProfile vm) { + public void deallocate(final Network network, final NicProfile nic, VirtualMachineProfile vm) { if (s_logger.isDebugEnabled()) { s_logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIp4Address()); } if (nic.getIp4Address() != null) { - IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); + final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); if (ip != null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - - // if the ip address a part of placeholder, don't release it - Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); - if (placeholderNic != null && placeholderNic.getIp4Address().equalsIgnoreCase(ip.getAddress().addr())) { - s_logger.debug("Not releasing direct ip " + ip.getId() +" yet as its ip is saved in the placeholder"); - } else { - _ipAddrMgr.markIpAsUnavailable(ip.getId()); - _ipAddressDao.unassignIpAddress(ip.getId()); - } - - //unassign nic secondary ip address - s_logger.debug("remove nic " + nic.getId() + " secondary ip "); - List 
nicSecIps = null; - nicSecIps = _nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); - for (String secIp: nicSecIps) { - IPAddressVO pubIp = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), secIp); - _ipAddrMgr.markIpAsUnavailable(pubIp.getId()); - _ipAddressDao.unassignIpAddress(pubIp.getId()); - } - - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // if the ip address a part of placeholder, don't release it + Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, null); + if (placeholderNic != null && placeholderNic.getIp4Address().equalsIgnoreCase(ip.getAddress().addr())) { + s_logger.debug("Not releasing direct ip " + ip.getId() +" yet as its ip is saved in the placeholder"); + } else { + _ipAddrMgr.markIpAsUnavailable(ip.getId()); + _ipAddressDao.unassignIpAddress(ip.getId()); + } + + //unassign nic secondary ip address + s_logger.debug("remove nic " + nic.getId() + " secondary ip "); + List nicSecIps = null; + nicSecIps = _nicSecondaryIpDao.getSecondaryIpAddressesForNic(nic.getId()); + for (String secIp: nicSecIps) { + IPAddressVO pubIp = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), secIp); + _ipAddrMgr.markIpAsUnavailable(pubIp.getId()); + _ipAddressDao.unassignIpAddress(pubIp.getId()); + } + } + }); } } @@ -305,21 +318,23 @@ public class DirectNetworkGuru extends AdapterBase implements NetworkGuru { @DB public boolean trash(Network network, NetworkOffering offering) { //Have to remove all placeholder nics - List nics = _nicDao.listPlaceholderNicsByNetworkId(network.getId()); - Transaction txn = Transaction.currentTxn(); - txn.start(); - for (Nic nic : nics) { - if (nic.getIp4Address() != null) { - s_logger.debug("Releasing ip " + nic.getIp4Address() + " of placeholder nic " + nic); - IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); - 
_ipAddrMgr.markIpAsUnavailable(ip.getId()); - _ipAddressDao.unassignIpAddress(ip.getId()); - s_logger.debug("Removing placeholder nic " + nic); - _nicDao.remove(nic.getId()); + final List nics = _nicDao.listPlaceholderNicsByNetworkId(network.getId()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (Nic nic : nics) { + if (nic.getIp4Address() != null) { + s_logger.debug("Releasing ip " + nic.getIp4Address() + " of placeholder nic " + nic); + IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); + _ipAddrMgr.markIpAsUnavailable(ip.getId()); + _ipAddressDao.unassignIpAddress(ip.getId()); + s_logger.debug("Removing placeholder nic " + nic); + _nicDao.remove(nic.getId()); + } + } } - } - - txn.commit(); + }); + return true; } diff --git a/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java b/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java index 31bc021ff73..053a786e67b 100755 --- a/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java +++ b/server/src/com/cloud/network/guru/DirectPodBasedNetworkGuru.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import com.cloud.configuration.ZoneConfig; @@ -53,6 +52,9 @@ import com.cloud.offering.NetworkOffering; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.Nic; import com.cloud.vm.Nic.ReservationStrategy; @@ -133,18 +135,18 @@ public class DirectPodBasedNetworkGuru extends 
DirectNetworkGuru { getNewIp = true; } else { // we need to get a new ip address if we try to deploy a vm in a different pod - IPAddressVO ipVO = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), oldIp); + final IPAddressVO ipVO = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), oldIp); if (ipVO != null) { PodVlanMapVO mapVO = _podVlanDao.listPodVlanMapsByVlan(ipVO.getVlanId()); if (mapVO.getPodId() != dest.getPod().getId()) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - - //release the old ip here - _ipAddrMgr.markIpAsUnavailable(ipVO.getId()); - _ipAddressDao.unassignIpAddress(ipVO.getId()); - - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + //release the old ip here + _ipAddrMgr.markIpAsUnavailable(ipVO.getId()); + _ipAddressDao.unassignIpAddress(ipVO.getId()); + } + }); nic.setIp4Address(null); getNewIp = true; @@ -163,54 +165,55 @@ public class DirectPodBasedNetworkGuru extends DirectNetworkGuru { } @DB - protected void getIp(NicProfile nic, Pod pod, VirtualMachineProfile vm, Network network) throws InsufficientVirtualNetworkCapcityException, + protected void getIp(final NicProfile nic, final Pod pod, final VirtualMachineProfile vm, final Network network) throws InsufficientVirtualNetworkCapcityException, InsufficientAddressCapacityException, ConcurrentOperationException { - DataCenter dc = _dcDao.findById(pod.getDataCenterId()); + final DataCenter dc = _dcDao.findById(pod.getDataCenterId()); if (nic.getIp4Address() == null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - - PublicIp ip = null; - List podRefs = _podVlanDao.listPodVlanMapsByPod(pod.getId()); - String podRangeGateway = null; - if (!podRefs.isEmpty()) { - podRangeGateway = _vlanDao.findById(podRefs.get(0).getVlanDbId()).getVlanGateway(); - } - //Get ip address from the placeholder and don't allocate a new one - if (vm.getType() == 
VirtualMachine.Type.DomainRouter) { - Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, pod.getId()); - if (placeholderNic != null) { - IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), placeholderNic.getIp4Address()); - ip = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); - s_logger.debug("Nic got an ip address " + placeholderNic.getIp4Address() + " stored in placeholder nic for the network " + network + " and gateway " + podRangeGateway); + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws InsufficientAddressCapacityException { + PublicIp ip = null; + List podRefs = _podVlanDao.listPodVlanMapsByPod(pod.getId()); + String podRangeGateway = null; + if (!podRefs.isEmpty()) { + podRangeGateway = _vlanDao.findById(podRefs.get(0).getVlanDbId()).getVlanGateway(); + } + //Get ip address from the placeholder and don't allocate a new one + if (vm.getType() == VirtualMachine.Type.DomainRouter) { + Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, pod.getId()); + if (placeholderNic != null) { + IPAddressVO userIp = _ipAddressDao.findByIpAndSourceNetworkId(network.getId(), placeholderNic.getIp4Address()); + ip = PublicIp.createFromAddrAndVlan(userIp, _vlanDao.findById(userIp.getVlanId())); + s_logger.debug("Nic got an ip address " + placeholderNic.getIp4Address() + " stored in placeholder nic for the network " + network + " and gateway " + podRangeGateway); + } + } + + if (ip == null) { + ip = _ipAddrMgr.assignPublicIpAddress(dc.getId(), pod.getId(), vm.getOwner(), VlanType.DirectAttached, network.getId(), null, false); + } + + nic.setIp4Address(ip.getAddress().toString()); + nic.setFormat(AddressFormat.Ip4); + nic.setGateway(ip.getGateway()); + nic.setNetmask(ip.getNetmask()); + if (ip.getVlanTag() != null && ip.getVlanTag().equalsIgnoreCase(Vlan.UNTAGGED)) { + 
nic.setIsolationUri(IsolationType.Ec2.toUri(Vlan.UNTAGGED)); + nic.setBroadcastUri(BroadcastDomainType.Vlan.toUri(Vlan.UNTAGGED)); + nic.setBroadcastType(BroadcastDomainType.Native); + } + nic.setReservationId(String.valueOf(ip.getVlanTag())); + nic.setMacAddress(ip.getMacAddress()); + + //save the placeholder nic if the vm is the Virtual router + if (vm.getType() == VirtualMachine.Type.DomainRouter) { + Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, pod.getId()); + if (placeholderNic == null) { + s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIp4Address() + " for the network " + network); + _networkMgr.savePlaceholderNic(network, nic.getIp4Address(), null, VirtualMachine.Type.DomainRouter); + } + } } - } - - if (ip == null) { - ip = _ipAddrMgr.assignPublicIpAddress(dc.getId(), pod.getId(), vm.getOwner(), VlanType.DirectAttached, network.getId(), null, false); - } - - nic.setIp4Address(ip.getAddress().toString()); - nic.setFormat(AddressFormat.Ip4); - nic.setGateway(ip.getGateway()); - nic.setNetmask(ip.getNetmask()); - if (ip.getVlanTag() != null && ip.getVlanTag().equalsIgnoreCase(Vlan.UNTAGGED)) { - nic.setIsolationUri(IsolationType.Ec2.toUri(Vlan.UNTAGGED)); - nic.setBroadcastUri(BroadcastDomainType.Vlan.toUri(Vlan.UNTAGGED)); - nic.setBroadcastType(BroadcastDomainType.Native); - } - nic.setReservationId(String.valueOf(ip.getVlanTag())); - nic.setMacAddress(ip.getMacAddress()); - - //save the placeholder nic if the vm is the Virtual router - if (vm.getType() == VirtualMachine.Type.DomainRouter) { - Nic placeholderNic = _networkModel.getPlaceholderNicForRouter(network, pod.getId()); - if (placeholderNic == null) { - s_logger.debug("Saving placeholder nic with ip4 address " + nic.getIp4Address() + " for the network " + network); - _networkMgr.savePlaceholderNic(network, nic.getIp4Address(), null, VirtualMachine.Type.DomainRouter); - } - } - txn.commit(); + }); } nic.setDns1(dc.getDns1()); nic.setDns2(dc.getDns2()); diff 
--git a/server/src/com/cloud/network/guru/GuestNetworkGuru.java b/server/src/com/cloud/network/guru/GuestNetworkGuru.java index 20b0ce5b86a..85728c22fbb 100755 --- a/server/src/com/cloud/network/guru/GuestNetworkGuru.java +++ b/server/src/com/cloud/network/guru/GuestNetworkGuru.java @@ -24,7 +24,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; @@ -70,6 +69,8 @@ import com.cloud.utils.Pair; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.Nic.ReservationStrategy; @@ -223,13 +224,15 @@ public abstract class GuestNetworkGuru extends AdapterBase implements NetworkGur s_logger.debug("Deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIp4Address()); } - IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); + final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); if (ip != null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - _ipAddrMgr.markIpAsUnavailable(ip.getId()); - _ipAddressDao.unassignIpAddress(ip.getId()); - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _ipAddrMgr.markIpAsUnavailable(ip.getId()); + _ipAddressDao.unassignIpAddress(ip.getId()); + } + }); } nic.deallocate(); } diff --git a/server/src/com/cloud/network/guru/PublicNetworkGuru.java b/server/src/com/cloud/network/guru/PublicNetworkGuru.java index 
f82e22e8dde..6fed1a6ff7e 100755 --- a/server/src/com/cloud/network/guru/PublicNetworkGuru.java +++ b/server/src/com/cloud/network/guru/PublicNetworkGuru.java @@ -20,7 +20,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import com.cloud.dc.DataCenter; @@ -50,6 +49,8 @@ import com.cloud.user.Account; import com.cloud.utils.component.AdapterBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.Nic.ReservationStrategy; import com.cloud.vm.NicProfile; @@ -192,16 +193,15 @@ public class PublicNetworkGuru extends AdapterBase implements NetworkGuru { s_logger.debug("public network deallocate network: networkId: " + nic.getNetworkId() + ", ip: " + nic.getIp4Address()); } - IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); + final IPAddressVO ip = _ipAddressDao.findByIpAndSourceNetworkId(nic.getNetworkId(), nic.getIp4Address()); if (ip != null && nic.getReservationStrategy() != ReservationStrategy.Managed) { - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - _ipAddrMgr.markIpAsUnavailable(ip.getId()); - _ipAddressDao.unassignIpAddress(ip.getId()); - - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _ipAddrMgr.markIpAsUnavailable(ip.getId()); + _ipAddressDao.unassignIpAddress(ip.getId()); + } + }); } nic.deallocate(); diff --git a/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java b/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java index 1daa3f0dc1c..4794ee425ba 100644 --- a/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java +++ 
b/server/src/com/cloud/network/lb/LBHealthCheckManagerImpl.java @@ -29,8 +29,8 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.configuration.Config; import com.cloud.exception.ResourceUnavailableException; @@ -87,9 +87,9 @@ public class LBHealthCheckManagerImpl extends ManagerBase implements LBHealthChe return this.name; } - protected class UpdateLBHealthCheck implements Runnable { + protected class UpdateLBHealthCheck extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { updateLBHealthCheck(Scheme.Public); updateLBHealthCheck(Scheme.Internal); diff --git a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java index c685ee3e40b..0d434784505 100755 --- a/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java +++ b/server/src/com/cloud/network/lb/LoadBalancingRulesManagerImpl.java @@ -130,7 +130,7 @@ import com.cloud.network.rules.StickinessPolicy; import com.cloud.network.vpc.VpcManager; import com.cloud.offering.NetworkOffering; import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.tags.ResourceTagVO; @@ -152,6 +152,10 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import 
com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; @@ -243,7 +247,6 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements DataCenterDao _dcDao = null; @Inject UserDao _userDao; - @Inject List _lbProviders; @Inject ApplicationLoadBalancerRuleDao _appLbRuleDao; @Inject @@ -379,11 +382,11 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @Override @DB - public boolean configureLbAutoScaleVmGroup(long vmGroupid, String currentState) throws ResourceUnavailableException { - AutoScaleVmGroupVO vmGroup = _autoScaleVmGroupDao.findById(vmGroupid); + public boolean configureLbAutoScaleVmGroup(final long vmGroupid, String currentState) throws ResourceUnavailableException { + final AutoScaleVmGroupVO vmGroup = _autoScaleVmGroupDao.findById(vmGroupid); boolean success = false; - LoadBalancerVO loadBalancer = _lbDao.findById(vmGroup.getLoadBalancerId()); + final LoadBalancerVO loadBalancer = _lbDao.findById(vmGroup.getLoadBalancerId()); FirewallRule.State backupState = loadBalancer.getState(); @@ -416,14 +419,17 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (success) { if (vmGroup.getState().equals(AutoScaleVmGroup.State_New)) { - Transaction.currentTxn().start(); - loadBalancer.setState(FirewallRule.State.Active); - s_logger.debug("LB rule " + loadBalancer.getId() + " state is set to Active"); - _lbDao.persist(loadBalancer); - vmGroup.setState(AutoScaleVmGroup.State_Enabled); - _autoScaleVmGroupDao.persist(vmGroup); - s_logger.debug("LB Auto Scale Vm Group with Id: " + vmGroupid + " is set to Enabled state."); - Transaction.currentTxn().commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + loadBalancer.setState(FirewallRule.State.Active); + s_logger.debug("LB rule " + loadBalancer.getId() + 
" state is set to Active"); + _lbDao.persist(loadBalancer); + vmGroup.setState(AutoScaleVmGroup.State_Enabled); + _autoScaleVmGroupDao.persist(vmGroup); + s_logger.debug("LB Auto Scale Vm Group with Id: " + vmGroupid + " is set to Enabled state."); + } + }); } s_logger.info("Successfully configured LB Autoscale Vm Group with Id: " + vmGroupid); } @@ -813,7 +819,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new InvalidParameterException("Invalid Load balancer : " + healthCheckPolicy.getLoadBalancerId() + " for HealthCheck policy id: " + healthCheckPolicyId); } - long loadBalancerId = loadBalancer.getId(); + final long loadBalancerId = loadBalancer.getId(); FirewallRule.State backupState = loadBalancer.getState(); _accountMgr.checkAccess(caller.getCallingAccount(), null, true, loadBalancer); @@ -830,17 +836,19 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements + ", healthCheckpolicyID " + healthCheckPolicyId); // removing the state of services set by the monitor. 
- List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); + final List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); if (maps != null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - s_logger.debug("Resetting health state policy for services in loadbalancing rule id : " - + loadBalancerId); - for (LoadBalancerVMMapVO map : maps) { - map.setState(null); - _lb2VmMapDao.persist(map); - } - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + s_logger.debug("Resetting health state policy for services in loadbalancing rule id : " + + loadBalancerId); + for (LoadBalancerVMMapVO map : maps) { + map.setState(null); + _lb2VmMapDao.persist(map); + } + } + }); } try { @@ -957,7 +965,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements CallContext ctx = CallContext.current(); Account caller = ctx.getCallingAccount(); - LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); + final LoadBalancerVO loadBalancer = _lbDao.findById(loadBalancerId); if (loadBalancer == null) { throw new InvalidParameterValueException("Failed to assign to load balancer " + loadBalancerId + ", the load balancer was not found."); @@ -969,7 +977,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements mappedInstanceIds.add(Long.valueOf(mappedInstance.getInstanceId())); } - List vmsToAdd = new ArrayList(); + final List vmsToAdd = new ArrayList(); if (instanceIds == null || instanceIds.isEmpty()) { s_logger.warn("List of vms to assign to the lb, is empty"); @@ -1022,13 +1030,16 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements vmsToAdd.add(vm); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - for (UserVm vm : vmsToAdd) { - LoadBalancerVMMapVO map = new LoadBalancerVMMapVO(loadBalancer.getId(), vm.getId(), false); - map = _lb2VmMapDao.persist(map); - } - txn.commit(); + 
Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (UserVm vm : vmsToAdd) { + LoadBalancerVMMapVO map = new LoadBalancerVMMapVO(loadBalancer.getId(), vm.getId(), false); + map = _lb2VmMapDao.persist(map); + } + } + }); + if (_autoScaleVmGroupDao.isAutoScaleLoadBalancer(loadBalancerId)) { // For autoscaled loadbalancer, the rules need not be applied, // meaning the call need not reach the resource layer. @@ -1044,13 +1055,15 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements success = true; } catch (ResourceUnavailableException e) { if (isRollBackAllowedForProvider(loadBalancer)) { - List vmInstanceIds = new ArrayList(); - txn = Transaction.currentTxn(); - txn.start(); - for (UserVm vm : vmsToAdd) { - vmInstanceIds.add(vm.getId()); - } - txn.commit(); + final List vmInstanceIds = new ArrayList(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (UserVm vm : vmsToAdd) { + vmInstanceIds.add(vm.getId()); + } + } + }); if (!vmInstanceIds.isEmpty()) { _lb2VmMapDao.remove(loadBalancer.getId(), vmInstanceIds, null); s_logger.debug("LB Rollback rule id: " + loadBalancer.getId() + " while attaching VM: " @@ -1203,49 +1216,52 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } @DB - public boolean deleteLoadBalancerRule(long loadBalancerId, boolean apply, Account caller, long callerUserId, + public boolean deleteLoadBalancerRule(final long loadBalancerId, boolean apply, Account caller, long callerUserId, boolean rollBack) { - LoadBalancerVO lb = _lbDao.findById(loadBalancerId); - Transaction txn = Transaction.currentTxn(); - boolean generateUsageEvent = false; - boolean success = true; + final LoadBalancerVO lb = _lbDao.findById(loadBalancerId); FirewallRule.State backupState = lb.getState(); - txn.start(); - if (lb.getState() == 
FirewallRule.State.Staged) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Found a rule that is still in stage state so just removing it: " + lb); + List backupMaps = Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + boolean generateUsageEvent = false; + + if (lb.getState() == FirewallRule.State.Staged) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Found a rule that is still in stage state so just removing it: " + lb); + } + generateUsageEvent = true; + } else if (lb.getState() == FirewallRule.State.Add || lb.getState() == FirewallRule.State.Active) { + lb.setState(FirewallRule.State.Revoke); + _lbDao.persist(lb); + generateUsageEvent = true; + } + List backupMaps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); + List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); + if (maps != null) { + for (LoadBalancerVMMapVO map : maps) { + map.setRevoke(true); + _lb2VmMapDao.persist(map); + s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " + + map.getInstanceId()); + } + } + + List hcPolicies = _lb2healthcheckDao.listByLoadBalancerId(loadBalancerId); + for (LBHealthCheckPolicyVO lbHealthCheck : hcPolicies) { + lbHealthCheck.setRevoke(true); + _lb2healthcheckDao.persist(lbHealthCheck); + } + + if (generateUsageEvent) { + // Generate usage event right after all rules were marked for revoke + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_DELETE, lb.getAccountId(), 0, lb.getId(), + null, LoadBalancingRule.class.getName(), lb.getUuid()); + } + + return backupMaps; } - generateUsageEvent = true; - } else if (lb.getState() == FirewallRule.State.Add || lb.getState() == FirewallRule.State.Active) { - lb.setState(FirewallRule.State.Revoke); - _lbDao.persist(lb); - generateUsageEvent = true; - } - List backupMaps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); - List maps = _lb2VmMapDao.listByLoadBalancerId(loadBalancerId); 
- if (maps != null) { - for (LoadBalancerVMMapVO map : maps) { - map.setRevoke(true); - _lb2VmMapDao.persist(map); - s_logger.debug("Set load balancer rule for revoke: rule id " + loadBalancerId + ", vmId " - + map.getInstanceId()); - } - } - - List hcPolicies = _lb2healthcheckDao.listByLoadBalancerId(loadBalancerId); - for (LBHealthCheckPolicyVO lbHealthCheck : hcPolicies) { - lbHealthCheck.setRevoke(true); - _lb2healthcheckDao.persist(lbHealthCheck); - } - - if (generateUsageEvent) { - // Generate usage event right after all rules were marked for revoke - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_DELETE, lb.getAccountId(), 0, lb.getId(), - null, LoadBalancingRule.class.getName(), lb.getUuid()); - } - - txn.commit(); + }); // gather external network usage stats for this lb rule NetworkVO network = _networkDao.findById(lb.getNetworkId()); @@ -1283,7 +1299,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (relatedRule != null) { s_logger.warn("Unable to remove firewall rule id=" + lb.getId() + " as it has related firewall rule id=" + relatedRule.getId() + "; leaving it in Revoke state"); - success = false; + return false; } else { _firewallMgr.removeRule(lb); } @@ -1293,11 +1309,9 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements // Bug CS-15411 opened to document this // _elbMgr.handleDeleteLoadBalancerRule(lb, callerUserId, caller); - if (success) { - s_logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); - } - - return success; + s_logger.debug("Load balancer with id " + lb.getId() + " is removed successfully"); + + return true; } @Override @@ -1392,8 +1406,8 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @DB @Override - public LoadBalancer createPublicLoadBalancer(String xId, String name, String description, - int srcPort, int destPort, long sourceIpId, String protocol, String algorithm, boolean openFirewall, CallContext 
caller) + public LoadBalancer createPublicLoadBalancer(final String xId, final String name, final String description, + final int srcPort, final int destPort, final long sourceIpId, final String protocol, final String algorithm, final boolean openFirewall, final CallContext caller) throws NetworkRuleConflictException { if (!NetUtils.isValidPort(destPort)) { @@ -1404,7 +1418,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements throw new InvalidParameterValueException("Invalid algorithm: " + algorithm); } - IPAddressVO ipAddr = _ipAddressDao.findById(sourceIpId); + final IPAddressVO ipAddr = _ipAddressDao.findById(sourceIpId); // make sure ip address exists if (ipAddr == null || !ipAddr.readyToUse()) { InvalidParameterValueException ex = new InvalidParameterValueException( @@ -1426,7 +1440,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _accountMgr.checkAccess(caller.getCallingAccount(), null, true, ipAddr); - Long networkId = ipAddr.getAssociatedWithNetworkId(); + final Long networkId = ipAddr.getAssociatedWithNetworkId(); if (networkId == null) { InvalidParameterValueException ex = new InvalidParameterValueException( "Unable to create load balancer rule ; specified sourceip id is not associated with any network"); @@ -1440,61 +1454,60 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements _firewallMgr.validateFirewallRule(caller.getCallingAccount(), ipAddr, srcPort, srcPort, protocol, Purpose.LoadBalancing, FirewallRuleType.User, networkId, null); - LoadBalancerVO newRule = new LoadBalancerVO(xId, name, description, - sourceIpId, srcPort, destPort, algorithm, - networkId, ipAddr.getAllocatedToAccountId(), ipAddr.getAllocatedInDomainId()); - // verify rule is supported by Lb provider of the network - Ip sourceIp = getSourceIp(newRule); - LoadBalancingRule loadBalancing = new LoadBalancingRule(newRule, new ArrayList(), - new ArrayList(), new ArrayList(), sourceIp); - if 
(!validateLbRule(loadBalancing)) { - throw new InvalidParameterValueException("LB service provider cannot support this rule"); - } + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public LoadBalancerVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + LoadBalancerVO newRule = new LoadBalancerVO(xId, name, description, + sourceIpId, srcPort, destPort, algorithm, + networkId, ipAddr.getAllocatedToAccountId(), ipAddr.getAllocatedInDomainId()); - Transaction txn = Transaction.currentTxn(); - txn.start(); + // verify rule is supported by Lb provider of the network + Ip sourceIp = getSourceIp(newRule); + LoadBalancingRule loadBalancing = new LoadBalancingRule(newRule, new ArrayList(), + new ArrayList(), new ArrayList(), sourceIp); + if (!validateLbRule(loadBalancing)) { + throw new InvalidParameterValueException("LB service provider cannot support this rule"); + } + + newRule = _lbDao.persist(newRule); - newRule = _lbDao.persist(newRule); - - //create rule for all CIDRs - if (openFirewall) { - _firewallMgr.createRuleForAllCidrs(sourceIpId, caller.getCallingAccount(), srcPort, - srcPort, protocol, null, null, newRule.getId(), networkId); - } - - boolean success = true; - - try { - _firewallMgr.detectRulesConflict(newRule); - if (!_firewallDao.setStateToAdd(newRule)) { - throw new CloudRuntimeException("Unable to update the state to add for " + newRule); + //create rule for all CIDRs + if (openFirewall) { + _firewallMgr.createRuleForAllCidrs(sourceIpId, caller.getCallingAccount(), srcPort, + srcPort, protocol, null, null, newRule.getId(), networkId); + } + + boolean success = true; + + try { + _firewallMgr.detectRulesConflict(newRule); + if (!_firewallDao.setStateToAdd(newRule)) { + throw new CloudRuntimeException("Unable to update the state to add for " + newRule); + } + s_logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " + + srcPort + ", private port " 
+ destPort + " is added successfully."); + CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), + ipAddr.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), + newRule.getUuid()); + + return newRule; + } catch (Exception e) { + success = false; + if (e instanceof NetworkRuleConflictException) { + throw (NetworkRuleConflictException) e; + } + throw new CloudRuntimeException("Unable to add rule for ip address id=" + newRule.getSourceIpAddressId(), e); + } finally { + if (!success && newRule != null) { + _firewallMgr.revokeRelatedFirewallRule(newRule.getId(), false); + removeLBRule(newRule); + } + } } - s_logger.debug("Load balancer " + newRule.getId() + " for Ip address id=" + sourceIpId + ", public port " - + srcPort + ", private port " + destPort + " is added successfully."); - CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, ipAddr.getAllocatedToAccountId(), - ipAddr.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), - newRule.getUuid()); - txn.commit(); + }); - return newRule; - } catch (Exception e) { - success = false; - if (e instanceof NetworkRuleConflictException) { - throw (NetworkRuleConflictException) e; - } - throw new CloudRuntimeException("Unable to add rule for ip address id=" + newRule.getSourceIpAddressId(), e); - } finally { - if (!success && newRule != null) { - - txn.start(); - _firewallMgr.revokeRelatedFirewallRule(newRule.getId(), false); - removeLBRule(newRule); - - txn.commit(); - } - } } @Override @@ -1583,7 +1596,6 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements @DB protected boolean applyLoadBalancerRules(List lbs, boolean updateRulesInDB) throws ResourceUnavailableException { - Transaction txn = Transaction.currentTxn(); List 
rules = new ArrayList(); for (LoadBalancerVO lb : lbs) { rules.add(getLoadBalancerRuleToApply(lb)); @@ -1595,57 +1607,63 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } if (updateRulesInDB) { - for (LoadBalancerVO lb : lbs) { - boolean checkForReleaseElasticIp = false; - txn.start(); - if (lb.getState() == FirewallRule.State.Revoke) { - removeLBRule(lb); - s_logger.debug("LB " + lb.getId() + " is successfully removed"); - checkForReleaseElasticIp = true; - } else if (lb.getState() == FirewallRule.State.Add) { - lb.setState(FirewallRule.State.Active); - s_logger.debug("LB rule " + lb.getId() + " state is set to Active"); - _lbDao.persist(lb); - } + for (final LoadBalancerVO lb : lbs) { + boolean checkForReleaseElasticIp = Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + boolean checkForReleaseElasticIp = false; - // remove LB-Vm mappings that were state to revoke - List lbVmMaps = _lb2VmMapDao.listByLoadBalancerId(lb.getId(), true); - List instanceIds = new ArrayList(); + if (lb.getState() == FirewallRule.State.Revoke) { + removeLBRule(lb); + s_logger.debug("LB " + lb.getId() + " is successfully removed"); + checkForReleaseElasticIp = true; + } else if (lb.getState() == FirewallRule.State.Add) { + lb.setState(FirewallRule.State.Active); + s_logger.debug("LB rule " + lb.getId() + " state is set to Active"); + _lbDao.persist(lb); + } + + // remove LB-Vm mappings that were state to revoke + List lbVmMaps = _lb2VmMapDao.listByLoadBalancerId(lb.getId(), true); + List instanceIds = new ArrayList(); + + for (LoadBalancerVMMapVO lbVmMap : lbVmMaps) { + instanceIds.add(lbVmMap.getInstanceId()); + } + + if (!instanceIds.isEmpty()) { + _lb2VmMapDao.remove(lb.getId(), instanceIds, null); + s_logger.debug("Load balancer rule id " + lb.getId() + " is removed for vms " + instanceIds); + } + + if (_lb2VmMapDao.listByLoadBalancerId(lb.getId()).isEmpty()) { + 
lb.setState(FirewallRule.State.Add); + _lbDao.persist(lb); + s_logger.debug("LB rule " + lb.getId() + + " state is set to Add as there are no more active LB-VM mappings"); + } + + // remove LB-Stickiness policy mapping that were state to revoke + List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId( + lb.getId(), true); + if (!stickinesspolicies.isEmpty()) { + _lb2stickinesspoliciesDao.remove(lb.getId(), true); + s_logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); + } + + // remove LB-HealthCheck policy mapping that were state to + // revoke + List healthCheckpolicies = _lb2healthcheckDao.listByLoadBalancerId(lb.getId(), + true); + if (!healthCheckpolicies.isEmpty()) { + _lb2healthcheckDao.remove(lb.getId(), true); + s_logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); + } + + return checkForReleaseElasticIp; + } + }); - for (LoadBalancerVMMapVO lbVmMap : lbVmMaps) { - instanceIds.add(lbVmMap.getInstanceId()); - } - - if (!instanceIds.isEmpty()) { - _lb2VmMapDao.remove(lb.getId(), instanceIds, null); - s_logger.debug("Load balancer rule id " + lb.getId() + " is removed for vms " + instanceIds); - } - - if (_lb2VmMapDao.listByLoadBalancerId(lb.getId()).isEmpty()) { - lb.setState(FirewallRule.State.Add); - _lbDao.persist(lb); - s_logger.debug("LB rule " + lb.getId() - + " state is set to Add as there are no more active LB-VM mappings"); - } - - // remove LB-Stickiness policy mapping that were state to revoke - List stickinesspolicies = _lb2stickinesspoliciesDao.listByLoadBalancerId( - lb.getId(), true); - if (!stickinesspolicies.isEmpty()) { - _lb2stickinesspoliciesDao.remove(lb.getId(), true); - s_logger.debug("Load balancer rule id " + lb.getId() + " is removed stickiness policies"); - } - - // remove LB-HealthCheck policy mapping that were state to - // revoke - List healthCheckpolicies = _lb2healthcheckDao.listByLoadBalancerId(lb.getId(), - true); - if 
(!healthCheckpolicies.isEmpty()) { - _lb2healthcheckDao.remove(lb.getId(), true); - s_logger.debug("Load balancer rule id " + lb.getId() + " is removed health check monitors policies"); - } - - txn.commit(); if (checkForReleaseElasticIp && lb.getSourceIpAddressId() != null) { boolean success = true; long count = _firewallDao.countRulesByIpId(lb.getSourceIpAddressId()); @@ -2036,7 +2054,7 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.LoadBalancer.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.LoadBalancer.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -2154,4 +2172,13 @@ public class LoadBalancingRulesManagerImpl extends ManagerBase implements } } + public List getLbProviders() { + return _lbProviders; + } + + @Inject + public void setLbProviders(List lbProviders) { + this._lbProviders = lbProviders; + } + } diff --git a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java index 7c026a4cb80..a93480be56d 100755 --- a/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VirtualNetworkApplianceManagerImpl.java @@ -41,16 +41,15 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; - import org.apache.cloudstack.api.command.admin.router.UpgradeRouterCmd; import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.context.ServerContexts; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import 
org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.identity.ManagementServerNode; +import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.agent.Listener; @@ -155,7 +154,7 @@ import com.cloud.network.Site2SiteVpnConnection; import com.cloud.network.SshKeysDistriMonitor; import com.cloud.network.VirtualNetworkApplianceService; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.VpnUser; import com.cloud.network.VpnUserVO; import com.cloud.network.addr.PublicIp; @@ -202,7 +201,7 @@ import com.cloud.server.ConfigurationServer; import com.cloud.service.ServiceOfferingVO; import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.VMTemplateVO; -import com.cloud.storage.Volume.Type; +import com.cloud.storage.Volume; import com.cloud.storage.VolumeVO; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.VMTemplateDao; @@ -231,6 +230,8 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.MacAddress; @@ -579,34 +580,30 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V @DB public void processStopOrRebootAnswer(final DomainRouterVO router, Answer answer) { - final Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - //FIXME!!! 
- UserStats command should grab bytesSent/Received for all guest interfaces of the VR - List routerGuestNtwkIds = _routerDao.getRouterNetworks(router.getId()); - for (Long guestNtwkId : routerGuestNtwkIds) { - final UserStatisticsVO userStats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterId(), - guestNtwkId, null, router.getId(), router.getType().toString()); - if (userStats != null) { - final long currentBytesRcvd = userStats.getCurrentBytesReceived(); - userStats.setCurrentBytesReceived(0); - userStats.setNetBytesReceived(userStats.getNetBytesReceived() + currentBytesRcvd); - - final long currentBytesSent = userStats.getCurrentBytesSent(); - userStats.setCurrentBytesSent(0); - userStats.setNetBytesSent(userStats.getNetBytesSent() + currentBytesSent); - _userStatsDao.update(userStats.getId(), userStats); - s_logger.debug("Successfully updated user statistics as a part of domR " + router + " reboot/stop"); - } else { - s_logger.warn("User stats were not created for account " + router.getAccountId() + " and dc " + router.getDataCenterId()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + //FIXME!!! 
- UserStats command should grab bytesSent/Received for all guest interfaces of the VR + List routerGuestNtwkIds = _routerDao.getRouterNetworks(router.getId()); + for (Long guestNtwkId : routerGuestNtwkIds) { + final UserStatisticsVO userStats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterId(), + guestNtwkId, null, router.getId(), router.getType().toString()); + if (userStats != null) { + final long currentBytesRcvd = userStats.getCurrentBytesReceived(); + userStats.setCurrentBytesReceived(0); + userStats.setNetBytesReceived(userStats.getNetBytesReceived() + currentBytesRcvd); + + final long currentBytesSent = userStats.getCurrentBytesSent(); + userStats.setCurrentBytesSent(0); + userStats.setNetBytesSent(userStats.getNetBytesSent() + currentBytesSent); + _userStatsDao.update(userStats.getId(), userStats); + s_logger.debug("Successfully updated user statistics as a part of domR " + router + " reboot/stop"); + } else { + s_logger.warn("User stats were not created for account " + router.getAccountId() + " and dc " + router.getDataCenterId()); + } + } } - } - - txn.commit(); - } catch (final Exception e) { - txn.rollback(); - throw new CloudRuntimeException("Problem updating stats after reboot/stop ", e); - } + }); } @Override @ActionEvent(eventType = EventTypes.EVENT_ROUTER_REBOOT, eventDescription = "rebooting router Vm", async = true) @@ -697,7 +694,6 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V s_logger.info("Router configurations: " + "ramsize=" + _routerRamSize); _agentMgr.registerForHostEvents(new SshKeysDistriMonitor(_agentMgr, _hostDao, _configDao), true, false, false); - _itMgr.registerGuru(VirtualMachine.Type.DomainRouter, this); boolean useLocalStorage = Boolean.parseBoolean(configs.get(Config.SystemVMUseLocalStorage.key())); _offering = new ServiceOfferingVO("System Offering For Software Router", 1, _routerRamSize, _routerCpuMHz, null, @@ -852,33 +848,32 @@ public class 
VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } - protected class NetworkUsageTask implements Runnable { + protected class NetworkUsageTask extends ManagedContextRunnable { public NetworkUsageTask() { } @Override - public void run() { - ServerContexts.registerSystemContext(); + protected void runInContext() { try{ final List routers = _routerDao.listByStateAndNetworkType(State.Running, GuestType.Isolated, mgmtSrvrId); s_logger.debug("Found " + routers.size() + " running routers. "); - for (DomainRouterVO router : routers) { + for (final DomainRouterVO router : routers) { String privateIP = router.getPrivateIpAddress(); if (privateIP != null) { - boolean forVpc = router.getVpcId() != null; + final boolean forVpc = router.getVpcId() != null; List routerNics = _nicDao.listByVmId(router.getId()); - for (Nic routerNic : routerNics) { - Network network = _networkModel.getNetwork(routerNic.getNetworkId()); + for (final Nic routerNic : routerNics) { + final Network network = _networkModel.getNetwork(routerNic.getNetworkId()); //Send network usage command for public nic in VPC VR //Send network usage command for isolated guest nic of non VPC VR if ((forVpc && network.getTrafficType() == TrafficType.Public) || (!forVpc && network.getTrafficType() == TrafficType.Guest && network.getGuestType() == Network.GuestType.Isolated)) { final NetworkUsageCommand usageCmd = new NetworkUsageCommand(privateIP, router.getHostName(), forVpc, routerNic.getIp4Address()); - String routerType = router.getType().toString(); - UserStatisticsVO previousStats = _userStatsDao.findBy(router.getAccountId(), + final String routerType = router.getType().toString(); + final UserStatisticsVO previousStats = _userStatsDao.findBy(router.getAccountId(), router.getDataCenterId(), network.getId(), (forVpc ? 
routerNic.getIp4Address() : null), router.getId(), routerType); NetworkUsageAnswer answer = null; try { @@ -893,62 +888,63 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + "; details: " + answer.getDetails()); continue; } - Transaction txn = Transaction.open(Transaction.CLOUD_DB); try { if ((answer.getBytesReceived() == 0) && (answer.getBytesSent() == 0)) { s_logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics"); continue; } - txn.start(); - UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), - router.getDataCenterId(), network.getId(), (forVpc ? routerNic.getIp4Address() : null), router.getId(), routerType); - if (stats == null) { - s_logger.warn("unable to find stats for account: " + router.getAccountId()); - continue; - } - - if (previousStats != null - && ((previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived()) - || (previousStats.getCurrentBytesSent() != stats.getCurrentBytesSent()))) { - s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + - "Ignoring current answer. Router: " + answer.getRouterName() + " Rcvd: " + - answer.getBytesReceived() + "Sent: " + answer.getBytesSent()); - continue; - } - - if (stats.getCurrentBytesReceived() > answer.getBytesReceived()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. 
Router: " + - answer.getRouterName() + " Reported: " + answer.getBytesReceived() - + " Stored: " + stats.getCurrentBytesReceived()); + final NetworkUsageAnswer answerFinal = answer; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), + router.getDataCenterId(), network.getId(), (forVpc ? routerNic.getIp4Address() : null), router.getId(), routerType); + if (stats == null) { + s_logger.warn("unable to find stats for account: " + router.getAccountId()); + return; + } + + if (previousStats != null + && ((previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived()) + || (previousStats.getCurrentBytesSent() != stats.getCurrentBytesSent()))) { + s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + + "Ignoring current answer. Router: " + answerFinal.getRouterName() + " Rcvd: " + + answerFinal.getBytesReceived() + "Sent: " + answerFinal.getBytesSent()); + return; + } + + if (stats.getCurrentBytesReceived() > answerFinal.getBytesReceived()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Received # of bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Router: " + + answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesReceived() + + " Stored: " + stats.getCurrentBytesReceived()); + } + stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); + } + stats.setCurrentBytesReceived(answerFinal.getBytesReceived()); + if (stats.getCurrentBytesSent() > answerFinal.getBytesSent()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Received # of bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. 
Router: " + + answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesSent() + + " Stored: " + stats.getCurrentBytesSent()); + } + stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); + } + stats.setCurrentBytesSent(answerFinal.getBytesSent()); + if (! _dailyOrHourly) { + //update agg bytes + stats.setAggBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); + stats.setAggBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); + } + _userStatsDao.update(stats.getId(), stats); } - stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); - } - stats.setCurrentBytesReceived(answer.getBytesReceived()); - if (stats.getCurrentBytesSent() > answer.getBytesSent()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Router: " + - answer.getRouterName() + " Reported: " + answer.getBytesSent() - + " Stored: " + stats.getCurrentBytesSent()); - } - stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); - } - stats.setCurrentBytesSent(answer.getBytesSent()); - if (! 
_dailyOrHourly) { - //update agg bytes - stats.setAggBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); - stats.setAggBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); - } - _userStatsDao.update(stats.getId(), stats); - txn.commit(); + }); + } catch (Exception e) { - txn.rollback(); s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + answer.getBytesReceived() + "; Tx: " + answer.getBytesSent()); - } finally { - txn.close(); } } } @@ -957,19 +953,17 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } } catch (Exception e) { s_logger.warn("Error while collecting network stats", e); - } finally { - ServerContexts.unregisterSystemContext(); } } } - protected class NetworkStatsUpdateTask implements Runnable { + protected class NetworkStatsUpdateTask extends ManagedContextRunnable { public NetworkStatsUpdateTask() { } @Override - public void run() { + protected void runInContext() { GlobalLock scanLock = GlobalLock.getInternLock("network.stats"); try { if(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { @@ -981,30 +975,30 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V scanLock.unlock(); return; } - Transaction txn = Transaction.open(Transaction.CLOUD_DB); try { - txn.start(); - //get all stats with delta > 0 - List updatedStats = _userStatsDao.listUpdatedStats(); - Date updatedTime = new Date(); - for(UserStatisticsVO stat : updatedStats){ - //update agg bytes - stat.setAggBytesReceived(stat.getCurrentBytesReceived() + stat.getNetBytesReceived()); - stat.setAggBytesSent(stat.getCurrentBytesSent() + stat.getNetBytesSent()); - _userStatsDao.update(stat.getId(), stat); - //insert into op_user_stats_log - UserStatsLogVO statsLog = new UserStatsLogVO(stat.getId(), stat.getNetBytesReceived(), stat.getNetBytesSent(), stat.getCurrentBytesReceived(), - stat.getCurrentBytesSent(), stat.getAggBytesReceived(), 
stat.getAggBytesSent(), updatedTime); - _userStatsLogDao.persist(statsLog); - } - s_logger.debug("Successfully updated aggregate network stats"); - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + //get all stats with delta > 0 + List updatedStats = _userStatsDao.listUpdatedStats(); + Date updatedTime = new Date(); + for(UserStatisticsVO stat : updatedStats){ + //update agg bytes + stat.setAggBytesReceived(stat.getCurrentBytesReceived() + stat.getNetBytesReceived()); + stat.setAggBytesSent(stat.getCurrentBytesSent() + stat.getNetBytesSent()); + _userStatsDao.update(stat.getId(), stat); + //insert into op_user_stats_log + UserStatsLogVO statsLog = new UserStatsLogVO(stat.getId(), stat.getNetBytesReceived(), stat.getNetBytesSent(), stat.getCurrentBytesReceived(), + stat.getCurrentBytesSent(), stat.getAggBytesReceived(), stat.getAggBytesSent(), updatedTime); + _userStatsLogDao.persist(statsLog); + } + s_logger.debug("Successfully updated aggregate network stats"); + } + }); } catch (Exception e){ - txn.rollback(); s_logger.debug("Failed to update aggregate network stats", e); } finally { scanLock.unlock(); - txn.close(); } } } catch (Exception e){ @@ -1141,17 +1135,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } } if (updated) { - Transaction txn = Transaction.open(Transaction.CLOUD_DB); - try { - txn.start(); - _routerDao.update(router.getId(), router); - txn.commit(); - } catch (Exception e) { - txn.rollback(); - s_logger.warn("Unable to update router status for account: " + router.getAccountId()); - } finally { - txn.close(); - } + _routerDao.update(router.getId(), router); } RedundantState currState = router.getRedundantState(); if (prevState != currState) { @@ -1201,7 +1185,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V return priority; } - protected class RvRStatusUpdateTask 
implements Runnable { + protected class RvRStatusUpdateTask extends ManagedContextRunnable { public RvRStatusUpdateTask() { } @@ -1280,60 +1264,54 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } @Override - public void run() { - ServerContexts.registerSystemContext(); - try { - while (true) { - try { - Long networkId = _vrUpdateQueue.take(); // This is a blocking call so this thread won't run all the time if no work item in queue. - List routers = _routerDao.listByNetworkAndRole(networkId, Role.VIRTUAL_ROUTER); - - if (routers.size() != 2) { - continue; - } - /* - * We update the router pair which the lower id router owned by this mgmt server, in order - * to prevent duplicate update of router status from cluster mgmt servers - */ - DomainRouterVO router0 = routers.get(0); - DomainRouterVO router1 = routers.get(1); - DomainRouterVO router = router0; - if ((router0.getId() < router1.getId()) && router0.getHostId() != null) { - router = router0; - } else { - router = router1; - } - if (router.getHostId() == null) { - s_logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to can't find host"); - continue; - } - HostVO host = _hostDao.findById(router.getHostId()); - if (host == null || host.getManagementServerId() == null || - host.getManagementServerId() != ManagementServerNode.getManagementServerId()) { - s_logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to not belong to this mgmt server"); - continue; - } - updateRoutersRedundantState(routers); - checkDuplicateMaster(routers); - checkSanity(routers); - } catch (Exception ex) { - s_logger.error("Fail to complete the RvRStatusUpdateTask! ", ex); - } + protected void runInContext() { + while (true) { + try { + Long networkId = _vrUpdateQueue.take(); // This is a blocking call so this thread won't run all the time if no work item in queue. 
+ List routers = _routerDao.listByNetworkAndRole(networkId, Role.VIRTUAL_ROUTER); + + if (routers.size() != 2) { + continue; + } + /* + * We update the router pair which the lower id router owned by this mgmt server, in order + * to prevent duplicate update of router status from cluster mgmt servers + */ + DomainRouterVO router0 = routers.get(0); + DomainRouterVO router1 = routers.get(1); + DomainRouterVO router = router0; + if ((router0.getId() < router1.getId()) && router0.getHostId() != null) { + router = router0; + } else { + router = router1; + } + if (router.getHostId() == null) { + s_logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to can't find host"); + continue; + } + HostVO host = _hostDao.findById(router.getHostId()); + if (host == null || host.getManagementServerId() == null || + host.getManagementServerId() != ManagementServerNode.getManagementServerId()) { + s_logger.debug("Skip router pair (" + router0.getInstanceName() + "," + router1.getInstanceName() + ") due to not belong to this mgmt server"); + continue; + } + updateRoutersRedundantState(routers); + checkDuplicateMaster(routers); + checkSanity(routers); + } catch (Exception ex) { + s_logger.error("Fail to complete the RvRStatusUpdateTask! ", ex); } - } finally { - ServerContexts.unregisterSystemContext(); } } } - protected class CheckRouterTask implements Runnable { + protected class CheckRouterTask extends ManagedContextRunnable { public CheckRouterTask() { } @Override - public void run() { - ServerContexts.registerSystemContext(); + protected void runInContext() { try { final List routers = _routerDao.listIsolatedByHostId(null); s_logger.debug("Found " + routers.size() + " routers to update status. "); @@ -1350,8 +1328,6 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } } catch (Exception ex) { s_logger.error("Fail to complete the CheckRouterTask! 
", ex); - } finally { - ServerContexts.unregisterSystemContext(); } } } @@ -1424,6 +1400,26 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V throw new CloudRuntimeException(errMsg); } + private void checkAndResetPriorityOfRedundantRouter(List routers) { + boolean allStopped = true; + for (DomainRouterVO router : routers) { + if (!router.getIsRedundantRouter() || router.getState() != VirtualMachine.State.Stopped) { + allStopped = false; + break; + } + } + if (!allStopped) { + return; + } + + for (DomainRouterVO router : routers) { + // getUpdatedPriority() would update the value later + router.setPriority(0); + router.setIsPriorityBumpUp(false); + _routerDao.update(router.getId(), router); + } + } + @DB protected List findOrDeployVirtualRouterInGuestNetwork(Network guestNetwork, DeployDestination dest, Account owner, boolean isRedundant, Map params) throws ConcurrentOperationException, @@ -1494,6 +1490,10 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V int routerCount = 1; if (isRedundant) { routerCount = 2; + //Check current redundant routers, if possible(all routers are stopped), reset the priority + if (routers.size() != 0) { + checkAndResetPriorityOfRedundantRouter(routers); + } } // If old network is redundant but new is single router, then routers.size() = 2 but routerCount = 1 @@ -1506,7 +1506,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } // Check if providers are supported in the physical networks - VirtualRouterProviderType type = VirtualRouterProviderType.VirtualRouter; + Type type = Type.VirtualRouter; Long physicalNetworkId = _networkModel.getPhysicalNetworkId(guestNetwork); PhysicalNetworkServiceProvider provider = _physicalProviderDao.findByServiceProvider(physicalNetworkId, type.toString()); if (provider == null) { @@ -1930,7 +1930,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V avoids[1] = new 
ExcludeList(); avoids[1].addCluster(_hostDao.findById(routerToBeAvoid.getHostId()).getClusterId()); avoids[2] = new ExcludeList(); - List volumes = _volumeDao.findByInstanceAndType(routerToBeAvoid.getId(), Type.ROOT); + List volumes = _volumeDao.findByInstanceAndType(routerToBeAvoid.getId(), Volume.Type.ROOT); if (volumes != null && volumes.size() != 0) { avoids[2].addPool(volumes.get(0).getPoolId()); } @@ -2008,7 +2008,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V //1) Set router details DomainRouterVO router = _routerDao.findById(profile.getVirtualMachine().getId()); - Map details = _vmDetailsDao.findDetails(router.getId()); + Map details = _vmDetailsDao.listDetailsKeyPairs(router.getId()); router.setDetails(details); //2) Prepare boot loader elements related with Control network @@ -2471,7 +2471,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V s_logger.debug("Found " + vpns.size() + " vpn(s) to apply as a part of domR " + router + " start."); if (!vpns.isEmpty()) { for (RemoteAccessVpn vpn : vpns) { - createApplyVpnCommands(vpn, router, cmds); + createApplyVpnCommands(true, vpn, router, cmds); } } @@ -2673,7 +2673,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } Commands cmds = new Commands(Command.OnError.Stop); - createApplyVpnCommands(vpn, router, cmds); + createApplyVpnCommands(true, vpn, router, cmds); try { _agentMgr.send(router.getHostId(), cmds); @@ -2717,19 +2717,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V for (VirtualRouter router : routers) { if (router.getState() == State.Running) { Commands cmds = new Commands(Command.OnError.Continue); - IpAddress ip = _networkModel.getIp(vpn.getServerAddressId()); - - RemoteAccessVpnCfgCommand removeVpnCmd = new RemoteAccessVpnCfgCommand(false, ip.getAddress().addr(), - vpn.getLocalIp(), vpn.getIpRange(), vpn.getIpsecPresharedKey()); - 
removeVpnCmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, getRouterControlIp(router.getId())); - removeVpnCmd.setAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP, getRouterIpInNetwork(network.getId(), router.getId())); - removeVpnCmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); - - DataCenterVO dcVo = _dcDao.findById(router.getDataCenterId()); - removeVpnCmd.setAccessDetail(NetworkElementCommand.ZONE_NETWORK_TYPE, dcVo.getNetworkType().toString()); - - cmds.addCommand(removeVpnCmd); - + createApplyVpnCommands(false, vpn, router, cmds); result = result && sendCommandsToRouter(router, cmds); } else if (router.getState() == State.Stopped) { s_logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); @@ -2859,12 +2847,15 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V configDnsMasq(router, network, cmds); boolean result = sendCommandsToRouter(router, cmds); if (result == false) { - NicIpAliasVO ipAliasVO = _nicIpAliasDao.findByInstanceIdAndNetworkId(network.getId(), router.getId()); - Transaction txn = Transaction.currentTxn(); - txn.start(); - _nicIpAliasDao.expunge(ipAliasVO.getId()); - _ipAddressDao.unassignIpAddress(routerPublicIP.getId()); - txn.commit(); + final NicIpAliasVO ipAliasVO = _nicIpAliasDao.findByInstanceIdAndNetworkId(network.getId(), router.getId()); + final PublicIp routerPublicIPFinal = routerPublicIP; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _nicIpAliasDao.expunge(ipAliasVO.getId()); + _ipAddressDao.unassignIpAddress(routerPublicIPFinal.getId()); + } + }); throw new CloudRuntimeException("failed to configure ip alias on the router as a part of dhcp config"); } } @@ -2889,7 +2880,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } Commands cmds = new Commands(Command.OnError.Continue); - 
List revokedIpAliasVOs = _nicIpAliasDao.listByNetworkIdAndState(network.getId(), NicIpAlias.state.revoked); + final List revokedIpAliasVOs = _nicIpAliasDao.listByNetworkIdAndState(network.getId(), NicIpAlias.state.revoked); s_logger.debug("Found" + revokedIpAliasVOs.size() + "ip Aliases to revoke on the router as a part of dhcp configuration"); List revokedIpAliasTOs = new ArrayList(); for (NicIpAliasVO revokedAliasVO : revokedIpAliasVOs) { @@ -2905,12 +2896,14 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V configDnsMasq(router, network, cmds); boolean result = sendCommandsToRouter(router, cmds); if (result) { - Transaction txn= Transaction.currentTxn(); - txn.start(); - for (NicIpAliasVO revokedAliasVO : revokedIpAliasVOs) { - _nicIpAliasDao.expunge(revokedAliasVO.getId()); - } - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (NicIpAliasVO revokedAliasVO : revokedIpAliasVOs) { + _nicIpAliasDao.expunge(revokedAliasVO.getId()); + } + } + }); return true; } } @@ -3030,6 +3023,28 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V }); } + protected void createApplyVpnUsersCommand(List users, VirtualRouter router, Commands cmds) + { + List addUsers = new ArrayList(); + List removeUsers = new ArrayList(); + for (VpnUser user : users) { + if (user.getState() == VpnUser.State.Add || user.getState() == VpnUser.State.Active) { + addUsers.add(user); + } else if (user.getState() == VpnUser.State.Revoke) { + removeUsers.add(user); + } + } + + VpnUsersCfgCommand cmd = new VpnUsersCfgCommand(addUsers, removeUsers); + cmd.setAccessDetail(NetworkElementCommand.ACCOUNT_ID, String.valueOf(router.getAccountId())); + cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, getRouterControlIp(router.getId())); + cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); + DataCenterVO 
dcVo = _dcDao.findById(router.getDataCenterId()); + cmd.setAccessDetail(NetworkElementCommand.ZONE_NETWORK_TYPE, dcVo.getNetworkType().toString()); + + cmds.addCommand("users", cmd); + } + @Override //FIXME add partial success and STOP state support public String[] applyVpnUsers(Network network, List users, List routers) throws ResourceUnavailableException { @@ -3049,27 +3064,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } Commands cmds = new Commands(Command.OnError.Continue); - List addUsers = new ArrayList(); - List removeUsers = new ArrayList(); - for (VpnUser user : users) { - if (user.getState() == VpnUser.State.Add || user.getState() == VpnUser.State.Active) { - addUsers.add(user); - } else if (user.getState() == VpnUser.State.Revoke) { - removeUsers.add(user); - } - } - - VpnUsersCfgCommand cmd = new VpnUsersCfgCommand(addUsers, removeUsers); - cmd.setAccessDetail(NetworkElementCommand.ACCOUNT_ID, String.valueOf(router.getAccountId())); - cmd.setAccessDetail(NetworkElementCommand.GUEST_NETWORK_CIDR, network.getCidr()); - cmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, getRouterControlIp(router.getId())); - cmd.setAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP, getRouterIpInNetwork(network.getId(), router.getId())); - cmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); - DataCenterVO dcVo = _dcDao.findById(router.getDataCenterId()); - cmd.setAccessDetail(NetworkElementCommand.ZONE_NETWORK_TYPE, dcVo.getNetworkType().toString()); - - cmds.addCommand(cmd); - + createApplyVpnUsersCommand(users, router, cmds); // Currently we receive just one answer from the agent. 
In the future we have to parse individual answers and set // results accordingly @@ -3326,9 +3321,10 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V else { maxconn = offering.getConcurrentConnections().toString(); } + LoadBalancerConfigCommand cmd = new LoadBalancerConfigCommand(lbs, routerPublicIp, getRouterIpInNetwork(guestNetworkId, router.getId()), router.getPrivateIpAddress(), - _itMgr.toNicTO(nicProfile, router.getHypervisorType()), router.getVpcId(), maxconn); + _itMgr.toNicTO(nicProfile, router.getHypervisorType()), router.getVpcId(), maxconn, offering.isKeepAliveEnabled()); cmd.lbStatsVisibility = _configDao.getValue(Config.NetworkLBHaproxyStatsVisbility.key()); cmd.lbStatsUri = _configDao.getValue(Config.NetworkLBHaproxyStatsUri.key()); @@ -3345,34 +3341,28 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V } - private void createApplyVpnCommands(RemoteAccessVpn vpn, VirtualRouter router, Commands cmds) { + protected String getVpnCidr(RemoteAccessVpn vpn) + { + Network network = _networkDao.findById(vpn.getNetworkId()); + return network.getCidr(); + } + + protected void createApplyVpnCommands(boolean isCreate, RemoteAccessVpn vpn, VirtualRouter router, Commands cmds) { List vpnUsers = _vpnUsersDao.listByAccount(vpn.getAccountId()); - List addUsers = new ArrayList(); - List removeUsers = new ArrayList(); - for (VpnUser user : vpnUsers) { - if (user.getState() == VpnUser.State.Add) { - addUsers.add(user); - } else if (user.getState() == VpnUser.State.Revoke) { - removeUsers.add(user); - } - } - VpnUsersCfgCommand addUsersCmd = new VpnUsersCfgCommand(addUsers, removeUsers); - addUsersCmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, getRouterControlIp(router.getId())); - addUsersCmd.setAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP, getRouterIpInNetwork(vpn.getNetworkId(), router.getId())); - addUsersCmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); + 
createApplyVpnUsersCommand(vpnUsers, router, cmds); IpAddress ip = _networkModel.getIp(vpn.getServerAddressId()); - RemoteAccessVpnCfgCommand startVpnCmd = new RemoteAccessVpnCfgCommand(true, ip.getAddress().addr(), - vpn.getLocalIp(), vpn.getIpRange(), vpn.getIpsecPresharedKey()); + String cidr = getVpnCidr(vpn); + RemoteAccessVpnCfgCommand startVpnCmd = new RemoteAccessVpnCfgCommand(isCreate, ip.getAddress().addr(), + vpn.getLocalIp(), vpn.getIpRange(), vpn.getIpsecPresharedKey(), (vpn.getVpcId() != null)); + startVpnCmd.setLocalCidr(cidr); startVpnCmd.setAccessDetail(NetworkElementCommand.ROUTER_IP, getRouterControlIp(router.getId())); - startVpnCmd.setAccessDetail(NetworkElementCommand.ROUTER_GUEST_IP, getRouterIpInNetwork(vpn.getNetworkId(), router.getId())); startVpnCmd.setAccessDetail(NetworkElementCommand.ROUTER_NAME, router.getInstanceName()); DataCenterVO dcVo = _dcDao.findById(router.getDataCenterId()); startVpnCmd.setAccessDetail(NetworkElementCommand.ZONE_NETWORK_TYPE, dcVo.getNetworkType().toString()); - cmds.addCommand("users", addUsersCmd); cmds.addCommand("startVpn", startVpnCmd); } @@ -3978,7 +3968,7 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V public void prepareStop(VirtualMachineProfile profile){ //Collect network usage before stopping Vm - DomainRouterVO router = _routerDao.findById(profile.getVirtualMachine().getId()); + final DomainRouterVO router = _routerDao.findById(profile.getVirtualMachine().getId()); if(router == null){ return; } @@ -3986,17 +3976,17 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V String privateIP = router.getPrivateIpAddress(); if (privateIP != null) { - boolean forVpc = router.getVpcId() != null; + final boolean forVpc = router.getVpcId() != null; List routerNics = _nicDao.listByVmId(router.getId()); - for (Nic routerNic : routerNics) { - Network network = _networkModel.getNetwork(routerNic.getNetworkId()); + for (final Nic routerNic : 
routerNics) { + final Network network = _networkModel.getNetwork(routerNic.getNetworkId()); //Send network usage command for public nic in VPC VR //Send network usage command for isolated guest nic of non VPC VR if ((forVpc && network.getTrafficType() == TrafficType.Public) || (!forVpc && network.getTrafficType() == TrafficType.Guest && network.getGuestType() == Network.GuestType.Isolated)) { final NetworkUsageCommand usageCmd = new NetworkUsageCommand(privateIP, router.getHostName(), forVpc, routerNic.getIp4Address()); - String routerType = router.getType().toString(); - UserStatisticsVO previousStats = _userStatsDao.findBy(router.getAccountId(), + final String routerType = router.getType().toString(); + final UserStatisticsVO previousStats = _userStatsDao.findBy(router.getAccountId(), router.getDataCenterId(), network.getId(), (forVpc ? routerNic.getIp4Address() : null), router.getId(), routerType); NetworkUsageAnswer answer = null; try { @@ -4011,62 +4001,63 @@ public class VirtualNetworkApplianceManagerImpl extends ManagerBase implements V s_logger.warn("Error while collecting network stats from router: " + router.getInstanceName() + " from host: " + router.getHostId() + "; details: " + answer.getDetails()); continue; } - Transaction txn = Transaction.open(Transaction.CLOUD_DB); try { if ((answer.getBytesReceived() == 0) && (answer.getBytesSent() == 0)) { s_logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics"); continue; } - txn.start(); - UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), - router.getDataCenterId(), network.getId(), (forVpc ? 
routerNic.getIp4Address() : null), router.getId(), routerType); - if (stats == null) { - s_logger.warn("unable to find stats for account: " + router.getAccountId()); - continue; - } - - if (previousStats != null - && ((previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived()) - || (previousStats.getCurrentBytesSent() != stats.getCurrentBytesSent()))){ - s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + - "Ignoring current answer. Router: " + answer.getRouterName() + " Rcvd: " + - answer.getBytesReceived() + "Sent: " + answer.getBytesSent()); - continue; - } - - if (stats.getCurrentBytesReceived() > answer.getBytesReceived()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Router: " + - answer.getRouterName() + " Reported: " + answer.getBytesReceived() - + " Stored: " + stats.getCurrentBytesReceived()); + + final NetworkUsageAnswer answerFinal = answer; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + UserStatisticsVO stats = _userStatsDao.lock(router.getAccountId(), + router.getDataCenterId(), network.getId(), (forVpc ? routerNic.getIp4Address() : null), router.getId(), routerType); + if (stats == null) { + s_logger.warn("unable to find stats for account: " + router.getAccountId()); + return; + } + + if (previousStats != null + && ((previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived()) + || (previousStats.getCurrentBytesSent() != stats.getCurrentBytesSent()))){ + s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " + + "Ignoring current answer. 
Router: " + answerFinal.getRouterName() + " Rcvd: " + + answerFinal.getBytesReceived() + "Sent: " + answerFinal.getBytesSent()); + return; + } + + if (stats.getCurrentBytesReceived() > answerFinal.getBytesReceived()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Received # of bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Router: " + + answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesReceived() + + " Stored: " + stats.getCurrentBytesReceived()); + } + stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); + } + stats.setCurrentBytesReceived(answerFinal.getBytesReceived()); + if (stats.getCurrentBytesSent() > answerFinal.getBytesSent()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Received # of bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Router: " + + answerFinal.getRouterName() + " Reported: " + answerFinal.getBytesSent() + + " Stored: " + stats.getCurrentBytesSent()); + } + stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); + } + stats.setCurrentBytesSent(answerFinal.getBytesSent()); + if (! _dailyOrHourly) { + //update agg bytes + stats.setAggBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); + stats.setAggBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); + } + _userStatsDao.update(stats.getId(), stats); } - stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); - } - stats.setCurrentBytesReceived(answer.getBytesReceived()); - if (stats.getCurrentBytesSent() > answer.getBytesSent()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Received # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. 
Router: " + - answer.getRouterName() + " Reported: " + answer.getBytesSent() - + " Stored: " + stats.getCurrentBytesSent()); - } - stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); - } - stats.setCurrentBytesSent(answer.getBytesSent()); - if (! _dailyOrHourly) { - //update agg bytes - stats.setAggBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent()); - stats.setAggBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived()); - } - _userStatsDao.update(stats.getId(), stats); - txn.commit(); + }); } catch (Exception e) { - txn.rollback(); s_logger.warn("Unable to update user statistics for account: " + router.getAccountId() + " Rx: " + answer.getBytesReceived() + "; Tx: " + answer.getBytesSent()); - } finally { - txn.close(); } } } diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManager.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManager.java index d12280af869..fa34b0dd70e 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManager.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManager.java @@ -23,8 +23,10 @@ import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.network.Network; +import com.cloud.network.RemoteAccessVpn; import com.cloud.network.Site2SiteVpnConnection; import com.cloud.network.VpcVirtualNetworkApplianceService; +import com.cloud.network.VpnUser; import com.cloud.network.vpc.*; import com.cloud.user.Account; import com.cloud.vm.DomainRouterVO; @@ -106,4 +108,31 @@ public interface VpcVirtualNetworkApplianceManager extends VirtualNetworkApplian * @return */ List getVpcRouters(long vpcId); + + /** + * @param vpn + * @param router + * @return + * @throws ResourceUnavailableException + */ + boolean startRemoteAccessVpn(RemoteAccessVpn vpn, VirtualRouter router) + throws 
ResourceUnavailableException; + + /** + * @param vpn + * @param router + * @return + * @throws ResourceUnavailableException + */ + boolean stopRemoteAccessVpn(RemoteAccessVpn vpn, VirtualRouter router) + throws ResourceUnavailableException; + + /** + * @param vpn + * @param users + * @param routers + * @return + * @throws ResourceUnavailableException + */ + String[] applyVpnUsers(RemoteAccessVpn vpn, List users, VirtualRouter router) throws ResourceUnavailableException; } diff --git a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java index ea2d7c42884..63eb75b672e 100644 --- a/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java +++ b/server/src/com/cloud/network/router/VpcVirtualNetworkApplianceManagerImpl.java @@ -27,6 +27,7 @@ import java.util.TreeSet; import javax.ejb.Local; import javax.inject.Inject; +import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -39,10 +40,12 @@ import com.cloud.agent.api.SetupGuestNetworkAnswer; import com.cloud.agent.api.SetupGuestNetworkCommand; import com.cloud.agent.api.routing.IpAssocVpcCommand; import com.cloud.agent.api.routing.NetworkElementCommand; +import com.cloud.agent.api.routing.RemoteAccessVpnCfgCommand; import com.cloud.agent.api.routing.SetNetworkACLCommand; import com.cloud.agent.api.routing.SetSourceNatCommand; import com.cloud.agent.api.routing.SetStaticRouteCommand; import com.cloud.agent.api.routing.Site2SiteVpnCfgCommand; +import com.cloud.agent.api.routing.VpnUsersCfgCommand; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.agent.api.to.NetworkACLTO; import com.cloud.agent.api.to.NicTO; @@ -52,10 +55,12 @@ import com.cloud.dc.DataCenterVO; import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeployDestination; import com.cloud.deploy.DeploymentPlan; +import 
com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientAddressCapacityException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InsufficientServerCapacityException; +import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.StorageUnavailableException; import com.cloud.network.IpAddress; @@ -70,15 +75,18 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.network.PhysicalNetwork; import com.cloud.network.PhysicalNetworkServiceProvider; import com.cloud.network.PublicIpAddress; +import com.cloud.network.RemoteAccessVpn; import com.cloud.network.Site2SiteVpnConnection; import com.cloud.network.VirtualRouterProvider; -import com.cloud.network.VirtualRouterProvider.VirtualRouterProviderType; +import com.cloud.network.VirtualRouterProvider.Type; import com.cloud.network.VpcVirtualNetworkApplianceService; +import com.cloud.network.VpnUser; import com.cloud.network.addr.PublicIp; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; import com.cloud.network.dao.PhysicalNetworkDao; +import com.cloud.network.dao.RemoteAccessVpnVO; import com.cloud.network.dao.Site2SiteCustomerGatewayVO; import com.cloud.network.dao.Site2SiteVpnConnectionDao; import com.cloud.network.dao.Site2SiteVpnGatewayDao; @@ -161,6 +169,12 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian @Inject EntityManager _entityMgr; + @Override + public boolean configure(final String name, final Map params) throws ConfigurationException { + _itMgr.registerGuru(VirtualMachine.Type.DomainRouter, this); + return super.configure(name, params); + } + @Override public List deployVirtualRouterInVpc(Vpc vpc, DeployDestination dest, Account owner, Map params) throws 
InsufficientCapacityException, @@ -203,13 +217,13 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian for (PhysicalNetwork pNtwk : pNtwks) { PhysicalNetworkServiceProvider provider = _physicalProviderDao.findByServiceProvider(pNtwk.getId(), - VirtualRouterProviderType.VPCVirtualRouter.toString()); + Type.VPCVirtualRouter.toString()); if (provider == null) { throw new CloudRuntimeException("Cannot find service provider " + - VirtualRouterProviderType.VPCVirtualRouter.toString() + " in physical network " + pNtwk.getId()); + Type.VPCVirtualRouter.toString() + " in physical network " + pNtwk.getId()); } vpcVrProvider = _vrProviderDao.findByNspIdAndType(provider.getId(), - VirtualRouterProviderType.VPCVirtualRouter); + Type.VPCVirtualRouter); if (vpcVrProvider != null) { break; } @@ -839,7 +853,13 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian createStaticRouteCommands(staticRouteProfiles, router, cmds); } - //5) REPROGRAM GUEST NETWORK + //5) RE-APPLY ALL REMOTE ACCESS VPNs + RemoteAccessVpnVO vpn = _vpnDao.findByAccountAndVpc(router.getAccountId(), router.getVpcId()); + if (vpn != null) { + createApplyVpnCommands(true, vpn, router, cmds); + } + + //6) REPROGRAM GUEST NETWORK boolean reprogramGuestNtwks = true; if (profile.getParameter(Param.ReProgramGuestNetworks) != null && (Boolean) profile.getParameter(Param.ReProgramGuestNetworks) == false) { @@ -1342,4 +1362,100 @@ public class VpcVirtualNetworkApplianceManagerImpl extends VirtualNetworkApplian return _routerDao.listByVpcId(vpcId); } + @Override + public String[] applyVpnUsers(RemoteAccessVpn vpn, List users, VirtualRouter router) throws ResourceUnavailableException { + Vpc vpc = _vpcDao.findById(vpn.getVpcId()); + + if (router.getState() != State.Running) { + s_logger.warn("Failed to add/remove Remote Access VPN users: router not in running state"); + throw new ResourceUnavailableException("Failed to add/remove Remote Access VPN users: router 
not in running state: " + + router.getState(), DataCenter.class, vpc.getZoneId()); + } + + Commands cmds = new Commands(Command.OnError.Continue); + + createApplyVpnUsersCommand(users, router, cmds); + + // Currently we receive just one answer from the agent. In the future we have to parse individual answers and set + // results accordingly + boolean agentResult = sendCommandsToRouter(router, cmds); + + String[] result = new String[users.size()]; + for (int i = 0; i < result.length; i++) { + if (agentResult) { + result[i] = null; + } else { + result[i] = String.valueOf(agentResult); + } + } + + return result; + } + + protected String getVpnCidr(RemoteAccessVpn vpn) + { + if (vpn.getVpcId() == null) { + return super.getVpnCidr(vpn); + } + Vpc vpc = _vpcDao.findById(vpn.getVpcId()); + return vpc.getCidr(); + } + + @Override + public boolean startRemoteAccessVpn(RemoteAccessVpn vpn, VirtualRouter router) throws ResourceUnavailableException { + if (router.getState() != State.Running) { + s_logger.warn("Unable to apply remote access VPN configuration, virtual router is not in the right state " + router.getState()); + throw new ResourceUnavailableException("Unable to apply remote access VPN configuration," + + " virtual router is not in the right state", DataCenter.class, router.getDataCenterId()); + } + + Commands cmds = new Commands(Command.OnError.Stop); + createApplyVpnCommands(true, vpn, router, cmds); + + try { + _agentMgr.send(router.getHostId(), cmds); + } catch (OperationTimedoutException e) { + s_logger.debug("Failed to start remote access VPN: ", e); + throw new AgentUnavailableException("Unable to send commands to virtual router ", router.getHostId(), e); + } + Answer answer = cmds.getAnswer("users"); + if (!answer.getResult()) { + s_logger.error("Unable to start vpn: unable add users to vpn in zone " + router.getDataCenterId() + + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + + " due to " + answer.getDetails()); + throw new 
ResourceUnavailableException("Unable to start vpn: Unable to add users to vpn in zone " + + router.getDataCenterId() + " for account " + vpn.getAccountId() + " on domR: " + + router.getInstanceName() + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); + } + answer = cmds.getAnswer("startVpn"); + if (!answer.getResult()) { + s_logger.error("Unable to start vpn in zone " + router.getDataCenterId() + " for account " + + vpn.getAccountId() + " on domR: " + router.getInstanceName() + " due to " + + answer.getDetails()); + throw new ResourceUnavailableException("Unable to start vpn in zone " + router.getDataCenterId() + + " for account " + vpn.getAccountId() + " on domR: " + router.getInstanceName() + + " due to " + answer.getDetails(), DataCenter.class, router.getDataCenterId()); + } + + return true; + } + + @Override + public boolean stopRemoteAccessVpn(RemoteAccessVpn vpn, VirtualRouter router) throws ResourceUnavailableException { + boolean result = true; + + if (router.getState() == State.Running) { + Commands cmds = new Commands(Command.OnError.Continue); + createApplyVpnCommands(false, vpn, router, cmds); + result = result && sendCommandsToRouter(router, cmds); + } else if (router.getState() == State.Stopped) { + s_logger.debug("Router " + router + " is in Stopped state, not sending deleteRemoteAccessVpn command to it"); + } else { + s_logger.warn("Failed to delete remote access VPN: domR " + router + " is not in right state " + router.getState()); + throw new ResourceUnavailableException("Failed to delete remote access VPN: domR is not in right state " + + router.getState(), DataCenter.class, router.getDataCenterId()); + } + + return true; + } } diff --git a/server/src/com/cloud/network/rules/RulesManagerImpl.java b/server/src/com/cloud/network/rules/RulesManagerImpl.java index 6e326b0f652..2ea10211cce 100755 --- a/server/src/com/cloud/network/rules/RulesManagerImpl.java +++ b/server/src/com/cloud/network/rules/RulesManagerImpl.java 
@@ -26,7 +26,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.api.command.user.firewall.ListPortForwardingRulesCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -60,7 +59,7 @@ import com.cloud.network.vpc.VpcManager; import com.cloud.network.vpc.VpcService; import com.cloud.offering.NetworkOffering; import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; @@ -76,8 +75,12 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; @@ -197,12 +200,12 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules @Override @DB @ActionEvent(eventType = EventTypes.EVENT_NET_RULE_ADD, eventDescription = "creating forwarding rule", create = true) - public PortForwardingRule createPortForwardingRule(PortForwardingRule rule, Long vmId, Ip vmIp, boolean openFirewall) + public PortForwardingRule createPortForwardingRule(final PortForwardingRule rule, final Long vmId, Ip vmIp, final boolean openFirewall) throws NetworkRuleConflictException { CallContext ctx = CallContext.current(); - Account caller = ctx.getCallingAccount(); + final 
Account caller = ctx.getCallingAccount(); - Long ipAddrId = rule.getSourceIpAddressId(); + final Long ipAddrId = rule.getSourceIpAddressId(); IPAddressVO ipAddress = _ipAddressDao.findById(ipAddrId); @@ -213,7 +216,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules throw new InvalidParameterValueException("Unable to create port forwarding rule; ip id=" + ipAddrId + " has static nat enabled"); } - Long networkId = rule.getNetworkId(); + final Long networkId = rule.getNetworkId(); Network network = _networkModel.getNetwork(networkId); //associate ip address to network (if needed) boolean performedIpAssoc = false; @@ -245,8 +248,8 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules _firewallMgr.validateFirewallRule(caller, ipAddress, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), Purpose.PortForwarding, FirewallRuleType.User, networkId, rule.getTrafficType()); - Long accountId = ipAddress.getAllocatedToAccountId(); - Long domainId = ipAddress.getAllocatedInDomainId(); + final Long accountId = ipAddress.getAllocatedToAccountId(); + final Long domainId = ipAddress.getAllocatedInDomainId(); // start port can't be bigger than end port if (rule.getDestinationPortStart() > rule.getDestinationPortEnd()) { @@ -308,46 +311,48 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } } - Transaction txn = Transaction.currentTxn(); - txn.start(); - - PortForwardingRuleVO newRule = new PortForwardingRuleVO(rule.getXid(), rule.getSourceIpAddressId(), - rule.getSourcePortStart(), rule.getSourcePortEnd(), dstIp, rule.getDestinationPortStart(), - rule.getDestinationPortEnd(), rule.getProtocol().toLowerCase(), networkId, accountId, domainId, vmId); - newRule = _portForwardingDao.persist(newRule); - - // create firewallRule for 0.0.0.0/0 cidr - if (openFirewall) { - _firewallMgr.createRuleForAllCidrs(ipAddrId, caller, rule.getSourcePortStart(), 
rule.getSourcePortEnd(), - rule.getProtocol(), null, null, newRule.getId(), networkId); - } - - try { - _firewallMgr.detectRulesConflict(newRule); - if (!_firewallDao.setStateToAdd(newRule)) { - throw new CloudRuntimeException("Unable to update the state to add for " + newRule); - } - CallContext.current().setEventDetails("Rule Id: " + newRule.getId()); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_RULE_ADD, newRule.getAccountId(), - ipAddress.getDataCenterId(), newRule.getId(), null, PortForwardingRule.class.getName(), - newRule.getUuid()); - txn.commit(); - return newRule; - } catch (Exception e) { - if (newRule != null) { - txn.start(); - // no need to apply the rule as it wasn't programmed on the backend yet - _firewallMgr.revokeRelatedFirewallRule(newRule.getId(), false); - removePFRule(newRule); - txn.commit(); + final Ip dstIpFinal = dstIp; + final IPAddressVO ipAddressFinal = ipAddress; + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public PortForwardingRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + PortForwardingRuleVO newRule = new PortForwardingRuleVO(rule.getXid(), rule.getSourceIpAddressId(), + rule.getSourcePortStart(), rule.getSourcePortEnd(), dstIpFinal, rule.getDestinationPortStart(), + rule.getDestinationPortEnd(), rule.getProtocol().toLowerCase(), networkId, accountId, domainId, vmId); + newRule = _portForwardingDao.persist(newRule); + + // create firewallRule for 0.0.0.0/0 cidr + if (openFirewall) { + _firewallMgr.createRuleForAllCidrs(ipAddrId, caller, rule.getSourcePortStart(), rule.getSourcePortEnd(), + rule.getProtocol(), null, null, newRule.getId(), networkId); + } + + try { + _firewallMgr.detectRulesConflict(newRule); + if (!_firewallDao.setStateToAdd(newRule)) { + throw new CloudRuntimeException("Unable to update the state to add for " + newRule); + } + CallContext.current().setEventDetails("Rule Id: " + newRule.getId()); + 
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_RULE_ADD, newRule.getAccountId(), + ipAddressFinal.getDataCenterId(), newRule.getId(), null, PortForwardingRule.class.getName(), + newRule.getUuid()); + return newRule; + } catch (Exception e) { + if (newRule != null) { + // no need to apply the rule as it wasn't programmed on the backend yet + _firewallMgr.revokeRelatedFirewallRule(newRule.getId(), false); + removePFRule(newRule); + } + + if (e instanceof NetworkRuleConflictException) { + throw (NetworkRuleConflictException) e; + } + + throw new CloudRuntimeException("Unable to add rule for the ip id=" + ipAddrId, e); + } } + }); - if (e instanceof NetworkRuleConflictException) { - throw (NetworkRuleConflictException) e; - } - - throw new CloudRuntimeException("Unable to add rule for the ip id=" + ipAddrId, e); - } } finally { // release ip address if ipassoc was perfored if (performedIpAssoc) { @@ -361,10 +366,10 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules @Override @DB @ActionEvent(eventType = EventTypes.EVENT_NET_RULE_ADD, eventDescription = "creating static nat rule", create = true) - public StaticNatRule createStaticNatRule(StaticNatRule rule, boolean openFirewall) throws NetworkRuleConflictException { - Account caller = CallContext.current().getCallingAccount(); + public StaticNatRule createStaticNatRule(final StaticNatRule rule, final boolean openFirewall) throws NetworkRuleConflictException { + final Account caller = CallContext.current().getCallingAccount(); - Long ipAddrId = rule.getSourceIpAddressId(); + final Long ipAddrId = rule.getSourceIpAddressId(); IPAddressVO ipAddress = _ipAddressDao.findById(ipAddrId); @@ -377,9 +382,9 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules _firewallMgr.validateFirewallRule(caller, ipAddress, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), Purpose.StaticNat, FirewallRuleType.User,null, rule.getTrafficType() ); - Long 
networkId = ipAddress.getAssociatedWithNetworkId(); - Long accountId = ipAddress.getAllocatedToAccountId(); - Long domainId = ipAddress.getAllocatedInDomainId(); + final Long networkId = ipAddress.getAssociatedWithNetworkId(); + final Long accountId = ipAddress.getAllocatedToAccountId(); + final Long domainId = ipAddress.getAllocatedInDomainId(); _networkModel.checkIpForService(ipAddress, Service.StaticNat, null); @@ -390,48 +395,48 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules } //String dstIp = _networkModel.getIpInNetwork(ipAddress.getAssociatedWithVmId(), networkId); - String dstIp = ipAddress.getVmIp(); - Transaction txn = Transaction.currentTxn(); - txn.start(); + final String dstIp = ipAddress.getVmIp(); + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public StaticNatRule doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { - FirewallRuleVO newRule = new FirewallRuleVO(rule.getXid(), rule.getSourceIpAddressId(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol().toLowerCase(), - networkId, accountId, domainId, rule.getPurpose(), null, null, null, null, null); + FirewallRuleVO newRule = new FirewallRuleVO(rule.getXid(), rule.getSourceIpAddressId(), rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol().toLowerCase(), + networkId, accountId, domainId, rule.getPurpose(), null, null, null, null, null); - newRule = _firewallDao.persist(newRule); + newRule = _firewallDao.persist(newRule); - // create firewallRule for 0.0.0.0/0 cidr - if (openFirewall) { - _firewallMgr.createRuleForAllCidrs(ipAddrId, caller, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), null, null, newRule.getId(), networkId); - } + // create firewallRule for 0.0.0.0/0 cidr + if (openFirewall) { + _firewallMgr.createRuleForAllCidrs(ipAddrId, caller, rule.getSourcePortStart(), rule.getSourcePortEnd(), rule.getProtocol(), null, null, 
newRule.getId(), networkId); + } - try { - _firewallMgr.detectRulesConflict(newRule); - if (!_firewallDao.setStateToAdd(newRule)) { - throw new CloudRuntimeException("Unable to update the state to add for " + newRule); + try { + _firewallMgr.detectRulesConflict(newRule); + if (!_firewallDao.setStateToAdd(newRule)) { + throw new CloudRuntimeException("Unable to update the state to add for " + newRule); + } + CallContext.current().setEventDetails("Rule Id: " + newRule.getId()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_RULE_ADD, newRule.getAccountId(), 0, newRule.getId(), + null, FirewallRule.class.getName(), newRule.getUuid()); + + StaticNatRule staticNatRule = new StaticNatRuleImpl(newRule, dstIp); + + return staticNatRule; + } catch (Exception e) { + if (newRule != null) { + // no need to apply the rule as it wasn't programmed on the backend yet + _firewallMgr.revokeRelatedFirewallRule(newRule.getId(), false); + _firewallMgr.removeRule(newRule); + } + + if (e instanceof NetworkRuleConflictException) { + throw (NetworkRuleConflictException) e; + } + throw new CloudRuntimeException("Unable to add static nat rule for the ip id=" + newRule.getSourceIpAddressId(), e); + } } - CallContext.current().setEventDetails("Rule Id: " + newRule.getId()); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_NET_RULE_ADD, newRule.getAccountId(), 0, newRule.getId(), - null, FirewallRule.class.getName(), newRule.getUuid()); + }); - txn.commit(); - StaticNatRule staticNatRule = new StaticNatRuleImpl(newRule, dstIp); - - return staticNatRule; - } catch (Exception e) { - - if (newRule != null) { - txn.start(); - // no need to apply the rule as it wasn't programmed on the backend yet - _firewallMgr.revokeRelatedFirewallRule(newRule.getId(), false); - _firewallMgr.removeRule(newRule); - txn.commit(); - } - - if (e instanceof NetworkRuleConflictException) { - throw (NetworkRuleConflictException) e; - } - throw new CloudRuntimeException("Unable to add static nat rule for the 
ip id=" + newRule.getSourceIpAddressId(), e); - } } @Override @@ -826,7 +831,7 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.PortForwardingRule.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.PortForwardingRule.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -1146,23 +1151,25 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules @Override @DB - public FirewallRuleVO[] reservePorts(IpAddress ip, String protocol, FirewallRule.Purpose purpose, - boolean openFirewall, Account caller, int... ports) throws NetworkRuleConflictException { - FirewallRuleVO[] rules = new FirewallRuleVO[ports.length]; + public FirewallRuleVO[] reservePorts(final IpAddress ip, final String protocol, final FirewallRule.Purpose purpose, + final boolean openFirewall, final Account caller, final int... 
ports) throws NetworkRuleConflictException { + final FirewallRuleVO[] rules = new FirewallRuleVO[ports.length]; - Transaction txn = Transaction.currentTxn(); - txn.start(); - for (int i = 0; i < ports.length; i++) { - - rules[i] = new FirewallRuleVO(null, ip.getId(), ports[i], protocol, ip.getAssociatedWithNetworkId(), ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), purpose, null, null, null, null); - rules[i] = _firewallDao.persist(rules[i]); - - if (openFirewall) { - _firewallMgr.createRuleForAllCidrs(ip.getId(), caller, ports[i], ports[i], protocol, null, null, - rules[i].getId(), ip.getAssociatedWithNetworkId()); + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws NetworkRuleConflictException { + for (int i = 0; i < ports.length; i++) { + + rules[i] = new FirewallRuleVO(null, ip.getId(), ports[i], protocol, ip.getAssociatedWithNetworkId(), ip.getAllocatedToAccountId(), ip.getAllocatedInDomainId(), purpose, null, null, null, null); + rules[i] = _firewallDao.persist(rules[i]); + + if (openFirewall) { + _firewallMgr.createRuleForAllCidrs(ip.getId(), caller, ports[i], ports[i], protocol, null, null, + rules[i].getId(), ip.getAssociatedWithNetworkId()); + } + } } - } - txn.commit(); + }); boolean success = false; try { @@ -1173,12 +1180,14 @@ public class RulesManagerImpl extends ManagerBase implements RulesManager, Rules return rules; } finally { if (!success) { - txn.start(); - - for (FirewallRuleVO newRule : rules) { - _firewallMgr.removeRule(newRule); - } - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (FirewallRuleVO newRule : rules) { + _firewallMgr.removeRule(newRule); + } + } + }); } } } diff --git a/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java 
b/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java index 18ee0f1124f..c7b6e1eb3f7 100755 --- a/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java +++ b/server/src/com/cloud/network/security/SecurityGroupManagerImpl.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.ConcurrentModificationException; import java.util.Date; import java.util.HashMap; import java.util.HashSet; @@ -34,13 +35,13 @@ import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import javax.ejb.ConcurrentAccessException; import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.commons.codec.digest.DigestUtils; import org.apache.log4j.Logger; - import org.apache.cloudstack.api.command.user.securitygroup.AuthorizeSecurityGroupEgressCmd; import org.apache.cloudstack.api.command.user.securitygroup.AuthorizeSecurityGroupIngressCmd; import org.apache.cloudstack.api.command.user.securitygroup.CreateSecurityGroupCmd; @@ -50,6 +51,7 @@ import org.apache.cloudstack.api.command.user.securitygroup.RevokeSecurityGroupI import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.utils.identity.ManagementServerNode; import com.cloud.agent.AgentManager; @@ -96,6 +98,10 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import 
com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; import com.cloud.utils.net.NetUtils; @@ -187,16 +193,11 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } } - public class WorkerThread implements Runnable { + public class WorkerThread extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { - Transaction txn = Transaction.open("SG Work"); - try { - work(); - } finally { - txn.close("SG Work"); - } + work(); } catch (Throwable th) { try { s_logger.error("Problem with SG work", th); @@ -204,24 +205,15 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } } } - - WorkerThread() { - - } } - public class CleanupThread implements Runnable { + public class CleanupThread extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { - Transaction txn = Transaction.open("SG Cleanup"); - try { - cleanupFinishedWork(); - cleanupUnfinishedWork(); - //processScheduledWork(); - } finally { - txn.close("SG Cleanup"); - } + cleanupFinishedWork(); + cleanupUnfinishedWork(); + //processScheduledWork(); } catch (Throwable th) { try { s_logger.error("Problem with SG Cleanup", th); @@ -229,10 +221,6 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } } } - - CleanupThread() { - - } } public static class PortAndProto implements Comparable { @@ -400,7 +388,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } @DB - public void scheduleRulesetUpdateToHosts(List affectedVms, boolean updateSeqno, Long delayMs) { + public void scheduleRulesetUpdateToHosts(final List affectedVms, final boolean updateSeqno, Long delayMs) { if (affectedVms.size() == 0) { return; } @@ -422,39 +410,43 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro if 
(s_logger.isTraceEnabled()) { s_logger.trace("Security Group Mgr: acquired global work lock"); } - Transaction txn = Transaction.currentTxn(); + try { - txn.start(); - for (Long vmId : affectedVms) { - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: scheduling ruleset update for " + vmId); - } - VmRulesetLogVO log = null; - SecurityGroupWorkVO work = null; - - log = _rulesetLogDao.findByVmId(vmId); - if (log == null) { - log = new VmRulesetLogVO(vmId); - log = _rulesetLogDao.persist(log); - } - - if (log != null && updateSeqno) { - log.incrLogsequence(); - _rulesetLogDao.update(log.getId(), log); - } - work = _workDao.findByVmIdStep(vmId, Step.Scheduled); - if (work == null) { - work = new SecurityGroupWorkVO(vmId, null, null, SecurityGroupWork.Step.Scheduled, null); - work = _workDao.persist(work); - if (s_logger.isTraceEnabled()) { - s_logger.trace("Security Group Mgr: created new work item for " + vmId + "; id = " + work.getId()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (Long vmId : affectedVms) { + if (s_logger.isTraceEnabled()) { + s_logger.trace("Security Group Mgr: scheduling ruleset update for " + vmId); + } + VmRulesetLogVO log = null; + SecurityGroupWorkVO work = null; + + log = _rulesetLogDao.findByVmId(vmId); + if (log == null) { + log = new VmRulesetLogVO(vmId); + log = _rulesetLogDao.persist(log); + } + + if (log != null && updateSeqno) { + log.incrLogsequence(); + _rulesetLogDao.update(log.getId(), log); + } + work = _workDao.findByVmIdStep(vmId, Step.Scheduled); + if (work == null) { + work = new SecurityGroupWorkVO(vmId, null, null, SecurityGroupWork.Step.Scheduled, null); + work = _workDao.persist(work); + if (s_logger.isTraceEnabled()) { + s_logger.trace("Security Group Mgr: created new work item for " + vmId + "; id = " + work.getId()); + } + } + + work.setLogsequenceNumber(log.getLogsequence()); + 
_workDao.update(work.getId(), work); } } + }); - work.setLogsequenceNumber(log.getLogsequence()); - _workDao.update(work.getId(), work); - } - txn.commit(); for (Long vmId : affectedVms) { _executorPool.schedule(new WorkerThread(), delayMs, TimeUnit.MILLISECONDS); } @@ -595,7 +587,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro return authorizeSecurityGroupRule(securityGroupId,protocol,startPort,endPort,icmpType,icmpCode,cidrList,groupList,SecurityRuleType.IngressRule); } - private List authorizeSecurityGroupRule(Long securityGroupId,String protocol,Integer startPort,Integer endPort,Integer icmpType,Integer icmpCode,List cidrList,Map groupList,SecurityRuleType ruleType) { + private List authorizeSecurityGroupRule(final Long securityGroupId, String protocol,Integer startPort,Integer endPort,Integer icmpType,Integer icmpCode,final List cidrList,Map groupList, final SecurityRuleType ruleType) { Integer startPortOrType = null; Integer endPortOrCode = null; @@ -713,66 +705,71 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } } - final Transaction txn = Transaction.currentTxn(); final Set authorizedGroups2 = new TreeSet(new SecurityGroupVOComparator()); authorizedGroups2.addAll(authorizedGroups); // Ensure we don't re-lock the same row - txn.start(); - // Prevents other threads/management servers from creating duplicate security rules - securityGroup = _securityGroupDao.acquireInLockTable(securityGroupId); - if (securityGroup == null) { - s_logger.warn("Could not acquire lock on network security group: id= " + securityGroupId); - return null; - } - List newRules = new ArrayList(); - try { - for (final SecurityGroupVO ngVO : authorizedGroups2) { - final Long ngId = ngVO.getId(); - // Don't delete the referenced group from under us - if (ngVO.getId() != securityGroup.getId()) { - final SecurityGroupVO tmpGrp = _securityGroupDao.lockRow(ngId, false); - if (tmpGrp == null) { - s_logger.warn("Failed to 
acquire lock on security group: " + ngId); - txn.rollback(); - return null; + final Integer startPortOrTypeFinal = startPortOrType; + final Integer endPortOrCodeFinal = endPortOrCode; + final String protocolFinal = protocol; + return Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + // Prevents other threads/management servers from creating duplicate security rules + SecurityGroup securityGroup = _securityGroupDao.acquireInLockTable(securityGroupId); + if (securityGroup == null) { + s_logger.warn("Could not acquire lock on network security group: id= " + securityGroupId); + return null; + } + List newRules = new ArrayList(); + try { + for (final SecurityGroupVO ngVO : authorizedGroups2) { + final Long ngId = ngVO.getId(); + // Don't delete the referenced group from under us + if (ngVO.getId() != securityGroup.getId()) { + final SecurityGroupVO tmpGrp = _securityGroupDao.lockRow(ngId, false); + if (tmpGrp == null) { + s_logger.warn("Failed to acquire lock on security group: " + ngId); + throw new ConcurrentAccessException("Failed to acquire lock on security group: " + ngId); + } + } + SecurityGroupRuleVO securityGroupRule = _securityGroupRuleDao.findByProtoPortsAndAllowedGroupId(securityGroup.getId(), protocolFinal, startPortOrTypeFinal, endPortOrCodeFinal, ngVO.getId()); + if ((securityGroupRule != null) && (securityGroupRule.getRuleType() == ruleType)) { + continue; // rule already exists. 
+ } + securityGroupRule = new SecurityGroupRuleVO(ruleType, securityGroup.getId(), startPortOrTypeFinal, endPortOrCodeFinal, protocolFinal, ngVO.getId()); + securityGroupRule = _securityGroupRuleDao.persist(securityGroupRule); + newRules.add(securityGroupRule); + } + if (cidrList != null) { + for (String cidr : cidrList) { + SecurityGroupRuleVO securityGroupRule = _securityGroupRuleDao.findByProtoPortsAndCidr(securityGroup.getId(), protocolFinal, startPortOrTypeFinal, endPortOrCodeFinal, cidr); + if ((securityGroupRule != null) && (securityGroupRule.getRuleType() == ruleType)) { + continue; + } + securityGroupRule = new SecurityGroupRuleVO(ruleType, securityGroup.getId(), startPortOrTypeFinal, endPortOrCodeFinal, protocolFinal, cidr); + securityGroupRule = _securityGroupRuleDao.persist(securityGroupRule); + newRules.add(securityGroupRule); + } + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Added " + newRules.size() + " rules to security group " + securityGroup.getName()); + } + final ArrayList affectedVms = new ArrayList(); + affectedVms.addAll(_securityGroupVMMapDao.listVmIdsBySecurityGroup(securityGroup.getId())); + scheduleRulesetUpdateToHosts(affectedVms, true, null); + return newRules; + } catch (Exception e) { + s_logger.warn("Exception caught when adding security group rules ", e); + throw new CloudRuntimeException("Exception caught when adding security group rules", e); + } finally { + if (securityGroup != null) { + _securityGroupDao.releaseFromLockTable(securityGroup.getId()); } } - SecurityGroupRuleVO securityGroupRule = _securityGroupRuleDao.findByProtoPortsAndAllowedGroupId(securityGroup.getId(), protocol, startPortOrType, endPortOrCode, ngVO.getId()); - if ((securityGroupRule != null) && (securityGroupRule.getRuleType() == ruleType)) { - continue; // rule already exists. 
- } - securityGroupRule = new SecurityGroupRuleVO(ruleType, securityGroup.getId(), startPortOrType, endPortOrCode, protocol, ngVO.getId()); - securityGroupRule = _securityGroupRuleDao.persist(securityGroupRule); - newRules.add(securityGroupRule); } - if (cidrList != null) { - for (String cidr : cidrList) { - SecurityGroupRuleVO securityGroupRule = _securityGroupRuleDao.findByProtoPortsAndCidr(securityGroup.getId(), protocol, startPortOrType, endPortOrCode, cidr); - if ((securityGroupRule != null) && (securityGroupRule.getRuleType() == ruleType)) { - continue; - } - securityGroupRule = new SecurityGroupRuleVO(ruleType, securityGroup.getId(), startPortOrType, endPortOrCode, protocol, cidr); - securityGroupRule = _securityGroupRuleDao.persist(securityGroupRule); - newRules.add(securityGroupRule); - } - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Added " + newRules.size() + " rules to security group " + securityGroup.getName()); - } - txn.commit(); - final ArrayList affectedVms = new ArrayList(); - affectedVms.addAll(_securityGroupVMMapDao.listVmIdsBySecurityGroup(securityGroup.getId())); - scheduleRulesetUpdateToHosts(affectedVms, true, null); - return newRules; - } catch (Exception e) { - s_logger.warn("Exception caught when adding security group rules ", e); - throw new CloudRuntimeException("Exception caught when adding security group rules", e); - } finally { - if (securityGroup != null) { - _securityGroupDao.releaseFromLockTable(securityGroup.getId()); - } - } + }); + } @Override @@ -792,11 +789,11 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro return revokeSecurityGroupRule(id, SecurityRuleType.IngressRule); } - private boolean revokeSecurityGroupRule(Long id, SecurityRuleType type) { + private boolean revokeSecurityGroupRule(final Long id, SecurityRuleType type) { // input validation Account caller = CallContext.current().getCallingAccount(); - SecurityGroupRuleVO rule = _securityGroupRuleDao.findById(id); + final 
SecurityGroupRuleVO rule = _securityGroupRuleDao.findById(id); if (rule == null) { s_logger.debug("Unable to find security rule with id " + id); throw new InvalidParameterValueException("Unable to find security rule with id " + id); @@ -812,36 +809,37 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro SecurityGroup securityGroup = _securityGroupDao.findById(rule.getSecurityGroupId()); _accountMgr.checkAccess(caller, null, true, securityGroup); - SecurityGroupVO groupHandle = null; - final Transaction txn = Transaction.currentTxn(); + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + SecurityGroupVO groupHandle = null; - try { - txn.start(); - // acquire lock on parent group (preserving this logic) - groupHandle = _securityGroupDao.acquireInLockTable(rule.getSecurityGroupId()); - if (groupHandle == null) { - s_logger.warn("Could not acquire lock on security group id: " + rule.getSecurityGroupId()); - return false; + try { + // acquire lock on parent group (preserving this logic) + groupHandle = _securityGroupDao.acquireInLockTable(rule.getSecurityGroupId()); + if (groupHandle == null) { + s_logger.warn("Could not acquire lock on security group id: " + rule.getSecurityGroupId()); + return false; + } + + _securityGroupRuleDao.remove(id); + s_logger.debug("revokeSecurityGroupRule succeeded for security rule id: " + id); + + final ArrayList affectedVms = new ArrayList(); + affectedVms.addAll(_securityGroupVMMapDao.listVmIdsBySecurityGroup(groupHandle.getId())); + scheduleRulesetUpdateToHosts(affectedVms, true, null); + + return true; + } catch (Exception e) { + s_logger.warn("Exception caught when deleting security rules ", e); + throw new CloudRuntimeException("Exception caught when deleting security rules", e); + } finally { + if (groupHandle != null) { + _securityGroupDao.releaseFromLockTable(groupHandle.getId()); + } + } } - - 
_securityGroupRuleDao.remove(id); - s_logger.debug("revokeSecurityGroupRule succeeded for security rule id: " + id); - - final ArrayList affectedVms = new ArrayList(); - affectedVms.addAll(_securityGroupVMMapDao.listVmIdsBySecurityGroup(groupHandle.getId())); - scheduleRulesetUpdateToHosts(affectedVms, true, null); - - return true; - } catch (Exception e) { - s_logger.warn("Exception caught when deleting security rules ", e); - throw new CloudRuntimeException("Exception caught when deleting security rules", e); - } finally { - if (groupHandle != null) { - _securityGroupDao.releaseFromLockTable(groupHandle.getId()); - } - txn.commit(); - } - + }); } @Override @@ -939,7 +937,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro } return; } - Long userVmId = work.getInstanceId(); + final Long userVmId = work.getInstanceId(); if (work.getStep() == Step.Done) { if (s_logger.isDebugEnabled()) { s_logger.debug("Security Group work: found a job in done state, rescheduling for vm: " + userVmId); @@ -949,68 +947,73 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro scheduleRulesetUpdateToHosts(affectedVms, false, _timeBetweenCleanups*1000l); return; } - UserVm vm = null; - Long seqnum = null; s_logger.debug("Working on " + work); - final Transaction txn = Transaction.currentTxn(); - txn.start(); - boolean locked = false; - try { - vm = _userVMDao.acquireInLockTable(work.getInstanceId()); - if (vm == null) { - vm = _userVMDao.findById(work.getInstanceId()); - if (vm == null) { - s_logger.info("VM " + work.getInstanceId() + " is removed"); - locked = true; - return; - } - s_logger.warn("Unable to acquire lock on vm id=" + userVmId); - return; - } - locked = true; - Long agentId = null; - VmRulesetLogVO log = _rulesetLogDao.findByVmId(userVmId); - if (log == null) { - s_logger.warn("Cannot find log record for vm id=" + userVmId); - return; - } - seqnum = log.getLogsequence(); - if (vm != null && vm.getState() == 
State.Running) { - Map> ingressRules = generateRulesForVM(userVmId, SecurityRuleType.IngressRule); - Map> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule); - agentId = vm.getHostId(); - if (agentId != null) { - // get nic secondary ip address - String privateIp = vm.getPrivateIpAddress(); - NicVO nic = _nicDao.findByIp4AddressAndVmId(privateIp, vm.getId()); - List nicSecIps = null; - if (nic != null) { - if (nic.getSecondaryIp()) { - //get secondary ips of the vm - long networkId = nic.getNetworkId(); - nicSecIps = _nicSecIpDao.getSecondaryIpAddressesForNic(nic.getId()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + UserVm vm = null; + Long seqnum = null; + + boolean locked = false; + try { + vm = _userVMDao.acquireInLockTable(work.getInstanceId()); + if (vm == null) { + vm = _userVMDao.findById(work.getInstanceId()); + if (vm == null) { + s_logger.info("VM " + work.getInstanceId() + " is removed"); + locked = true; + return; + } + s_logger.warn("Unable to acquire lock on vm id=" + userVmId); + return; + } + locked = true; + Long agentId = null; + VmRulesetLogVO log = _rulesetLogDao.findByVmId(userVmId); + if (log == null) { + s_logger.warn("Cannot find log record for vm id=" + userVmId); + return; + } + seqnum = log.getLogsequence(); + + if (vm != null && vm.getState() == State.Running) { + Map> ingressRules = generateRulesForVM(userVmId, SecurityRuleType.IngressRule); + Map> egressRules = generateRulesForVM(userVmId, SecurityRuleType.EgressRule); + agentId = vm.getHostId(); + if (agentId != null) { + // get nic secondary ip address + String privateIp = vm.getPrivateIpAddress(); + NicVO nic = _nicDao.findByIp4AddressAndVmId(privateIp, vm.getId()); + List nicSecIps = null; + if (nic != null) { + if (nic.getSecondaryIp()) { + //get secondary ips of the vm + long networkId = nic.getNetworkId(); + nicSecIps = 
_nicSecIpDao.getSecondaryIpAddressesForNic(nic.getId()); + } + } + SecurityGroupRulesCmd cmd = generateRulesetCmd( vm.getInstanceName(), vm.getPrivateIpAddress(), vm.getPrivateMacAddress(), vm.getId(), generateRulesetSignature(ingressRules, egressRules), seqnum, + ingressRules, egressRules, nicSecIps); + Commands cmds = new Commands(cmd); + try { + _agentMgr.send(agentId, cmds, _answerListener); + } catch (AgentUnavailableException e) { + s_logger.debug("Unable to send ingress rules updates for vm: " + userVmId + "(agentid=" + agentId + ")"); + _workDao.updateStep(work.getInstanceId(), seqnum, Step.Done); + } + } } - SecurityGroupRulesCmd cmd = generateRulesetCmd( vm.getInstanceName(), vm.getPrivateIpAddress(), vm.getPrivateMacAddress(), vm.getId(), generateRulesetSignature(ingressRules, egressRules), seqnum, - ingressRules, egressRules, nicSecIps); - Commands cmds = new Commands(cmd); - try { - _agentMgr.send(agentId, cmds, _answerListener); - } catch (AgentUnavailableException e) { - s_logger.debug("Unable to send ingress rules updates for vm: " + userVmId + "(agentid=" + agentId + ")"); - _workDao.updateStep(work.getInstanceId(), seqnum, Step.Done); + } finally { + if (locked) { + _userVMDao.releaseFromLockTable(userVmId); + _workDao.updateStep(work.getId(), Step.Done); } - } } - } finally { - if (locked) { - _userVMDao.releaseFromLockTable(userVmId); - _workDao.updateStep(work.getId(), Step.Done); - } - txn.commit(); - } + }); + } @Override @@ -1021,41 +1024,40 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro return false; } if (groups != null && !groups.isEmpty()) { - - final Transaction txn = Transaction.currentTxn(); - txn.start(); - UserVm userVm = _userVMDao.acquireInLockTable(userVmId); // ensures that duplicate entries are not created. 
- List sgs = new ArrayList(); - for (Long sgId : groups) { - sgs.add(_securityGroupDao.findById(sgId)); - } - final Set uniqueGroups = new TreeSet(new SecurityGroupVOComparator()); - uniqueGroups.addAll(sgs); - if (userVm == null) { - s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); - } - try { - for (SecurityGroupVO securityGroup : uniqueGroups) { - // don't let the group be deleted from under us. - SecurityGroupVO ngrpLock = _securityGroupDao.lockRow(securityGroup.getId(), false); - if (ngrpLock == null) { - s_logger.warn("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" + securityGroup.getName()); - txn.rollback(); - return false; + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + UserVm userVm = _userVMDao.acquireInLockTable(userVmId); // ensures that duplicate entries are not created. + List sgs = new ArrayList(); + for (Long sgId : groups) { + sgs.add(_securityGroupDao.findById(sgId)); } - if (_securityGroupVMMapDao.findByVmIdGroupId(userVmId, securityGroup.getId()) == null) { - SecurityGroupVMMapVO groupVmMapVO = new SecurityGroupVMMapVO(securityGroup.getId(), userVmId); - _securityGroupVMMapDao.persist(groupVmMapVO); + final Set uniqueGroups = new TreeSet(new SecurityGroupVOComparator()); + uniqueGroups.addAll(sgs); + if (userVm == null) { + s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); + } + try { + for (SecurityGroupVO securityGroup : uniqueGroups) { + // don't let the group be deleted from under us. 
+ SecurityGroupVO ngrpLock = _securityGroupDao.lockRow(securityGroup.getId(), false); + if (ngrpLock == null) { + s_logger.warn("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" + securityGroup.getName()); + throw new ConcurrentModificationException("Failed to acquire lock on network group id=" + securityGroup.getId() + " name=" + securityGroup.getName()); + } + if (_securityGroupVMMapDao.findByVmIdGroupId(userVmId, securityGroup.getId()) == null) { + SecurityGroupVMMapVO groupVmMapVO = new SecurityGroupVMMapVO(securityGroup.getId(), userVmId); + _securityGroupVMMapDao.persist(groupVmMapVO); + } + } + return true; + } finally { + if (userVm != null) { + _userVMDao.releaseFromLockTable(userVmId); + } } } - txn.commit(); - return true; - } finally { - if (userVm != null) { - _userVMDao.releaseFromLockTable(userVmId); - } - } - + }); } return false; @@ -1063,22 +1065,24 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro @Override @DB - public void removeInstanceFromGroups(long userVmId) { + public void removeInstanceFromGroups(final long userVmId) { if (_securityGroupVMMapDao.countSGForVm(userVmId) < 1) { s_logger.trace("No security groups found for vm id=" + userVmId + ", returning"); return; } - final Transaction txn = Transaction.currentTxn(); - txn.start(); - UserVm userVm = _userVMDao.acquireInLockTable(userVmId); // ensures that duplicate entries are not created in - // addInstance - if (userVm == null) { - s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); - } - int n = _securityGroupVMMapDao.deleteVM(userVmId); - s_logger.info("Disassociated " + n + " network groups " + " from uservm " + userVmId); - _userVMDao.releaseFromLockTable(userVmId); - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + UserVm userVm = _userVMDao.acquireInLockTable(userVmId); // ensures that duplicate 
entries are not created in + // addInstance + if (userVm == null) { + s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); + } + int n = _securityGroupVMMapDao.deleteVM(userVmId); + s_logger.info("Disassociated " + n + " network groups " + " from uservm " + userVmId); + _userVMDao.releaseFromLockTable(userVmId); + } + }); s_logger.debug("Security group mappings are removed successfully for vm id=" + userVmId); } @@ -1086,7 +1090,7 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro @Override @ActionEvent(eventType = EventTypes.EVENT_SECURITY_GROUP_DELETE, eventDescription = "deleting security group") public boolean deleteSecurityGroup(DeleteSecurityGroupCmd cmd) throws ResourceInUseException { - Long groupId = cmd.getId(); + final Long groupId = cmd.getId(); Account caller = CallContext.current().getCallingAccount(); SecurityGroupVO group = _securityGroupDao.findById(groupId); @@ -1097,32 +1101,34 @@ public class SecurityGroupManagerImpl extends ManagerBase implements SecurityGro // check permissions _accountMgr.checkAccess(caller, null, true, group); - final Transaction txn = Transaction.currentTxn(); - txn.start(); + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public Boolean doInTransaction(TransactionStatus status) throws ResourceInUseException { + SecurityGroupVO group = _securityGroupDao.lockRow(groupId, true); + if (group == null) { + throw new InvalidParameterValueException("Unable to find security group by id " + groupId); + } + + if (group.getName().equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { + throw new InvalidParameterValueException("The network group default is reserved"); + } + + List allowingRules = _securityGroupRuleDao.listByAllowedSecurityGroupId(groupId); + List securityGroupVmMap = _securityGroupVMMapDao.listBySecurityGroup(groupId); + if (!allowingRules.isEmpty()) { + throw new ResourceInUseException("Cannot delete group when there are security 
rules that allow this group"); + } else if (!securityGroupVmMap.isEmpty()) { + throw new ResourceInUseException("Cannot delete group when it's in use by virtual machines"); + } + + _securityGroupDao.expunge(groupId); - group = _securityGroupDao.lockRow(groupId, true); - if (group == null) { - throw new InvalidParameterValueException("Unable to find security group by id " + groupId); - } + s_logger.debug("Deleted security group id=" + groupId); + + return true; + } + }); - if (group.getName().equalsIgnoreCase(SecurityGroupManager.DEFAULT_GROUP_NAME)) { - throw new InvalidParameterValueException("The network group default is reserved"); - } - - List allowingRules = _securityGroupRuleDao.listByAllowedSecurityGroupId(groupId); - List securityGroupVmMap = _securityGroupVMMapDao.listBySecurityGroup(groupId); - if (!allowingRules.isEmpty()) { - throw new ResourceInUseException("Cannot delete group when there are security rules that allow this group"); - } else if (!securityGroupVmMap.isEmpty()) { - throw new ResourceInUseException("Cannot delete group when it's in use by virtual machines"); - } - - _securityGroupDao.expunge(groupId); - txn.commit(); - - s_logger.debug("Deleted security group id=" + groupId); - - return true; } diff --git a/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java b/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java index a42881ec905..2fee7f312c8 100644 --- a/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java +++ b/server/src/com/cloud/network/security/SecurityGroupManagerImpl2.java @@ -25,8 +25,10 @@ import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; import javax.ejb.Local; +import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.managed.context.ManagedContext; import org.springframework.context.annotation.Primary; import org.springframework.stereotype.Component; @@ -54,6 +56,9 @@ public class SecurityGroupManagerImpl2 
extends SecurityGroupManagerImpl{ SecurityGroupWorkTracker _workTracker; SecurityManagerMBeanImpl _mBean; + @Inject + ManagedContext _managedContext; + WorkerThread[] _workers; private Set _disabledVms = Collections.newSetFromMap(new ConcurrentHashMap()); private boolean _schedulerDisabled = false; @@ -68,7 +73,12 @@ public class SecurityGroupManagerImpl2 extends SecurityGroupManagerImpl{ public void run() { while (true) { try{ - work(); + _managedContext.runWithContext(new Runnable() { + @Override + public void run() { + work(); + } + }); } catch (final Throwable th) { s_logger.error("SG Work: Caught this throwable, ", th); } diff --git a/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java b/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java index 9923db56461..1ea5dd02d4f 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLManagerImpl.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.inject.Inject; import org.apache.log4j.Logger; - import org.apache.cloudstack.context.CallContext; import com.cloud.configuration.ConfigurationManager; @@ -48,6 +47,8 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; @@ -67,7 +68,6 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana NetworkACLDao _networkACLDao; @Inject NetworkACLItemDao _networkACLItemDao; - @Inject List _networkAclElements; @Inject NetworkModel _networkModel; @@ -214,30 +214,35 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana @Override @DB @ActionEvent(eventType = EventTypes.EVENT_NETWORK_ACL_ITEM_CREATE, eventDescription = "creating network ACL Item", create = true) - public NetworkACLItem 
createNetworkACLItem(Integer portStart, Integer portEnd, String protocol, List sourceCidrList, - Integer icmpCode, Integer icmpType, NetworkACLItem.TrafficType trafficType, Long aclId, - String action, Integer number) { - NetworkACLItem.Action ruleAction = NetworkACLItem.Action.Allow; - if("deny".equalsIgnoreCase(action)){ - ruleAction = NetworkACLItem.Action.Deny; - } + public NetworkACLItem createNetworkACLItem(final Integer portStart, final Integer portEnd, final String protocol, final List sourceCidrList, + final Integer icmpCode, final Integer icmpType, final NetworkACLItem.TrafficType trafficType, final Long aclId, + final String action, Integer number) { // If number is null, set it to currentMax + 1 (for backward compatibility) if(number == null){ number = _networkACLItemDao.getMaxNumberByACL(aclId) + 1; } - Transaction txn = Transaction.currentTxn(); - txn.start(); + final Integer numberFinal = number; + NetworkACLItemVO newRule = Transaction.execute(new TransactionCallback() { + @Override + public NetworkACLItemVO doInTransaction(TransactionStatus status) { + NetworkACLItem.Action ruleAction = NetworkACLItem.Action.Allow; + if("deny".equalsIgnoreCase(action)){ + ruleAction = NetworkACLItem.Action.Deny; + } - NetworkACLItemVO newRule = new NetworkACLItemVO(portStart, portEnd, protocol.toLowerCase(), aclId, sourceCidrList, icmpCode, icmpType, trafficType, ruleAction, number); - newRule = _networkACLItemDao.persist(newRule); + NetworkACLItemVO newRule = new NetworkACLItemVO(portStart, portEnd, protocol.toLowerCase(), aclId, sourceCidrList, icmpCode, icmpType, trafficType, ruleAction, numberFinal); + newRule = _networkACLItemDao.persist(newRule); - if (!_networkACLItemDao.setStateToAdd(newRule)) { - throw new CloudRuntimeException("Unable to update the state to add for " + newRule); - } - CallContext.current().setEventDetails("ACL Item Id: " + newRule.getId()); + if (!_networkACLItemDao.setStateToAdd(newRule)) { + throw new CloudRuntimeException("Unable to 
update the state to add for " + newRule); + } + CallContext.current().setEventDetails("ACL Item Id: " + newRule.getId()); + + return newRule; + } + }); - txn.commit(); return getNetworkACLItem(newRule.getId()); } @@ -468,4 +473,13 @@ public class NetworkACLManagerImpl extends ManagerBase implements NetworkACLMana return handled; } + public List getNetworkAclElements() { + return _networkAclElements; + } + + @Inject + public void setNetworkAclElements(List networkAclElements) { + this._networkAclElements = networkAclElements; + } + } diff --git a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java index 50dd8e30741..d12e57701cc 100644 --- a/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java +++ b/server/src/com/cloud/network/vpc/NetworkACLServiceImpl.java @@ -43,7 +43,7 @@ import com.cloud.network.dao.NetworkVO; import com.cloud.network.vpc.dao.NetworkACLDao; import com.cloud.network.vpc.dao.VpcGatewayDao; import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; @@ -484,7 +484,7 @@ public class NetworkACLServiceImpl extends ManagerBase implements NetworkACLServ if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.NetworkACL.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.NetworkACL.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); diff --git a/server/src/com/cloud/network/vpc/VpcManagerImpl.java b/server/src/com/cloud/network/vpc/VpcManagerImpl.java index 6d3223ece02..0b9d306143b 100644 --- 
a/server/src/com/cloud/network/vpc/VpcManagerImpl.java +++ b/server/src/com/cloud/network/vpc/VpcManagerImpl.java @@ -31,8 +31,6 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.log4j.Logger; - import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.api.command.user.vpc.ListPrivateGatewaysCmd; import org.apache.cloudstack.api.command.user.vpc.ListStaticRoutesCmd; @@ -40,6 +38,8 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; +import org.apache.log4j.Logger; import com.cloud.configuration.Config; import com.cloud.configuration.ConfigurationManager; @@ -99,7 +99,7 @@ import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.org.Grouping; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.server.ConfigurationServer; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; @@ -118,9 +118,15 @@ import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; +import 
com.cloud.utils.exception.ExceptionUtil; import com.cloud.utils.net.NetUtils; import com.cloud.vm.ReservationContext; import com.cloud.vm.ReservationContextImpl; @@ -206,9 +212,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @DB public boolean configure(String name, Map params) throws ConfigurationException { //configure default vpc offering - Transaction txn = Transaction.currentTxn(); - txn.start(); - + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { if (_vpcOffDao.findByUniqueName(VpcOffering.defaultVPCOfferingName) == null) { s_logger.debug("Creating default VPC offering " + VpcOffering.defaultVPCOfferingName); @@ -248,8 +254,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis createVpcOffering(VpcOffering.defaultVPCNSOfferingName, VpcOffering.defaultVPCNSOfferingName, svcProviderMap, false, State.Enabled); } - - txn.commit(); + } + }); Map configs = _configDao.getConfiguration(params); String value = configs.get(Config.VpcCleanupInterval.key()); @@ -375,10 +381,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @DB - protected VpcOffering createVpcOffering(String name, String displayText, Map> svcProviderMap, boolean isDefault, State state) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + protected VpcOffering createVpcOffering(final String name, final String displayText, final Map> svcProviderMap, final boolean isDefault, final State state) { + return Transaction.execute(new TransactionCallback() { + @Override + public VpcOffering doInTransaction(TransactionStatus status) { // create vpc offering object VpcOfferingVO offering = new VpcOfferingVO(name, displayText, isDefault, null); @@ -402,10 +409,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } } } - txn.commit(); return offering; } + }); + } @Override 
public Vpc getActiveVpc(long vpcId) { @@ -635,8 +643,8 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @DB - protected Vpc createVpc(long zoneId, long vpcOffId, Account vpcOwner, String vpcName, String displayText, String cidr, - String networkDomain) { + protected Vpc createVpc(final long zoneId, final long vpcOffId, final Account vpcOwner, final String vpcName, final String displayText, final String cidr, + final String networkDomain) { //Validate CIDR if (!NetUtils.isValidCIDR(cidr)) { @@ -657,31 +665,26 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis + "and the hyphen ('-'); can't start or end with \"-\""); } - Transaction txn = Transaction.currentTxn(); - txn.start(); + return Transaction.execute(new TransactionCallback() { + @Override + public VpcVO doInTransaction(TransactionStatus status) { VpcVO vpc = new VpcVO (zoneId, vpcName, displayText, vpcOwner.getId(), vpcOwner.getDomainId(), vpcOffId, cidr, networkDomain); vpc = _vpcDao.persist(vpc, finalizeServicesAndProvidersForVpc(zoneId, vpcOffId)); _resourceLimitMgr.incrementResourceCount(vpcOwner.getId(), ResourceType.vpc); - txn.commit(); s_logger.debug("Created VPC " + vpc); return vpc; } + }); + } - private Map finalizeServicesAndProvidersForVpc(long zoneId, long offeringId) { - Map svcProviders = new HashMap(); - Map> providerSvcs = new HashMap>(); + private Map> finalizeServicesAndProvidersForVpc(long zoneId, long offeringId) { + Map> svcProviders = new HashMap>(); List servicesMap = _vpcOffSvcMapDao.listByVpcOffId(offeringId); for (VpcOfferingServiceMapVO serviceMap : servicesMap) { - if (svcProviders.containsKey(serviceMap.getService())) { - // FIXME - right now we pick up the first provider from the list, need to add more logic based on - // provider load, etc - continue; - } - String service = serviceMap.getService(); String provider = serviceMap.getProvider(); @@ -696,12 +699,14 @@ public class VpcManagerImpl extends ManagerBase 
implements VpcManager, VpcProvis " should be enabled in at least one physical network of the zone specified"); } - svcProviders.put(service, provider); - List l = providerSvcs.get(provider); - if (l == null) { - providerSvcs.put(provider, l = new ArrayList()); + List providers = null; + if (svcProviders.get(service) == null) { + providers = new ArrayList(); + } else { + providers = svcProviders.get(service); } - l.add(service); + providers.add(provider); + svcProviders.put(service, providers); } return svcProviders; @@ -727,7 +732,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override @DB - public boolean destroyVpc(Vpc vpc, Account caller, Long callerUserId) throws ConcurrentOperationException, ResourceUnavailableException { + public boolean destroyVpc(final Vpc vpc, Account caller, Long callerUserId) throws ConcurrentOperationException, ResourceUnavailableException { s_logger.debug("Destroying vpc " + vpc); //don't allow to delete vpc if it's in use by existing non system networks (system networks are networks of a private gateway of the VPC, @@ -740,16 +745,18 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis //mark VPC as inactive if (vpc.getState() != Vpc.State.Inactive) { s_logger.debug("Updating VPC " + vpc + " with state " + Vpc.State.Inactive + " as a part of vpc delete"); - VpcVO vpcVO = _vpcDao.findById(vpc.getId()); + final VpcVO vpcVO = _vpcDao.findById(vpc.getId()); vpcVO.setState(Vpc.State.Inactive); - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { _vpcDao.update(vpc.getId(), vpcVO); //decrement resource count _resourceLimitMgr.decrementResourceCount(vpc.getAccountId(), ResourceType.vpc); - txn.commit(); + } + }); } //shutdown VPC @@ -868,7 +875,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, 
VpcProvis if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.Vpc.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.Vpc.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -1145,10 +1152,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } @DB - protected void validateNewVpcGuestNetwork(String cidr, String gateway, Account networkOwner, Vpc vpc, String networkDomain) { + protected void validateNewVpcGuestNetwork(final String cidr, final String gateway, final Account networkOwner, final Vpc vpc, final String networkDomain) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { Vpc locked = _vpcDao.acquireInLockTable(vpc.getId()); if (locked == null) { throw new CloudRuntimeException("Unable to acquire lock on " + vpc); @@ -1200,16 +1208,16 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (NetUtils.getCidrSubNet(cidr).equalsIgnoreCase(gateway)) { throw new InvalidParameterValueException("Invalid gateway specified. 
It should never be equal to the cidr subnet value"); } - - txn.commit(); } finally { s_logger.debug("Releasing lock for " + locked); _vpcDao.releaseFromLockTable(locked.getId()); } } + }); + } - protected List getVpcElements() { + public List getVpcElements() { if (vpcElements == null) { vpcElements = new ArrayList(); vpcElements.add((VpcProvider)_ntwkModel.getElementImplementingProvider(Provider.VPCVirtualRouter.getName())); @@ -1366,12 +1374,12 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override @DB @ActionEvent(eventType = EventTypes.EVENT_PRIVATE_GATEWAY_CREATE, eventDescription = "creating vpc private gateway", create=true) - public PrivateGateway createVpcPrivateGateway(long vpcId, Long physicalNetworkId, String broadcastUri, String ipAddress, - String gateway, String netmask, long gatewayOwnerId, Long networkOfferingId, Boolean isSourceNat, Long aclId) throws ResourceAllocationException, + public PrivateGateway createVpcPrivateGateway(final long vpcId, Long physicalNetworkId, final String broadcastUri, final String ipAddress, + final String gateway, final String netmask, final long gatewayOwnerId, final Long networkOfferingId, final Boolean isSourceNat, final Long aclId) throws ResourceAllocationException, ConcurrentOperationException, InsufficientCapacityException { //Validate parameters - Vpc vpc = getActiveVpc(vpcId); + final Vpc vpc = getActiveVpc(vpcId); if (vpc == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find Enabled VPC by id specified"); ex.addProxyObject(String.valueOf(vpcId), "VPC"); @@ -1392,10 +1400,15 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (physNet == null) { physNet = _entityMgr.findById(PhysicalNetwork.class,physicalNetworkId); } - Long dcId = physNet.getDataCenterId(); + final Long dcId = physNet.getDataCenterId(); - Transaction txn = Transaction.currentTxn(); - txn.start(); + final Long 
physicalNetworkIdFinal = physicalNetworkId; + final PhysicalNetwork physNetFinal = physNet; + VpcGatewayVO gatewayVO = null; + try { + gatewayVO = Transaction.execute(new TransactionCallbackWithException() { + @Override + public VpcGatewayVO doInTransaction(TransactionStatus status) throws ResourceAllocationException, ConcurrentOperationException, InsufficientCapacityException { s_logger.debug("Creating Private gateway for VPC " + vpc); //1) create private network unless it is existing and lswitch'd Network privateNtwk = null; @@ -1409,10 +1422,10 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (privateNtwk == null) { s_logger.info("creating new network for vpc " + vpc + " using broadcast uri: " + broadcastUri); String networkName = "vpc-" + vpc.getName() + "-privateNetwork"; - privateNtwk = _ntwkSvc.createPrivateNetwork(networkName, networkName, physicalNetworkId, + privateNtwk = _ntwkSvc.createPrivateNetwork(networkName, networkName, physicalNetworkIdFinal, broadcastUri, ipAddress, null, gateway, netmask, gatewayOwnerId, vpcId, isSourceNat, networkOfferingId); } else { // create the nic/ip as createPrivateNetwork doesn''t do that work for us now - DataCenterVO dc = _dcDao.lockRow(physNet.getDataCenterId(), true); + DataCenterVO dc = _dcDao.lockRow(physNetFinal.getDataCenterId(), true); //add entry to private_ip_address table PrivateIpVO privateIp = _privateIpDao.findByIpAndSourceNetworkId(privateNtwk.getId(), ipAddress); @@ -1451,7 +1464,15 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis s_logger.debug("Created vpc gateway entry " + gatewayVO); - txn.commit(); + return gatewayVO; + } + }); + } catch (Exception e) { + ExceptionUtil.rethrowRuntime(e); + ExceptionUtil.rethrow(e, InsufficientCapacityException.class); + ExceptionUtil.rethrow(e, ResourceAllocationException.class); + throw new IllegalStateException(e); + } return getVpcPrivateGateway(gatewayVO.getId()); } @@ -1501,14 +1522,15 @@ 
public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @DB public boolean deleteVpcPrivateGateway(long gatewayId) throws ConcurrentOperationException, ResourceUnavailableException { - Transaction txn = Transaction.currentTxn(); - txn.start(); - VpcGatewayVO gatewayVO = _vpcGatewayDao.acquireInLockTable(gatewayId); + final VpcGatewayVO gatewayVO = _vpcGatewayDao.acquireInLockTable(gatewayId); if (gatewayVO == null || gatewayVO.getType() != VpcGateway.Type.Private) { throw new ConcurrentOperationException("Unable to lock gateway " + gatewayId); } try { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { //don't allow to remove gateway when there are static routes associated with it long routeCount = _staticRouteDao.countRoutesByGateway(gatewayVO.getId()); if (routeCount > 0) { @@ -1519,8 +1541,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis gatewayVO.setState(VpcGateway.State.Deleting); _vpcGatewayDao.update(gatewayVO.getId(), gatewayVO); s_logger.debug("Marked gateway " + gatewayVO + " with state " + VpcGateway.State.Deleting); + } + }); - txn.commit(); //1) delete the gateway on the backend PrivateGateway gateway = getVpcPrivateGateway(gatewayId); @@ -1544,9 +1567,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } @DB - protected boolean deletePrivateGatewayFromTheDB(PrivateGateway gateway) { + protected boolean deletePrivateGatewayFromTheDB(final PrivateGateway gateway) { //check if there are ips allocted in the network - long networkId = gateway.getNetworkId(); + final long networkId = gateway.getNetworkId(); boolean deleteNetwork = true; List privateIps = _privateIpDao.listByNetworkId(networkId); @@ -1555,16 +1578,19 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis deleteNetwork = false; } - Transaction txn = Transaction.currentTxn(); - 
txn.start(); - + //TODO: Clean this up, its bad. There is a DB transaction wrapping calls to NetworkElements (destroyNetwork will + // call network elements). + final boolean deleteNetworkFinal = deleteNetwork; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { PrivateIpVO ip = _privateIpDao.findByIpAndVpcId(gateway.getVpcId(), gateway.getIp4Address()); if (ip != null) { _privateIpDao.remove(ip.getId()); s_logger.debug("Deleted private ip " + ip); } - if (deleteNetwork) { + if (deleteNetworkFinal) { User callerUser = _accountMgr.getActiveUser(CallContext.current().getCallingUserId()); Account owner = _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM); ReservationContext context = new ReservationContextImpl(null, null, callerUser, owner); @@ -1574,8 +1600,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis _vpcGatewayDao.remove(gateway.getId()); s_logger.debug("Deleted private gateway " + gateway); + } + }); - txn.commit(); return true; } @@ -1734,18 +1761,20 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } @DB - protected boolean revokeStaticRoutesForVpc(long vpcId, Account caller) throws ResourceUnavailableException { + protected boolean revokeStaticRoutesForVpc(long vpcId, final Account caller) throws ResourceUnavailableException { //get all static routes for the vpc - List routes = _staticRouteDao.listByVpcId(vpcId); + final List routes = _staticRouteDao.listByVpcId(vpcId); s_logger.debug("Found " + routes.size() + " to revoke for the vpc " + vpcId); if (!routes.isEmpty()) { //mark all of them as revoke - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { for (StaticRouteVO route : routes) { markStaticRouteForRevoke(route, caller); } - txn.commit(); + } + }); 
return applyStaticRoutes(vpcId); } @@ -1755,11 +1784,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @Override @DB @ActionEvent(eventType = EventTypes.EVENT_STATIC_ROUTE_CREATE, eventDescription = "creating static route", create=true) - public StaticRoute createStaticRoute(long gatewayId, String cidr) throws NetworkRuleConflictException { + public StaticRoute createStaticRoute(long gatewayId, final String cidr) throws NetworkRuleConflictException { Account caller = CallContext.current().getCallingAccount(); //parameters validation - VpcGateway gateway = _vpcGatewayDao.findById(gatewayId); + final VpcGateway gateway = _vpcGatewayDao.findById(gatewayId); if (gateway == null) { throw new InvalidParameterValueException("Invalid gateway id is given"); } @@ -1768,7 +1797,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis throw new InvalidParameterValueException("Gateway is not in the " + VpcGateway.State.Ready + " state: " + gateway.getState()); } - Vpc vpc = getActiveVpc(gateway.getVpcId()); + final Vpc vpc = getActiveVpc(gateway.getVpcId()); if (vpc == null) { throw new InvalidParameterValueException("Can't add static route to VPC that is being deleted"); } @@ -1794,9 +1823,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis throw new InvalidParameterValueException("The static gateway cidr overlaps with one of the blacklisted routes of the zone the VPC belongs to"); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public StaticRouteVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { StaticRouteVO newRoute = new StaticRouteVO(gateway.getId(), cidr, vpc.getId(), vpc.getAccountId(), vpc.getDomainId()); s_logger.debug("Adding static route " + newRoute); newRoute = _staticRouteDao.persist(newRoute); @@ -1808,10 +1837,10 @@ public class 
VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } CallContext.current().setEventDetails("Static route Id: " + newRoute.getId()); - txn.commit(); - return newRoute; } + }); + } protected boolean isCidrBlacklisted(String cidr, long zoneId) { String routesStr = NetworkOrchestrationService.GuestDomainSuffix.valueIn(zoneId); @@ -1889,7 +1918,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.StaticRoute.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.StaticRoute.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -1937,9 +1966,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis } } - protected class VpcCleanupTask implements Runnable { + protected class VpcCleanupTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { GlobalLock lock = GlobalLock.getInternLock("VpcCleanup"); if (lock == null) { @@ -1952,10 +1981,10 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return; } - Transaction txn = null; try { - txn = Transaction.open(Transaction.CLOUD_DB); - + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws Exception { // Cleanup inactive VPCs List inactiveVpcs = _vpcDao.listInactiveVpcs(); s_logger.info("Found " + inactiveVpcs.size() + " removed VPCs to cleanup"); @@ -1963,12 +1992,11 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis s_logger.debug("Cleaning up " + vpc); destroyVpc(vpc, _accountMgr.getAccount(Account.ACCOUNT_ID_SYSTEM), 
User.UID_SYSTEM); } + } + }); } catch (Exception e) { s_logger.error("Exception ", e); } finally { - if (txn != null) { - txn.close(); - } lock.unlock(); } } catch (Exception e) { @@ -1981,7 +2009,7 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis @DB @Override @ActionEvent(eventType = EventTypes.EVENT_NET_IP_ASSIGN, eventDescription = "associating Ip", async = true) - public IpAddress associateIPToVpc(long ipId, long vpcId) throws ResourceAllocationException, ResourceUnavailableException, + public IpAddress associateIPToVpc(final long ipId, final long vpcId) throws ResourceAllocationException, ResourceUnavailableException, InsufficientAddressCapacityException, ConcurrentOperationException { Account caller = CallContext.current().getCallingAccount(); Account owner = null; @@ -2010,17 +2038,20 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis s_logger.debug("Associating ip " + ipToAssoc + " to vpc " + vpc); - Transaction txn = Transaction.currentTxn(); - txn.start(); + final boolean isSourceNatFinal = isSourceNat; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { IPAddressVO ip = _ipAddressDao.findById(ipId); //update ip address with networkId ip.setVpcId(vpcId); - ip.setSourceNat(isSourceNat); + ip.setSourceNat(isSourceNatFinal); _ipAddressDao.update(ipId, ip); //mark ip as allocated _ipAddrMgr.markPublicIpAsAllocated(ip); - txn.commit(); + } + }); s_logger.debug("Successfully assigned ip " + ipToAssoc + " to vpc " + vpc); @@ -2183,4 +2214,9 @@ public class VpcManagerImpl extends ManagerBase implements VpcManager, VpcProvis return new ArrayList(providers.values()); } + + @Inject + public void setVpcElements(List vpcElements) { + this.vpcElements = vpcElements; + } } diff --git a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java 
index 6cd164dc3f1..504e5e88f45 100755 --- a/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/RemoteAccessVpnManagerImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; import org.apache.log4j.Logger; - import org.apache.cloudstack.api.command.user.vpn.ListRemoteAccessVpnsCmd; import org.apache.cloudstack.api.command.user.vpn.ListVpnUsersCmd; import org.apache.cloudstack.context.CallContext; @@ -64,6 +63,8 @@ import com.cloud.network.rules.FirewallRule; import com.cloud.network.rules.FirewallRule.Purpose; import com.cloud.network.rules.FirewallRuleVO; import com.cloud.network.rules.RulesManager; +import com.cloud.network.vpc.Vpc; +import com.cloud.network.vpc.dao.VpcDao; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.server.ConfigurationServer; import com.cloud.user.Account; @@ -80,8 +81,12 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.net.NetUtils; @Local(value = RemoteAccessVpnService.class) @@ -106,7 +111,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc @Inject ConfigurationDao _configDao; @Inject List _vpnServiceProviders; @Inject ConfigurationServer _configServer; - + @Inject VpcDao _vpcDao; int _userLimit; int _pskLength; @@ -114,13 +119,15 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc @Override @DB - public RemoteAccessVpn createRemoteAccessVpn(long publicIpId, String ipRange, boolean openFirewall, long networkId) + public 
RemoteAccessVpn createRemoteAccessVpn(final long publicIpId, String ipRange, boolean openFirewall) throws NetworkRuleConflictException { CallContext ctx = CallContext.current(); - Account caller = ctx.getCallingAccount(); + final Account caller = ctx.getCallingAccount(); + + Long networkId = null; // make sure ip address exists - PublicIpAddress ipAddr = _networkMgr.getPublicIpAddress(publicIpId); + final PublicIpAddress ipAddr = _networkMgr.getPublicIpAddress(publicIpId); if (ipAddr == null) { throw new InvalidParameterValueException("Unable to create remote access vpn, invalid public IP address id" + publicIpId); } @@ -132,7 +139,26 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc } IPAddressVO ipAddress = _ipAddressDao.findById(publicIpId); - _networkMgr.checkIpForService(ipAddress, Service.Vpn, null); + + networkId = ipAddress.getAssociatedWithNetworkId(); + if (networkId != null) { + _networkMgr.checkIpForService(ipAddress, Service.Vpn, null); + } + + final Long vpcId = ipAddress.getVpcId(); + /* IP Address used for VPC must be the source NAT IP of whole VPC */ + if (vpcId != null && ipAddress.isSourceNat()) { + assert networkId == null; + // No firewall setting for VPC, it would be open internally + openFirewall = false; + } + + final boolean openFirewallFinal = openFirewall; + + if (networkId == null && vpcId == null) { + throw new InvalidParameterValueException("Unable to create remote access vpn for the ipAddress: " + ipAddr.getAddress().addr() + + " as ip is not associated with any network or VPC"); + } RemoteAccessVpnVO vpnVO = _remoteAccessVpnDao.findByPublicIpAddress(publicIpId); @@ -144,26 +170,10 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc throw new InvalidParameterValueException("A Remote Access VPN already exists for this public Ip address"); } - // TODO: assumes one virtual network / domr per account per zone - vpnVO = 
_remoteAccessVpnDao.findByAccountAndNetwork(ipAddr.getAccountId(), networkId); - if (vpnVO != null) { - //if vpn is in Added state, return it to the api - if (vpnVO.getState() == RemoteAccessVpn.State.Added) { - return vpnVO; - } - throw new InvalidParameterValueException("A Remote Access VPN already exists for this account"); - } - - //Verify that vpn service is enabled for the network - Network network = _networkMgr.getNetwork(networkId); - if (!_networkMgr.areServicesSupportedInNetwork(network.getId(), Service.Vpn)) { - throw new InvalidParameterValueException("Vpn service is not supported in network id=" + ipAddr.getAssociatedWithNetworkId()); - } - if (ipRange == null) { ipRange = RemoteAccessVpnClientIpRange.valueIn(ipAddr.getAccountId()); } - String[] range = ipRange.split("-"); + final String[] range = ipRange.split("-"); if (range.length != 2) { throw new InvalidParameterValueException("Invalid ip range"); } @@ -174,7 +184,28 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc throw new InvalidParameterValueException("Invalid ip range " + ipRange); } - Pair cidr = NetUtils.getCidr(network.getCidr()); + Pair cidr = null; + + // TODO: assumes one virtual network / domr per account per zone + if (networkId != null) { + vpnVO = _remoteAccessVpnDao.findByAccountAndNetwork(ipAddr.getAccountId(), networkId); + if (vpnVO != null) { + //if vpn is in Added state, return it to the api + if (vpnVO.getState() == RemoteAccessVpn.State.Added) { + return vpnVO; + } + throw new InvalidParameterValueException("A Remote Access VPN already exists for this account"); + } + //Verify that vpn service is enabled for the network + Network network = _networkMgr.getNetwork(networkId); + if (!_networkMgr.areServicesSupportedInNetwork(network.getId(), Service.Vpn)) { + throw new InvalidParameterValueException("Vpn service is not supported in network id=" + ipAddr.getAssociatedWithNetworkId()); + } + cidr = NetUtils.getCidr(network.getCidr()); + } else { 
// Don't need to check VPC because there is only one IP(source NAT IP) available for VPN + Vpc vpc = _vpcDao.findById(vpcId); + cidr = NetUtils.getCidr(vpc.getCidr()); + } // FIXME: This check won't work for the case where the guest ip range // changes depending on the vlan allocated. @@ -187,19 +218,20 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc // TODO: check overlap with private and public ip ranges in datacenter long startIp = NetUtils.ip2Long(range[0]); - String newIpRange = NetUtils.long2Ip(++startIp) + "-" + range[1]; - String sharedSecret = PasswordGenerator.generatePresharedKey(_pskLength); - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - _rulesMgr.reservePorts(ipAddr, NetUtils.UDP_PROTO, Purpose.Vpn, openFirewall, caller, NetUtils.VPN_PORT, NetUtils.VPN_L2TP_PORT, NetUtils.VPN_NATT_PORT); - vpnVO = new RemoteAccessVpnVO(ipAddr.getAccountId(), ipAddr.getDomainId(), ipAddr.getAssociatedWithNetworkId(), - publicIpId, range[0], newIpRange, sharedSecret); - RemoteAccessVpn vpn = _remoteAccessVpnDao.persist(vpnVO); - - txn.commit(); - return vpn; + final String newIpRange = NetUtils.long2Ip(++startIp) + "-" + range[1]; + final String sharedSecret = PasswordGenerator.generatePresharedKey(_pskLength); + + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public RemoteAccessVpn doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + if (vpcId == null) { + _rulesMgr.reservePorts(ipAddr, NetUtils.UDP_PROTO, Purpose.Vpn, openFirewallFinal, caller, NetUtils.VPN_PORT, NetUtils.VPN_L2TP_PORT, NetUtils.VPN_NATT_PORT); + } + RemoteAccessVpnVO vpnVO = new RemoteAccessVpnVO(ipAddr.getAccountId(), ipAddr.getDomainId(), ipAddr.getAssociatedWithNetworkId(), + publicIpId, vpcId, range[0], newIpRange, sharedSecret); + return _remoteAccessVpnDao.persist(vpnVO); + } + }); } private void validateRemoteAccessVpnConfiguration() throws ConfigurationException { @@ -230,7 
+262,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc @Override @DB public void destroyRemoteAccessVpnForIp(long ipId, Account caller) throws ResourceUnavailableException { - RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByPublicIpAddress(ipId); + final RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByPublicIpAddress(ipId); if (vpn == null) { s_logger.debug("there are no Remote access vpns for public ip address id=" + ipId); return; @@ -238,16 +270,13 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc _accountMgr.checkAccess(caller, null, true, vpn); - Network network = _networkMgr.getNetwork(vpn.getNetworkId()); - vpn.setState(RemoteAccessVpn.State.Removed); _remoteAccessVpnDao.update(vpn.getId(), vpn); - boolean success = false; try { for (RemoteAccessVPNServiceProvider element : _vpnServiceProviders) { - if (element.stopVpn(network, vpn)) { + if (element.stopVpn(vpn)) { success = true; break; } @@ -255,28 +284,28 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc } finally { if (success) { //Cleanup corresponding ports - List vpnFwRules = _rulesDao.listByIpAndPurpose(ipId, Purpose.Vpn); - Transaction txn = Transaction.currentTxn(); - + final List vpnFwRules = _rulesDao.listByIpAndPurpose(ipId, Purpose.Vpn); + boolean applyFirewall = false; - List fwRules = new ArrayList(); + final List fwRules = new ArrayList(); //if related firewall rule is created for the first vpn port, it would be created for the 2 other ports as well, so need to cleanup the backend - if (_rulesDao.findByRelatedId(vpnFwRules.get(0).getId()) != null) { + if (vpnFwRules.size() != 0 && _rulesDao.findByRelatedId(vpnFwRules.get(0).getId()) != null) { applyFirewall = true; } if (applyFirewall) { - txn.start(); - - for (FirewallRule vpnFwRule : vpnFwRules) { - //don't apply on the backend yet; send all 3 rules in a banch - _firewallMgr.revokeRelatedFirewallRule(vpnFwRule.getId(), false); - 
fwRules.add(_rulesDao.findByRelatedId(vpnFwRule.getId())); - } - - s_logger.debug("Marked " + fwRules.size() + " firewall rules as Revoked as a part of disable remote access vpn"); - - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (FirewallRule vpnFwRule : vpnFwRules) { + //don't apply on the backend yet; send all 3 rules in a banch + _firewallMgr.revokeRelatedFirewallRule(vpnFwRule.getId(), false); + fwRules.add(_rulesDao.findByRelatedId(vpnFwRule.getId())); + } + + s_logger.debug("Marked " + fwRules.size() + " firewall rules as Revoked as a part of disable remote access vpn"); + } + }); //now apply vpn rules on the backend s_logger.debug("Reapplying firewall rules for ip id=" + ipId + " as a part of disable remote access vpn"); @@ -285,26 +314,28 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc if (success) { try { - txn.start(); - _remoteAccessVpnDao.remove(vpn.getId()); - // Stop billing of VPN users when VPN is removed. VPN_User_ADD events will be generated when VPN is created again - List vpnUsers = _vpnUsersDao.listByAccount(vpn.getAccountId()); - for(VpnUserVO user : vpnUsers){ - // VPN_USER_REMOVE event is already generated for users in Revoke state - if(user.getState() != VpnUser.State.Revoke){ - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_REMOVE, user.getAccountId(), - 0, user.getId(), user.getUsername(), user.getClass().getName(), user.getUuid()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _remoteAccessVpnDao.remove(vpn.getId()); + // Stop billing of VPN users when VPN is removed. 
VPN_User_ADD events will be generated when VPN is created again + List vpnUsers = _vpnUsersDao.listByAccount(vpn.getAccountId()); + for(VpnUserVO user : vpnUsers){ + // VPN_USER_REMOVE event is already generated for users in Revoke state + if(user.getState() != VpnUser.State.Revoke){ + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_REMOVE, user.getAccountId(), + 0, user.getId(), user.getUsername(), user.getClass().getName(), user.getUuid()); + } + } + if (vpnFwRules != null) { + for (FirewallRule vpnFwRule : vpnFwRules) { + _rulesDao.remove(vpnFwRule.getId()); + s_logger.debug("Successfully removed firewall rule with ip id=" + vpnFwRule.getSourceIpAddressId() + " and port " + vpnFwRule.getSourcePortStart() + " as a part of vpn cleanup"); + } + } } - } - if (vpnFwRules != null) { - for (FirewallRule vpnFwRule : vpnFwRules) { - _rulesDao.remove(vpnFwRule.getId()); - s_logger.debug("Successfully removed firewall rule with ip id=" + vpnFwRule.getSourceIpAddressId() + " and port " + vpnFwRule.getSourcePortStart() + " as a part of vpn cleanup"); - } - } - txn.commit(); + }); } catch (Exception ex) { - txn.rollback(); s_logger.warn("Unable to release the three vpn ports from the firewall rules", ex); } } @@ -314,8 +345,8 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc @Override @DB - public VpnUser addVpnUser(long vpnOwnerId, String username, String password) { - Account caller = CallContext.current().getCallingAccount(); + public VpnUser addVpnUser(final long vpnOwnerId, final String username, final String password) { + final Account caller = CallContext.current().getCallingAccount(); if (!username.matches("^[a-zA-Z0-9][a-zA-Z0-9@._-]{2,63}$")) { throw new InvalidParameterValueException( @@ -324,46 +355,54 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc if (!password.matches("^[a-zA-Z0-9][a-zA-Z0-9@#+=._-]{2,31}$")) { throw new InvalidParameterValueException("Password has to be 3-32 
characters including alphabets, numbers and the set '@#+=.-_'"); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - Account owner = _accountDao.lockRow(vpnOwnerId, true); - if (owner == null) { - throw new InvalidParameterValueException("Unable to add vpn user: Another operation active"); - } - _accountMgr.checkAccess(caller, null, true, owner); - //don't allow duplicated user names for the same account - VpnUserVO vpnUser = _vpnUsersDao.findByAccountAndUsername(owner.getId(), username); - if (vpnUser != null) { - throw new InvalidParameterValueException("VPN User with name " + username + " is already added for account " + owner); - } - - long userCount = _vpnUsersDao.getVpnUserCount(owner.getId()); - if (userCount >= _userLimit) { - throw new AccountLimitException("Cannot add more than " + _userLimit + " remote access vpn users"); - } - - VpnUser user = _vpnUsersDao.persist(new VpnUserVO(vpnOwnerId, owner.getDomainId(), username, password)); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_ADD, user.getAccountId(), 0, user.getId(), - user.getUsername(), user.getClass().getName(), user.getUuid()); - txn.commit(); - return user; + return Transaction.execute(new TransactionCallback() { + @Override + public VpnUser doInTransaction(TransactionStatus status) { + Account owner = _accountDao.lockRow(vpnOwnerId, true); + if (owner == null) { + throw new InvalidParameterValueException("Unable to add vpn user: Another operation active"); + } + _accountMgr.checkAccess(caller, null, true, owner); + + //don't allow duplicated user names for the same account + VpnUserVO vpnUser = _vpnUsersDao.findByAccountAndUsername(owner.getId(), username); + if (vpnUser != null) { + throw new InvalidParameterValueException("VPN User with name " + username + " is already added for account " + owner); + } + + long userCount = _vpnUsersDao.getVpnUserCount(owner.getId()); + if (userCount >= _userLimit) { + throw new AccountLimitException("Cannot add more than " + 
_userLimit + " remote access vpn users"); + } + + VpnUser user = _vpnUsersDao.persist(new VpnUserVO(vpnOwnerId, owner.getDomainId(), username, password)); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_ADD, user.getAccountId(), 0, user.getId(), + user.getUsername(), user.getClass().getName(), user.getUuid()); + + return user; + } + }); } @DB @Override public boolean removeVpnUser(long vpnOwnerId, String username, Account caller) { - VpnUserVO user = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, username); + final VpnUserVO user = _vpnUsersDao.findByAccountAndUsername(vpnOwnerId, username); if (user == null) { throw new InvalidParameterValueException("Could not find vpn user " + username); } _accountMgr.checkAccess(caller, null, true, user); - Transaction txn = Transaction.currentTxn(); - txn.start(); - user.setState(State.Revoke); - _vpnUsersDao.update(user.getId(), user); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_REMOVE, user.getAccountId(), 0, user.getId(), - user.getUsername(), user.getClass().getName(), user.getUuid()); - txn.commit(); + + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + user.setState(State.Revoke); + _vpnUsersDao.update(user.getId(), user); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_REMOVE, user.getAccountId(), 0, user.getId(), + user.getUsername(), user.getClass().getName(), user.getUuid()); + } + }); + return true; } @@ -379,15 +418,17 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc public RemoteAccessVpnVO startRemoteAccessVpn(long ipAddressId, boolean openFirewall) throws ResourceUnavailableException { Account caller = CallContext.current().getCallingAccount(); - RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByPublicIpAddress(ipAddressId); + final RemoteAccessVpnVO vpn = _remoteAccessVpnDao.findByPublicIpAddress(ipAddressId); if (vpn == null) { throw new 
InvalidParameterValueException("Unable to find your vpn: " + ipAddressId); } + + if (vpn.getVpcId() != null) { + openFirewall = false; + } _accountMgr.checkAccess(caller, null, true, vpn); - Network network = _networkMgr.getNetwork(vpn.getNetworkId()); - boolean started = false; try { boolean firewallOpened = true; @@ -397,7 +438,7 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc if (firewallOpened) { for (RemoteAccessVPNServiceProvider element : _vpnServiceProviders) { - if (element.startVpn(network, vpn)) { + if (element.startVpn(vpn)) { started = true; break; } @@ -407,20 +448,22 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc return vpn; } finally { if (started) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - vpn.setState(RemoteAccessVpn.State.Running); - _remoteAccessVpnDao.update(vpn.getId(), vpn); - - // Start billing of existing VPN users in ADD and Active state - List vpnUsers = _vpnUsersDao.listByAccount(vpn.getAccountId()); - for(VpnUserVO user : vpnUsers){ - if(user.getState() != VpnUser.State.Revoke){ - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_ADD, user.getAccountId(), 0, - user.getId(), user.getUsername(), user.getClass().getName(), user.getUuid()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + vpn.setState(RemoteAccessVpn.State.Running); + _remoteAccessVpnDao.update(vpn.getId(), vpn); + + // Start billing of existing VPN users in ADD and Active state + List vpnUsers = _vpnUsersDao.listByAccount(vpn.getAccountId()); + for(VpnUserVO user : vpnUsers){ + if(user.getState() != VpnUser.State.Revoke){ + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_ADD, user.getAccountId(), 0, + user.getId(), user.getUsername(), user.getClass().getName(), user.getUuid()); + } + } } - } - txn.commit(); + }); } } } @@ -479,7 +522,7 @@ public class 
RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc } for (int i = 0; i < finals.length; i++) { - VpnUserVO user = users.get(i); + final VpnUserVO user = users.get(i); if (finals[i]) { if (user.getState() == State.Add) { user.setState(State.Active); @@ -489,12 +532,14 @@ public class RemoteAccessVpnManagerImpl extends ManagerBase implements RemoteAcc } } else { if (user.getState() == State.Add && (user.getUsername()).equals(userName)) { - Transaction txn = Transaction.currentTxn(); - txn.start(); - _vpnUsersDao.remove(user.getId()); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_REMOVE, user.getAccountId(), - 0, user.getId(), user.getUsername(), user.getClass().getName(), user.getUuid()); - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _vpnUsersDao.remove(user.getId()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VPN_USER_REMOVE, user.getAccountId(), + 0, user.getId(), user.getUsername(), user.getClass().getName(), user.getUuid()); + } + }); } s_logger.warn("Failed to apply vpn for user " + user.getUsername() + ", accountId=" + user.getAccountId()); } diff --git a/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java b/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java index d66fd7b4fce..f29a8c87054 100644 --- a/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java +++ b/server/src/com/cloud/network/vpn/Site2SiteVpnManagerImpl.java @@ -37,7 +37,6 @@ import org.apache.cloudstack.api.command.user.vpn.ResetVpnConnectionCmd; import org.apache.cloudstack.api.command.user.vpn.UpdateVpnCustomerGatewayCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -86,7 +85,7 @@ import com.cloud.vm.DomainRouterVO; public class Site2SiteVpnManagerImpl 
extends ManagerBase implements Site2SiteVpnManager { private static final Logger s_logger = Logger.getLogger(Site2SiteVpnManagerImpl.class); - @Inject List _s2sProviders; + List _s2sProviders; @Inject Site2SiteCustomerGatewayDao _customerGatewayDao; @Inject Site2SiteVpnGatewayDao _vpnGatewayDao; @Inject Site2SiteVpnConnectionDao _vpnConnectionDao; @@ -761,4 +760,13 @@ public class Site2SiteVpnManagerImpl extends ManagerBase implements Site2SiteVpn } } } + + public List getS2sProviders() { + return _s2sProviders; + } + + @Inject + public void setS2sProviders(List s2sProviders) { + this._s2sProviders = s2sProviders; + } } diff --git a/server/src/com/cloud/projects/ProjectManagerImpl.java b/server/src/com/cloud/projects/ProjectManagerImpl.java index b4987cbde0f..b97f1e8c610 100755 --- a/server/src/com/cloud/projects/ProjectManagerImpl.java +++ b/server/src/com/cloud/projects/ProjectManagerImpl.java @@ -42,7 +42,7 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; - +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -82,8 +82,11 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; - import com.sun.mail.smtp.SMTPMessage; import com.sun.mail.smtp.SMTPSSLTransport; import com.sun.mail.smtp.SMTPTransport; @@ -176,7 +179,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { @Override 
@ActionEvent(eventType = EventTypes.EVENT_PROJECT_CREATE, eventDescription = "creating project", create=true) @DB - public Project createProject(String name, String displayText, String accountName, Long domainId) throws ResourceAllocationException{ + public Project createProject(final String name, final String displayText, String accountName, final Long domainId) throws ResourceAllocationException{ Account caller = CallContext.current().getCallingAccount(); Account owner = caller; @@ -202,31 +205,33 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { //do resource limit check _resourceLimitMgr.checkResourceLimit(owner, ResourceType.project); - Transaction txn = Transaction.currentTxn(); - txn.start(); + final Account ownerFinal = owner; + return Transaction.execute(new TransactionCallback() { + @Override + public Project doInTransaction(TransactionStatus status) { //Create an account associated with the project StringBuilder acctNm = new StringBuilder("PrjAcct-"); - acctNm.append(name).append("-").append(owner.getDomainId()); + acctNm.append(name).append("-").append(ownerFinal.getDomainId()); Account projectAccount = _accountMgr.createAccount(acctNm.toString(), Account.ACCOUNT_TYPE_PROJECT, domainId, null, null, UUID.randomUUID().toString()); - Project project = _projectDao.persist(new ProjectVO(name, displayText, owner.getDomainId(), projectAccount.getId())); + Project project = _projectDao.persist(new ProjectVO(name, displayText, ownerFinal.getDomainId(), projectAccount.getId())); //assign owner to the project - assignAccountToProject(project, owner.getId(), ProjectAccount.Role.Admin); + assignAccountToProject(project, ownerFinal.getId(), ProjectAccount.Role.Admin); if (project != null) { CallContext.current().setEventDetails("Project id=" + project.getId()); } //Increment resource count - _resourceLimitMgr.incrementResourceCount(owner.getId(), ResourceType.project); - - txn.commit(); + 
_resourceLimitMgr.incrementResourceCount(ownerFinal.getId(), ResourceType.project); return project; } + }); + } @Override @@ -269,10 +274,11 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { @DB @Override - public boolean deleteProject(Account caller, long callerUserId, ProjectVO project) { + public boolean deleteProject(Account caller, long callerUserId, final ProjectVO project) { //mark project as inactive first, so you can't add resources to it - Transaction txn = Transaction.currentTxn(); - txn.start(); + boolean updateResult = Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { s_logger.debug("Marking project id=" + project.getId() + " with state " + State.Disabled + " as a part of project delete..."); project.setState(State.Disabled); boolean updateResult = _projectDao.update(project.getId(), project); @@ -282,7 +288,10 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { _resourceLimitMgr.decrementResourceCount(projectOwner.getId(), ResourceType.project); } - txn.commit(); + return updateResult; + } + }); + if (updateResult) { //pass system caller when clenaup projects account @@ -299,7 +308,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { } @DB - private boolean cleanupProject(Project project, AccountVO caller, Long callerUserId) { + private boolean cleanupProject(final Project project, AccountVO caller, Long callerUserId) { boolean result=true; //Delete project's account AccountVO account = _accountDao.findById(project.getProjectAccountId()); @@ -309,10 +318,10 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { if (result) { //Unassign all users from the project - - Transaction txn = Transaction.currentTxn(); - txn.start(); - + result = Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + 
boolean result = true; s_logger.debug("Unassigning all accounts from project " + project + " as a part of project cleanup..."); List projectAccounts = _projectAccountDao.listByProjectId(project.getId()); for (ProjectAccount projectAccount : projectAccounts) { @@ -322,7 +331,9 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { s_logger.debug("Removing all invitations for the project " + project + " as a part of project cleanup..."); _projectInvitationDao.cleanupInvitations(project.getId()); - txn.commit(); + return result; + } + }); if (result) { s_logger.debug("Accounts are unassign successfully from project " + project + " as a part of project cleanup..."); } @@ -367,10 +378,11 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { } @Override @DB - public boolean deleteAccountFromProject(long projectId, long accountId) { + public boolean deleteAccountFromProject(final long projectId, final long accountId) { + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { boolean success = true; - Transaction txn = Transaction.currentTxn(); - txn.start(); //remove account ProjectAccountVO projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountId); @@ -385,9 +397,10 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { } } - txn.commit(); return success; } + }); + } @Override public Account getProjectOwner(long projectId) { @@ -443,11 +456,11 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { @Override @DB @ActionEvent(eventType = EventTypes.EVENT_PROJECT_UPDATE, eventDescription = "updating project", async=true) - public Project updateProject(long projectId, String displayText, String newOwnerName) throws ResourceAllocationException{ + public Project updateProject(final long projectId, final String displayText, final String newOwnerName) throws 
ResourceAllocationException{ Account caller = CallContext.current().getCallingAccount(); //check that the project exists - ProjectVO project = getProject(projectId); + final ProjectVO project = getProject(projectId); if (project == null) { throw new InvalidParameterValueException("Unable to find the project id=" + projectId); @@ -456,8 +469,9 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { //verify permissions _accountMgr.checkAccess(caller,AccessType.ModifyProject, true, _accountMgr.getAccount(project.getProjectAccountId())); - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws ResourceAllocationException { if (displayText != null) { project.setDisplayText(displayText); _projectDao.update(projectId, project); @@ -495,8 +509,9 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { s_logger.trace("Future owner " + newOwnerName + "is already the owner of the project id=" + projectId); } } + } + }); - txn.commit(); return _projectDao.findById(projectId); @@ -650,9 +665,10 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { } @DB - public boolean activeInviteExists(Project project, Long accountId, String email) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + public boolean activeInviteExists(final Project project, final Long accountId, final String email) { + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { //verify if the invitation was already generated ProjectInvitationVO invite = null; if (accountId != null) { @@ -679,9 +695,11 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { _projectInvitationDao.expunge(invite.getId()); } } - txn.commit(); + return false; } + }); + } public 
ProjectInvitation generateTokenBasedInvitation(Project project, String email, String token) { //verify if the invitation was already generated @@ -711,7 +729,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { @Override @DB @ActionEvent(eventType = EventTypes.EVENT_PROJECT_INVITATION_UPDATE, eventDescription = "updating project invitation", async=true) - public boolean updateInvitation(long projectId, String accountName, String token, boolean accept) { + public boolean updateInvitation(final long projectId, String accountName, String token, final boolean accept) { Account caller = CallContext.current().getCallingAccount(); Long accountId = null; boolean result = true; @@ -722,7 +740,7 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { } //check that the project exists - Project project = getProject(projectId); + final Project project = getProject(projectId); if (project == null) { throw new InvalidParameterValueException("Unable to find the project id=" + projectId); @@ -756,29 +774,37 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { expireInvitation(invite); throw new InvalidParameterValueException("Invitation is expired for account id=" + accountName + " to the project id=" + projectId); } else { - Transaction txn = Transaction.currentTxn(); - txn.start(); + + final ProjectInvitationVO inviteFinal = invite; + final Long accountIdFinal = accountId; + final String accountNameFinal = accountName; + result = Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + boolean result = true; ProjectInvitation.State newState = accept ? 
ProjectInvitation.State.Completed : ProjectInvitation.State.Declined; //update invitation - s_logger.debug("Marking invitation " + invite + " with state " + newState); - invite.setState(newState); - result = _projectInvitationDao.update(invite.getId(), invite); + s_logger.debug("Marking invitation " + inviteFinal + " with state " + newState); + inviteFinal.setState(newState); + result = _projectInvitationDao.update(inviteFinal.getId(), inviteFinal); if (result && accept) { //check if account already exists for the project (was added before invitation got accepted) - ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountId); + ProjectAccount projectAccount = _projectAccountDao.findByProjectIdAccountId(projectId, accountIdFinal); if (projectAccount != null) { - s_logger.debug("Account " + accountName + " already added to the project id=" + projectId); + s_logger.debug("Account " + accountNameFinal + " already added to the project id=" + projectId); } else { - assignAccountToProject(project, accountId, ProjectAccount.Role.Regular); + assignAccountToProject(project, accountIdFinal, ProjectAccount.Role.Regular); } } else { - s_logger.warn("Failed to update project invitation " + invite + " with state " + newState); + s_logger.warn("Failed to update project invitation " + inviteFinal + " with state " + newState); } - txn.commit(); + return result; + } + }); } } else { throw new InvalidParameterValueException("Unable to find invitation for account name=" + accountName + " to the project id=" + projectId); @@ -795,11 +821,11 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { @Override @ActionEvent(eventType = EventTypes.EVENT_PROJECT_ACTIVATE, eventDescription = "activating project") @DB - public Project activateProject(long projectId) { + public Project activateProject(final long projectId) { Account caller = CallContext.current().getCallingAccount(); //check that the project exists - ProjectVO 
project = getProject(projectId); + final ProjectVO project = getProject(projectId); if (project == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find project with specified id"); @@ -822,15 +848,15 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { throw new InvalidParameterValueException("Can't activate the project in " + currentState + " state"); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { project.setState(Project.State.Active); _projectDao.update(projectId, project); _accountMgr.enableAccount(project.getProjectAccountId()); - - txn.commit(); + } + }); return _projectDao.findById(projectId); } @@ -1003,9 +1029,9 @@ public class ProjectManagerImpl extends ManagerBase implements ProjectManager { } } - public class ExpiredInvitationsCleanup implements Runnable { + public class ExpiredInvitationsCleanup extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { TimeZone.getDefault(); List invitationsToExpire = _projectInvitationDao.listInvitationsToExpire(_invitationTimeOut); diff --git a/server/src/com/cloud/resource/ResourceManagerImpl.java b/server/src/com/cloud/resource/ResourceManagerImpl.java index b36e03ae62d..5682d6f292c 100755 --- a/server/src/com/cloud/resource/ResourceManagerImpl.java +++ b/server/src/com/cloud/resource/ResourceManagerImpl.java @@ -140,12 +140,14 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import 
com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; @@ -784,10 +786,10 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } @DB - protected boolean doDeleteHost(long hostId, boolean isForced, boolean isForceDeleteStorage) { + protected boolean doDeleteHost(final long hostId, boolean isForced, final boolean isForceDeleteStorage) { User caller = _accountMgr.getActiveUser(CallContext.current().getCallingUserId()); // Verify that host exists - HostVO host = _hostDao.findById(hostId); + final HostVO host = _hostDao.findById(hostId); if (host == null) { throw new InvalidParameterValueException("Host with id " + hostId + " doesn't exist"); } @@ -800,7 +802,7 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, // Get storage pool host mappings here because they can be removed as a // part of handleDisconnect later // TODO: find out the bad boy, what's a buggy logic! 
- List pools = _storagePoolHostDao.listByHostIdIncludingRemoved(hostId); + final List pools = _storagePoolHostDao.listByHostIdIncludingRemoved(hostId); ResourceStateAdapter.DeleteHostAnswer answer = (ResourceStateAdapter.DeleteHostAnswer)dispatchToStateAdapters(ResourceStateAdapter.Event.DELETE_HOST, false, host, new Boolean(isForced), new Boolean(isForceDeleteStorage)); @@ -818,8 +820,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, return true; } - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { _dcDao.releasePrivateIpAddress(host.getPrivateIpAddress(), host.getDataCenterId(), null); _agentMgr.disconnectWithoutInvestigation(hostId, Status.Event.Remove); @@ -885,7 +888,9 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (dr != null) { _dedicatedDao.remove(dr.getId()); } - txn.commit(); + } + }); + return true; } @@ -905,16 +910,16 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override @DB - public boolean deleteCluster(DeleteClusterCmd cmd) { - Transaction txn = Transaction.currentTxn(); + public boolean deleteCluster(final DeleteClusterCmd cmd) { try { - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { ClusterVO cluster = _clusterDao.lockRow(cmd.getId(), true); if (cluster == null) { if (s_logger.isDebugEnabled()) { s_logger.debug("Cluster: " + cmd.getId() + " does not even exist. 
Delete call is ignored."); } - txn.rollback(); throw new CloudRuntimeException("Cluster: " + cmd.getId() + " does not exist"); } @@ -925,7 +930,6 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (s_logger.isDebugEnabled()) { s_logger.debug("Cluster: " + cmd.getId() + " still has hosts, can't remove"); } - txn.rollback(); throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has hosts"); } @@ -936,7 +940,6 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (s_logger.isDebugEnabled()) { s_logger.debug("Cluster: " + cmd.getId() + " still has storage pools, can't remove"); } - txn.rollback(); throw new CloudRuntimeException("Cluster: " + cmd.getId() + " cannot be removed. Cluster still has storage pools"); } @@ -955,13 +958,13 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } } - txn.commit(); + } + }); return true; } catch (CloudRuntimeException e) { throw e; } catch (Throwable t) { s_logger.error("Unable to delete cluster: " + cmd.getId(), t); - txn.rollback(); return false; } } @@ -1035,26 +1038,15 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, } if (doUpdate) { - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); _clusterDao.update(cluster.getId(), cluster); - txn.commit(); - } catch (Exception e) { - s_logger.error("Unable to update cluster due to " + e.getMessage(), e); - throw new CloudRuntimeException("Failed to update cluster. 
Please contact Cloud Support."); - } } if (newManagedState != null && !newManagedState.equals(oldManagedState)) { - Transaction txn = Transaction.currentTxn(); if (newManagedState.equals(Managed.ManagedState.Unmanaged)) { boolean success = false; try { - txn.start(); cluster.setManagedState(Managed.ManagedState.PrepareUnmanaged); _clusterDao.update(cluster.getId(), cluster); - txn.commit(); List hosts = listAllUpAndEnabledHosts(Host.Type.Routing, cluster.getId(), cluster.getPodId(), cluster.getDataCenterId()); for (HostVO host : hosts) { if (host.getType().equals(Host.Type.Routing) && !host.getStatus().equals(Status.Down) && !host.getStatus().equals(Status.Disconnected) && @@ -1093,16 +1085,12 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, throw new CloudRuntimeException("PrepareUnmanaged Failed due to some hosts are still in UP status after 5 Minutes, please try later "); } } finally { - txn.start(); cluster.setManagedState(success ? Managed.ManagedState.Unmanaged : Managed.ManagedState.PrepareUnmanagedError); _clusterDao.update(cluster.getId(), cluster); - txn.commit(); } } else if (newManagedState.equals(Managed.ManagedState.Managed)) { - txn.start(); cluster.setManagedState(Managed.ManagedState.Managed); _clusterDao.update(cluster.getId(), cluster); - txn.commit(); } } @@ -2256,27 +2244,27 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public List findDirectlyConnectedHosts() { /* The resource column is not null for direct connected resource */ - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getResource(), Op.NNULL); - sc.addAnd(sc.getEntity().getResourceState(), Op.NIN, ResourceState.Disabled); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getResource(), Op.NNULL); + sc.and(sc.entity().getResourceState(), Op.NIN, ResourceState.Disabled); return sc.list(); } @Override public List 
listAllUpAndEnabledHosts(Type type, Long clusterId, Long podId, long dcId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); + QueryBuilder sc = QueryBuilder.create(HostVO.class); if (type != null) { - sc.addAnd(sc.getEntity().getType(), Op.EQ, type); + sc.and(sc.entity().getType(), Op.EQ,type); } if (clusterId != null) { - sc.addAnd(sc.getEntity().getClusterId(), Op.EQ, clusterId); + sc.and(sc.entity().getClusterId(), Op.EQ,clusterId); } if (podId != null) { - sc.addAnd(sc.getEntity().getPodId(), Op.EQ, podId); + sc.and(sc.entity().getPodId(), Op.EQ,podId); } - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); - sc.addAnd(sc.getEntity().getResourceState(), Op.EQ, ResourceState.Enabled); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); + sc.and(sc.entity().getStatus(), Op.EQ,Status.Up); + sc.and(sc.entity().getResourceState(), Op.EQ,ResourceState.Enabled); return sc.list(); } @@ -2288,60 +2276,60 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public List findHostByGuid(long dcId, String guid) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); - sc.addAnd(sc.getEntity().getGuid(), Op.EQ, guid); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); + sc.and(sc.entity().getGuid(), Op.EQ,guid); return sc.list(); } @Override public List listAllHostsInCluster(long clusterId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getClusterId(), Op.EQ, clusterId); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getClusterId(), Op.EQ,clusterId); return sc.list(); } @Override public List listHostsInClusterByStatus(long clusterId, Status status) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - 
sc.addAnd(sc.getEntity().getClusterId(), Op.EQ, clusterId); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, status); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getClusterId(), Op.EQ,clusterId); + sc.and(sc.entity().getStatus(), Op.EQ,status); return sc.list(); } @Override public List listAllUpAndEnabledHostsInOneZoneByType(Type type, long dcId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getType(), Op.EQ, type); - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); - sc.addAnd(sc.getEntity().getResourceState(), Op.EQ, ResourceState.Enabled); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getType(), Op.EQ,type); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); + sc.and(sc.entity().getStatus(), Op.EQ,Status.Up); + sc.and(sc.entity().getResourceState(), Op.EQ,ResourceState.Enabled); return sc.list(); } @Override public List listAllNotInMaintenanceHostsInOneZone(Type type, Long dcId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); + QueryBuilder sc = QueryBuilder.create(HostVO.class); if (dcId != null) { - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); } - sc.addAnd(sc.getEntity().getType(), Op.EQ, type); - sc.addAnd(sc.getEntity().getResourceState(), Op.NIN, ResourceState.Maintenance, ResourceState.ErrorInMaintenance, ResourceState.PrepareForMaintenance, ResourceState.Error); + sc.and(sc.entity().getType(), Op.EQ,type); + sc.and(sc.entity().getResourceState(), Op.NIN, ResourceState.Maintenance, ResourceState.ErrorInMaintenance, ResourceState.PrepareForMaintenance, ResourceState.Error); return sc.list(); } @Override public List listAllHostsInOneZoneByType(Type type, long dcId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getType(), Op.EQ, type); - 
sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getType(), Op.EQ,type); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); return sc.list(); } @Override public List listAllHostsInAllZonesByType(Type type) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getType(), Op.EQ, type); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getType(), Op.EQ,type); return sc.list(); } @@ -2370,15 +2358,15 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public HostVO findHostByGuid(String guid) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getGuid(), Op.EQ, guid); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getGuid(), Op.EQ,guid); return sc.find(); } @Override public HostVO findHostByName(String name) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getName(), Op.EQ, name); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getName(), Op.EQ,name); return sc.find(); } @@ -2449,21 +2437,22 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, @Override public List listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType type, long dcId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getHypervisorType(), Op.EQ, type); - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); - sc.addAnd(sc.getEntity().getStatus(), Op.EQ, Status.Up); - sc.addAnd(sc.getEntity().getResourceState(), Op.EQ, ResourceState.Enabled); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getHypervisorType(), Op.EQ,type); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); + sc.and(sc.entity().getStatus(), Op.EQ,Status.Up); + 
sc.and(sc.entity().getResourceState(), Op.EQ,ResourceState.Enabled); return sc.list(); } @Override @DB @ActionEvent(eventType = EventTypes.EVENT_HOST_RESERVATION_RELEASE, eventDescription = "releasing host reservation", async = true) - public boolean releaseHostReservation(Long hostId) { - Transaction txn = Transaction.currentTxn(); + public boolean releaseHostReservation(final Long hostId) { try { - txn.start(); + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { PlannerHostReservationVO reservationEntry = _plannerHostReserveDao.findByHostId(hostId); if (reservationEntry != null) { long id = reservationEntry.getId(); @@ -2472,24 +2461,33 @@ public class ResourceManagerImpl extends ManagerBase implements ResourceManager, if (s_logger.isDebugEnabled()) { s_logger.debug("Host reservation for host: " + hostId + " does not even exist. Release reservartion call is ignored."); } - txn.rollback(); return false; } hostReservation.setResourceUsage(null); _plannerHostReserveDao.persist(hostReservation); - txn.commit(); return true; } + if (s_logger.isDebugEnabled()) { s_logger.debug("Host reservation for host: " + hostId + " does not even exist. 
Release reservartion call is ignored."); } + return false; + } + }); } catch (CloudRuntimeException e) { throw e; } catch (Throwable t) { s_logger.error("Unable to release host reservation for host: " + hostId, t); - txn.rollback(); return false; } } + + @Override + public boolean start() { + // TODO Auto-generated method stub + return super.start(); + } + + } diff --git a/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java index c0d3cb96441..55097ce6068 100755 --- a/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java +++ b/server/src/com/cloud/resourcelimit/ResourceLimitManagerImpl.java @@ -32,9 +32,9 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -90,6 +90,9 @@ import com.cloud.utils.db.GenericSearchBuilder; import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.db.SearchCriteria.Func; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; @@ -362,8 +365,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim @Override @DB - public void checkResourceLimit(Account account, ResourceType type, long... count) throws ResourceAllocationException { - long numResources = ((count.length == 0) ? 
1 : count[0]); + public void checkResourceLimit(final Account account, final ResourceType type, long... count) throws ResourceAllocationException { + final long numResources = ((count.length == 0) ? 1 : count[0]); Project project = null; // Don't place any limits on system or root admin accounts @@ -375,9 +378,10 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim project = _projectDao.findByProjectAccountId(account.getId()); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - try { + final Project projectFinal = project; + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws ResourceAllocationException { // Lock all rows first so nobody else can read it Set rowIdsToLock = _resourceCountDao.listAllRowsToUpdate(account.getId(), ResourceOwnerType.Account, type); SearchCriteria sc = ResourceCountSearch.create(); @@ -390,8 +394,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim if (accountLimit != Resource.RESOURCE_UNLIMITED && potentialCount > accountLimit) { String message = "Maximum number of resources of type '" + type + "' for account name=" + account.getAccountName() + " in domain id=" + account.getDomainId() + " has been exceeded."; - if (project != null) { - message = "Maximum number of resources of type '" + type + "' for project name=" + project.getName() + if (projectFinal != null) { + message = "Maximum number of resources of type '" + type + "' for project name=" + projectFinal.getName() + " in domain id=" + account.getDomainId() + " has been exceeded."; } throw new ResourceAllocationException(message, type); @@ -399,8 +403,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim // check all domains in the account's domain hierarchy Long domainId = null; - if (project != null) { - domainId = project.getDomainId(); + if (projectFinal != null) { + 
domainId = projectFinal.getDomainId(); } else { domainId = account.getDomainId(); } @@ -419,9 +423,8 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim } domainId = domain.getParent(); } - } finally { - txn.commit(); } + }); } @Override @@ -717,12 +720,12 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim } @DB - protected boolean updateResourceCountForAccount(long accountId, ResourceType type, boolean increment, long delta) { - boolean result = true; + protected boolean updateResourceCountForAccount(final long accountId, final ResourceType type, final boolean increment, final long delta) { try { - Transaction txn = Transaction.currentTxn(); - txn.start(); - + return Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + boolean result = true; Set rowsToLock = _resourceCountDao.listAllRowsToUpdate(accountId, ResourceOwnerType.Account, type); // Lock rows first @@ -737,22 +740,22 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim } } - txn.commit(); + return result; + } + }); } catch (Exception ex) { s_logger.error("Failed to update resource count for account id=" + accountId); - result = false; + return false; } - return result; } @DB - protected long recalculateDomainResourceCount(long domainId, ResourceType type) { + protected long recalculateDomainResourceCount(final long domainId, final ResourceType type) { + return Transaction.execute(new TransactionCallback() { + @Override + public Long doInTransaction(TransactionStatus status) { long newCount = 0; - Transaction txn = Transaction.currentTxn(); - txn.start(); - - try { // Lock all rows first so nobody else can read it Set rowIdsToLock = _resourceCountDao.listAllRowsToUpdate(domainId, ResourceOwnerType.Domain, type); SearchCriteria sc = ResourceCountSearch.create(); @@ -790,22 +793,19 @@ public class ResourceLimitManagerImpl extends ManagerBase 
implements ResourceLim s_logger.info("Discrepency in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type + " for domain ID " + domainId + " is fixed during resource count recalculation."); } - } catch (Exception e) { - throw new CloudRuntimeException("Failed to update resource count for domain with Id " + domainId); - } finally { - txn.commit(); - } return newCount; } + }); + } @DB - protected long recalculateAccountResourceCount(long accountId, ResourceType type) { + protected long recalculateAccountResourceCount(final long accountId, final ResourceType type) { + Long newCount = Transaction.execute(new TransactionCallback() { + @Override + public Long doInTransaction(TransactionStatus status) { Long newCount = null; - Transaction txn = Transaction.currentTxn(); - txn.start(); - // this lock guards against the updates to user_vm, volume, snapshot, public _ip and template table // as any resource creation precedes with the resourceLimitExceeded check which needs this lock too SearchCriteria sc = ResourceCountSearch.create(); @@ -853,7 +853,10 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim s_logger.info("Discrepency in the resource count " + "(original count=" + oldCount + " correct count = " + newCount + ") for type " + type + " for account ID " + accountId + " is fixed during resource count recalculation."); } - txn.commit(); + + return newCount; + } + }); return (newCount == null) ? 
0 : newCount.longValue(); } @@ -940,13 +943,13 @@ public class ResourceLimitManagerImpl extends ManagerBase implements ResourceLim return _resourceCountDao.getResourceCount(account.getId(), ResourceOwnerType.Account, type); } - protected class ResourceCountCheckTask implements Runnable { + protected class ResourceCountCheckTask extends ManagedContextRunnable { public ResourceCountCheckTask() { } @Override - public void run() { + protected void runInContext() { s_logger.info("Running resource count check periodic task"); List domains = _domainDao.findImmediateChildrenForParent(DomainVO.ROOT_DOMAIN); diff --git a/server/src/com/cloud/server/ConfigurationServerImpl.java b/server/src/com/cloud/server/ConfigurationServerImpl.java index 4c5c8a1c1cf..0676db8ca18 100755 --- a/server/src/com/cloud/server/ConfigurationServerImpl.java +++ b/server/src/com/cloud/server/ConfigurationServerImpl.java @@ -42,14 +42,13 @@ import javax.crypto.SecretKey; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.io.FileUtils; -import org.apache.log4j.Logger; - import org.apache.cloudstack.framework.config.ConfigDepot; import org.apache.cloudstack.framework.config.ConfigDepotAdmin; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.impl.ConfigurationVO; +import org.apache.commons.codec.binary.Base64; +import org.apache.commons.io.FileUtils; +import org.apache.log4j.Logger; import com.cloud.cluster.ClusterManager; import com.cloud.configuration.Config; @@ -107,6 +106,10 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionLegacy; +import 
com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.Script; @@ -249,11 +252,9 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio // Create userIpAddress ranges // Update existing vlans with networkId - Transaction txn = Transaction.currentTxn(); - List vlans = _vlanDao.listAll(); if (vlans != null && !vlans.isEmpty()) { - for (VlanVO vlan : vlans) { + for (final VlanVO vlan : vlans) { if (vlan.getNetworkId().longValue() == 0) { updateVlanWithNetworkId(vlan); } @@ -261,15 +262,19 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio // Create vlan user_ip_address range String ipPange = vlan.getIpRange(); String[] range = ipPange.split("-"); - String startIp = range[0]; - String endIp = range[1]; + final String startIp = range[0]; + final String endIp = range[1]; + + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + IPRangeConfig config = new IPRangeConfig(); + long startIPLong = NetUtils.ip2Long(startIp); + long endIPLong = NetUtils.ip2Long(endIp); + config.savePublicIPRange(TransactionLegacy.currentTxn(), startIPLong, endIPLong, vlan.getDataCenterId(), vlan.getId(), vlan.getNetworkId(), vlan.getPhysicalNetworkId()); + } + }); - txn.start(); - IPRangeConfig config = new IPRangeConfig(); - long startIPLong = NetUtils.ip2Long(startIp); - long endIPLong = NetUtils.ip2Long(endIp); - config.savePublicIPRange(txn, startIPLong, endIPLong, vlan.getDataCenterId(), vlan.getId(), vlan.getNetworkId(), vlan.getPhysicalNetworkId()); - txn.commit(); } } } @@ -292,6 +297,8 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio updateCloudIdentifier(); _configDepotAdmin.populateConfigurations(); + // setup XenServer default PV driver version + initiateXenServerPVDriverVersion(); // We should not 
update seed data UUID column here since this will be invoked in upgrade case as well. //updateUuids(); @@ -302,6 +309,84 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio _configDao.invalidateCache(); } + + private void templateDetailsInitIfNotExist(long id, String name, String value) { + TransactionLegacy txn = TransactionLegacy.currentTxn(); + PreparedStatement stmt = null; + PreparedStatement stmtInsert = null; + boolean insert = false; + try { + txn.start(); + stmt = txn.prepareAutoCloseStatement("SELECT id FROM vm_template_details WHERE template_id=? and name=?"); + stmt.setLong(1, id); + stmt.setString(2, name); + ResultSet rs = stmt.executeQuery(); + if(rs == null || !rs.next()) { + insert = true; + } + stmt.close(); + + if ( insert ) { + stmtInsert = txn.prepareAutoCloseStatement( + "INSERT INTO vm_template_details(template_id, name, value) VALUES(?, ?, ?)"); + stmtInsert.setLong(1, id); + stmtInsert.setString(2, name); + stmtInsert.setString(3, value); + if(stmtInsert.executeUpdate() < 1) { + throw new CloudRuntimeException("Unable to init template " + id + " datails: " + name); + } + } + txn.commit(); + } catch (Exception e) { + s_logger.warn("Unable to init template " + id + " datails: " + name, e); + throw new CloudRuntimeException("Unable to init template " + id + " datails: " + name); + } + } + + private void initiateXenServerPVDriverVersion() { + String pvdriverversion = Config.XenPVdriverVersion.getDefaultValue(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); + PreparedStatement pstmt = null; + ResultSet rs1 = null; + ResultSet rs2 = null; + try { + String oldValue = _configDao.getValue(Config.XenPVdriverVersion.key()); + if ( oldValue == null ) { + String sql = "select resource from host where hypervisor_type='XenServer' and removed is null and status not in ('Error', 'Removed') group by resource" ; + pstmt = txn.prepareAutoCloseStatement(sql); + rs1 = pstmt.executeQuery(); + while (rs1.next()) { + 
String resouce = rs1.getString(1); //resource column + if ( resouce == null ) continue; + if ( resouce.equalsIgnoreCase("com.cloud.hypervisor.xen.resource.XenServer56Resource") + || resouce.equalsIgnoreCase("com.cloud.hypervisor.xen.resource.XenServer56FP1Resource") + || resouce.equalsIgnoreCase("com.cloud.hypervisor.xen.resource.XenServer56SP2Resource") + || resouce.equalsIgnoreCase("com.cloud.hypervisor.xen.resource.XenServer600Resource") + || resouce.equalsIgnoreCase("com.cloud.hypervisor.xen.resource.XenServer602Resource") ) { + pvdriverversion = "xenserver56"; + break; + } + } + _configDao.getValueAndInitIfNotExist(Config.XenPVdriverVersion.key(), + Config.XenPVdriverVersion.getCategory(), pvdriverversion, Config.XenPVdriverVersion.getDescription()); + sql = "select id from vm_template where hypervisor_type='XenServer' and format!='ISO' and removed is null"; + pstmt = txn.prepareAutoCloseStatement(sql); + rs2 = pstmt.executeQuery(); + List tmpl_ids = new ArrayList(); + while (rs2.next()) { + tmpl_ids.add(rs2.getLong(1)); + } + for( Long tmpl_id : tmpl_ids) { + templateDetailsInitIfNotExist(tmpl_id, "hypervisortoolsversion", pvdriverversion); + } + } + } catch (Exception e) { + s_logger.debug("initiateXenServerPVDriverVersion failed due to " + e.toString()); + // ignore + } + } + + /* private void updateUuids() { _identityDao.initializeDefaultUuid("disk_offering"); @@ -352,7 +437,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio protected void saveUser() { // insert system account String insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, account.default) VALUES (1, UUID(), 'system', '1', '1', 1)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -361,7 +446,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio 
// insert system user insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, user.default)" + " VALUES (1, UUID(), 'system', RAND(), 1, 'system', 'cloud', now(), 1)"; - txn = Transaction.currentTxn(); + txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -377,7 +462,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio // create an account for the admin user first insertSql = "INSERT INTO `cloud`.`account` (id, uuid, account_name, type, domain_id, account.default) VALUES (" + id + ", UUID(), '" + username + "', '1', '1', 1)"; - txn = Transaction.currentTxn(); + txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -388,7 +473,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio insertSql = "INSERT INTO `cloud`.`user` (id, uuid, username, password, account_id, firstname, lastname, created, state, user.default) " + "VALUES (" + id + ", UUID(), '" + username + "', RAND(), 2, '" + firstname + "','" + lastname + "',now(), 'disabled', 1)"; - txn = Transaction.currentTxn(); + txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -419,7 +504,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio "VALUES ('default', 'Default Security Group', 2, 1, 'admin')"; } - txn = Transaction.currentTxn(); + txn = TransactionLegacy.currentTxn(); try { stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -559,7 +644,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio String already = _configDao.getValue("system.vm.password"); if (already == null) { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = 
TransactionLegacy.currentTxn(); try { String rpassword = PasswordGenerator.generatePresharedKey(8); String wSql = "INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) " @@ -643,7 +728,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio String insertSql2 = "INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) " + "VALUES ('Hidden','DEFAULT', 'management-server','ssh.publickey', '" + DBEncryptionUtil.encrypt(publicKey) + "','Public key for the entire CloudStack')"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt1 = txn.prepareAutoCloseStatement(insertSql1); stmt1.executeUpdate(); @@ -778,7 +863,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio String insertSql1 = "INSERT INTO `cloud`.`configuration` (category, instance, component, name, value, description) " + "VALUES ('Hidden','DEFAULT', 'management-server','secstorage.copy.password', '" + DBEncryptionUtil.encrypt(password) + "','Password used to authenticate zone-to-zone template copy requests')"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt1 = txn.prepareAutoCloseStatement(insertSql1); stmt1.executeUpdate(); @@ -806,7 +891,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } @DB - protected HostPodVO createPod(long userId, String podName, long zoneId, String gateway, String cidr, String startIp, String endIp) throws InternalErrorException { + protected HostPodVO createPod(long userId, String podName, final long zoneId, String gateway, String cidr, final String startIp, String endIp) throws InternalErrorException { String[] cidrPair = cidr.split("\\/"); String cidrAddress = cidrPair[0]; int cidrSize = Integer.parseInt(cidrPair[1]); @@ -828,37 +913,35 @@ public 
class ConfigurationServerImpl extends ManagerBase implements Configuratio ipRange = ""; } - HostPodVO pod = new HostPodVO(podName, zoneId, gateway, cidrAddress, cidrSize, ipRange); - Transaction txn = Transaction.currentTxn(); + final HostPodVO pod = new HostPodVO(podName, zoneId, gateway, cidrAddress, cidrSize, ipRange); try { - txn.start(); - - if (_podDao.persist(pod) == null) { - txn.rollback(); - throw new InternalErrorException("Failed to create new pod. Please contact Cloud Support."); - } - - if (startIp != null) { - _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), startIp, endIp); - } - - String ipNums = _configDao.getValue("linkLocalIp.nums"); - int nums = Integer.parseInt(ipNums); - if (nums > 16 || nums <= 0) { - throw new InvalidParameterValueException("The linkLocalIp.nums: " + nums + "is wrong, should be 1~16"); - } - /* local link ip address starts from 169.254.0.2 - 169.254.(nums) */ - String[] linkLocalIpRanges = NetUtils.getLinkLocalIPRange(nums); - if (linkLocalIpRanges == null) { - throw new InvalidParameterValueException("The linkLocalIp.nums: " + nums + "may be wrong, should be 1~16"); - } else { - _zoneDao.addLinkLocalIpAddress(zoneId, pod.getId(), linkLocalIpRanges[0], linkLocalIpRanges[1]); - } - - txn.commit(); - + final String endIpFinal = endIp; + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws InternalErrorException { + if (_podDao.persist(pod) == null) { + throw new InternalErrorException("Failed to create new pod. 
Please contact Cloud Support."); + } + + if (startIp != null) { + _zoneDao.addPrivateIpAddress(zoneId, pod.getId(), startIp, endIpFinal); + } + + String ipNums = _configDao.getValue("linkLocalIp.nums"); + int nums = Integer.parseInt(ipNums); + if (nums > 16 || nums <= 0) { + throw new InvalidParameterValueException("The linkLocalIp.nums: " + nums + "is wrong, should be 1~16"); + } + /* local link ip address starts from 169.254.0.2 - 169.254.(nums) */ + String[] linkLocalIpRanges = NetUtils.getLinkLocalIPRange(nums); + if (linkLocalIpRanges == null) { + throw new InvalidParameterValueException("The linkLocalIp.nums: " + nums + "may be wrong, should be 1~16"); + } else { + _zoneDao.addLinkLocalIpAddress(zoneId, pod.getId(), linkLocalIpRanges[0], linkLocalIpRanges[1]); + } + } + }); } catch (Exception e) { - txn.rollback(); s_logger.error("Unable to create new pod due to " + e.getMessage(), e); throw new InternalErrorException("Failed to create new pod. Please contact Cloud Support."); } @@ -919,20 +1002,20 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio privateGatewayNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(privateGatewayNetworkOffering); //populate providers - Map defaultSharedNetworkOfferingProviders = new HashMap(); + final Map defaultSharedNetworkOfferingProviders = new HashMap(); defaultSharedNetworkOfferingProviders.put(Service.Dhcp, Provider.VirtualRouter); defaultSharedNetworkOfferingProviders.put(Service.Dns, Provider.VirtualRouter); defaultSharedNetworkOfferingProviders.put(Service.UserData, Provider.VirtualRouter); - Map defaultIsolatedNetworkOfferingProviders = defaultSharedNetworkOfferingProviders; + final Map defaultIsolatedNetworkOfferingProviders = defaultSharedNetworkOfferingProviders; - Map defaultSharedSGNetworkOfferingProviders = new HashMap(); + final Map defaultSharedSGNetworkOfferingProviders = new HashMap(); defaultSharedSGNetworkOfferingProviders.put(Service.Dhcp, 
Provider.VirtualRouter); defaultSharedSGNetworkOfferingProviders.put(Service.Dns, Provider.VirtualRouter); defaultSharedSGNetworkOfferingProviders.put(Service.UserData, Provider.VirtualRouter); defaultSharedSGNetworkOfferingProviders.put(Service.SecurityGroup, Provider.SecurityGroupProvider); - Map defaultIsolatedSourceNatEnabledNetworkOfferingProviders = new HashMap(); + final Map defaultIsolatedSourceNatEnabledNetworkOfferingProviders = new HashMap(); defaultIsolatedSourceNatEnabledNetworkOfferingProviders.put(Service.Dhcp, Provider.VirtualRouter); defaultIsolatedSourceNatEnabledNetworkOfferingProviders.put(Service.Dns, Provider.VirtualRouter); defaultIsolatedSourceNatEnabledNetworkOfferingProviders.put(Service.UserData, Provider.VirtualRouter); @@ -944,7 +1027,7 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio defaultIsolatedSourceNatEnabledNetworkOfferingProviders.put(Service.PortForwarding, Provider.VirtualRouter); defaultIsolatedSourceNatEnabledNetworkOfferingProviders.put(Service.Vpn, Provider.VirtualRouter); - Map netscalerServiceProviders = new HashMap(); + final Map netscalerServiceProviders = new HashMap(); netscalerServiceProviders.put(Service.Dhcp, Provider.VirtualRouter); netscalerServiceProviders.put(Service.Dns, Provider.VirtualRouter); netscalerServiceProviders.put(Service.UserData, Provider.VirtualRouter); @@ -954,182 +1037,182 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio // The only one diff between 1 and 2 network offerings is that the first one has SG enabled. 
In Basic zone only // first network offering has to be enabled, in Advance zone - the second one - Transaction txn = Transaction.currentTxn(); - txn.start(); - - // Offering #1 - NetworkOfferingVO defaultSharedSGNetworkOffering = new NetworkOfferingVO( - NetworkOffering.DefaultSharedNetworkOfferingWithSGService, - "Offering for Shared Security group enabled networks", - TrafficType.Guest, - false, true, null, null, true, Availability.Optional, - null, Network.GuestType.Shared, true, true, false, false, false); - - defaultSharedSGNetworkOffering.setState(NetworkOffering.State.Enabled); - defaultSharedSGNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultSharedSGNetworkOffering); - - for (Service service : defaultSharedSGNetworkOfferingProviders.keySet()) { - NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultSharedSGNetworkOffering.getId(), service, defaultSharedSGNetworkOfferingProviders.get(service)); - _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); - } - - // Offering #2 - NetworkOfferingVO defaultSharedNetworkOffering = new NetworkOfferingVO( - NetworkOffering.DefaultSharedNetworkOffering, - "Offering for Shared networks", - TrafficType.Guest, - false, true, null, null, true, Availability.Optional, - null, Network.GuestType.Shared, true, true, false, false, false); - - defaultSharedNetworkOffering.setState(NetworkOffering.State.Enabled); - defaultSharedNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultSharedNetworkOffering); - - for (Service service : defaultSharedNetworkOfferingProviders.keySet()) { - NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultSharedNetworkOffering.getId(), service, defaultSharedNetworkOfferingProviders.get(service)); - _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); - } - - // 
Offering #3 - NetworkOfferingVO defaultIsolatedSourceNatEnabledNetworkOffering = new NetworkOfferingVO( - NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService, - "Offering for Isolated networks with Source Nat service enabled", - TrafficType.Guest, - false, false, null, null, true, Availability.Required, - null, Network.GuestType.Isolated, true, false, false, false, true); - - defaultIsolatedSourceNatEnabledNetworkOffering.setState(NetworkOffering.State.Enabled); - defaultIsolatedSourceNatEnabledNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultIsolatedSourceNatEnabledNetworkOffering); - - for (Service service : defaultIsolatedSourceNatEnabledNetworkOfferingProviders.keySet()) { - NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO - (defaultIsolatedSourceNatEnabledNetworkOffering.getId(), service, defaultIsolatedSourceNatEnabledNetworkOfferingProviders.get(service)); - _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); - } - - // Offering #4 - NetworkOfferingVO defaultIsolatedEnabledNetworkOffering = new NetworkOfferingVO( - NetworkOffering.DefaultIsolatedNetworkOffering, - "Offering for Isolated networks with no Source Nat service", - TrafficType.Guest, - false, true, null, null, true, Availability.Optional, - null, Network.GuestType.Isolated, true, true, false, false, false); - - defaultIsolatedEnabledNetworkOffering.setState(NetworkOffering.State.Enabled); - defaultIsolatedEnabledNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultIsolatedEnabledNetworkOffering); - - for (Service service : defaultIsolatedNetworkOfferingProviders.keySet()) { - NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultIsolatedEnabledNetworkOffering.getId(), service, defaultIsolatedNetworkOfferingProviders.get(service)); - _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for 
the network offering: " + offService); - } - - // Offering #5 - NetworkOfferingVO defaultNetscalerNetworkOffering = new NetworkOfferingVO( - NetworkOffering.DefaultSharedEIPandELBNetworkOffering, - "Offering for Shared networks with Elastic IP and Elastic LB capabilities", - TrafficType.Guest, - false, true, null, null, true, Availability.Optional, - null, Network.GuestType.Shared, true, false, false, false, true, true, true, false, false, true, true, false, false); - - defaultNetscalerNetworkOffering.setState(NetworkOffering.State.Enabled); - defaultNetscalerNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultNetscalerNetworkOffering); - - for (Service service : netscalerServiceProviders.keySet()) { - NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNetscalerNetworkOffering.getId(), service, netscalerServiceProviders.get(service)); - _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); - } - - // Offering #6 - NetworkOfferingVO defaultNetworkOfferingForVpcNetworks = new NetworkOfferingVO( - NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworks, - "Offering for Isolated Vpc networks with Source Nat service enabled", - TrafficType.Guest, - false, false, null, null, true, Availability.Optional, - null, Network.GuestType.Isolated, false, false, false, false, true); - - defaultNetworkOfferingForVpcNetworks.setState(NetworkOffering.State.Enabled); - defaultNetworkOfferingForVpcNetworks = _networkOfferingDao.persistDefaultNetworkOffering(defaultNetworkOfferingForVpcNetworks); - - Map defaultVpcNetworkOfferingProviders = new HashMap(); - defaultVpcNetworkOfferingProviders.put(Service.Dhcp, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.Dns, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.UserData, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.NetworkACL, 
Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.Gateway, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.Lb, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.SourceNat, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.StaticNat, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.PortForwarding, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProviders.put(Service.Vpn, Provider.VPCVirtualRouter); - - for (Service service : defaultVpcNetworkOfferingProviders.keySet()) { - NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO - (defaultNetworkOfferingForVpcNetworks.getId(), service, defaultVpcNetworkOfferingProviders.get(service)); - _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); - } - - // Offering #7 - NetworkOfferingVO defaultNetworkOfferingForVpcNetworksNoLB = new NetworkOfferingVO( - NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, - "Offering for Isolated Vpc networks with Source Nat service enabled and LB service Disabled", - TrafficType.Guest, - false, false, null, null, true, Availability.Optional, - null, Network.GuestType.Isolated, false, false, false, false, false); - - defaultNetworkOfferingForVpcNetworksNoLB.setState(NetworkOffering.State.Enabled); - defaultNetworkOfferingForVpcNetworksNoLB = _networkOfferingDao.persistDefaultNetworkOffering(defaultNetworkOfferingForVpcNetworksNoLB); - - Map defaultVpcNetworkOfferingProvidersNoLB = new HashMap(); - defaultVpcNetworkOfferingProvidersNoLB.put(Service.Dhcp, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProvidersNoLB.put(Service.Dns, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProvidersNoLB.put(Service.UserData, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProvidersNoLB.put(Service.NetworkACL, Provider.VPCVirtualRouter); - 
defaultVpcNetworkOfferingProvidersNoLB.put(Service.Gateway, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProvidersNoLB.put(Service.SourceNat, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProvidersNoLB.put(Service.StaticNat, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProvidersNoLB.put(Service.PortForwarding, Provider.VPCVirtualRouter); - defaultVpcNetworkOfferingProvidersNoLB.put(Service.Vpn, Provider.VPCVirtualRouter); - - for (Service service : defaultVpcNetworkOfferingProvidersNoLB.keySet()) { - NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO - (defaultNetworkOfferingForVpcNetworksNoLB.getId(), service, defaultVpcNetworkOfferingProvidersNoLB.get(service)); - _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); - } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // Offering #1 + NetworkOfferingVO defaultSharedSGNetworkOffering = new NetworkOfferingVO( + NetworkOffering.DefaultSharedNetworkOfferingWithSGService, + "Offering for Shared Security group enabled networks", + TrafficType.Guest, + false, true, null, null, true, Availability.Optional, + null, Network.GuestType.Shared, true, true, false, false, false); - //offering #8 - network offering with internal lb service - NetworkOfferingVO internalLbOff = new NetworkOfferingVO( - NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksWithInternalLB, - "Offering for Isolated Vpc networks with Internal LB support", - TrafficType.Guest, - false, false, null, null, true, Availability.Optional, - null, Network.GuestType.Isolated, false, false, false, true, false); - - internalLbOff.setState(NetworkOffering.State.Enabled); - internalLbOff = _networkOfferingDao.persistDefaultNetworkOffering(internalLbOff); - - Map internalLbOffProviders = new HashMap(); - internalLbOffProviders.put(Service.Dhcp, 
Provider.VPCVirtualRouter); - internalLbOffProviders.put(Service.Dns, Provider.VPCVirtualRouter); - internalLbOffProviders.put(Service.UserData, Provider.VPCVirtualRouter); - internalLbOffProviders.put(Service.NetworkACL, Provider.VPCVirtualRouter); - internalLbOffProviders.put(Service.Gateway, Provider.VPCVirtualRouter); - internalLbOffProviders.put(Service.Lb, Provider.InternalLbVm); - internalLbOffProviders.put(Service.SourceNat, Provider.VPCVirtualRouter); - - for (Service service : internalLbOffProviders.keySet()) { - NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO - (internalLbOff.getId(), service, internalLbOffProviders.get(service)); - _ntwkOfferingServiceMapDao.persist(offService); - s_logger.trace("Added service for the network offering: " + offService); - } - - txn.commit(); + defaultSharedSGNetworkOffering.setState(NetworkOffering.State.Enabled); + defaultSharedSGNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultSharedSGNetworkOffering); + + for (Service service : defaultSharedSGNetworkOfferingProviders.keySet()) { + NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultSharedSGNetworkOffering.getId(), service, defaultSharedSGNetworkOfferingProviders.get(service)); + _ntwkOfferingServiceMapDao.persist(offService); + s_logger.trace("Added service for the network offering: " + offService); + } + + // Offering #2 + NetworkOfferingVO defaultSharedNetworkOffering = new NetworkOfferingVO( + NetworkOffering.DefaultSharedNetworkOffering, + "Offering for Shared networks", + TrafficType.Guest, + false, true, null, null, true, Availability.Optional, + null, Network.GuestType.Shared, true, true, false, false, false); + + defaultSharedNetworkOffering.setState(NetworkOffering.State.Enabled); + defaultSharedNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultSharedNetworkOffering); + + for (Service service : defaultSharedNetworkOfferingProviders.keySet()) { + 
NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultSharedNetworkOffering.getId(), service, defaultSharedNetworkOfferingProviders.get(service)); + _ntwkOfferingServiceMapDao.persist(offService); + s_logger.trace("Added service for the network offering: " + offService); + } + + // Offering #3 + NetworkOfferingVO defaultIsolatedSourceNatEnabledNetworkOffering = new NetworkOfferingVO( + NetworkOffering.DefaultIsolatedNetworkOfferingWithSourceNatService, + "Offering for Isolated networks with Source Nat service enabled", + TrafficType.Guest, + false, false, null, null, true, Availability.Required, + null, Network.GuestType.Isolated, true, false, false, false, true); + + defaultIsolatedSourceNatEnabledNetworkOffering.setState(NetworkOffering.State.Enabled); + defaultIsolatedSourceNatEnabledNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultIsolatedSourceNatEnabledNetworkOffering); + + for (Service service : defaultIsolatedSourceNatEnabledNetworkOfferingProviders.keySet()) { + NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO + (defaultIsolatedSourceNatEnabledNetworkOffering.getId(), service, defaultIsolatedSourceNatEnabledNetworkOfferingProviders.get(service)); + _ntwkOfferingServiceMapDao.persist(offService); + s_logger.trace("Added service for the network offering: " + offService); + } + + // Offering #4 + NetworkOfferingVO defaultIsolatedEnabledNetworkOffering = new NetworkOfferingVO( + NetworkOffering.DefaultIsolatedNetworkOffering, + "Offering for Isolated networks with no Source Nat service", + TrafficType.Guest, + false, true, null, null, true, Availability.Optional, + null, Network.GuestType.Isolated, true, true, false, false, false); + + defaultIsolatedEnabledNetworkOffering.setState(NetworkOffering.State.Enabled); + defaultIsolatedEnabledNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultIsolatedEnabledNetworkOffering); + + for (Service service : 
defaultIsolatedNetworkOfferingProviders.keySet()) { + NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultIsolatedEnabledNetworkOffering.getId(), service, defaultIsolatedNetworkOfferingProviders.get(service)); + _ntwkOfferingServiceMapDao.persist(offService); + s_logger.trace("Added service for the network offering: " + offService); + } + + // Offering #5 + NetworkOfferingVO defaultNetscalerNetworkOffering = new NetworkOfferingVO( + NetworkOffering.DefaultSharedEIPandELBNetworkOffering, + "Offering for Shared networks with Elastic IP and Elastic LB capabilities", + TrafficType.Guest, + false, true, null, null, true, Availability.Optional, + null, Network.GuestType.Shared, true, false, false, false, true, true, true, false, false, true, true, false, false); + + defaultNetscalerNetworkOffering.setState(NetworkOffering.State.Enabled); + defaultNetscalerNetworkOffering = _networkOfferingDao.persistDefaultNetworkOffering(defaultNetscalerNetworkOffering); + + for (Service service : netscalerServiceProviders.keySet()) { + NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO(defaultNetscalerNetworkOffering.getId(), service, netscalerServiceProviders.get(service)); + _ntwkOfferingServiceMapDao.persist(offService); + s_logger.trace("Added service for the network offering: " + offService); + } + + // Offering #6 + NetworkOfferingVO defaultNetworkOfferingForVpcNetworks = new NetworkOfferingVO( + NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworks, + "Offering for Isolated Vpc networks with Source Nat service enabled", + TrafficType.Guest, + false, false, null, null, true, Availability.Optional, + null, Network.GuestType.Isolated, false, false, false, false, true); + + defaultNetworkOfferingForVpcNetworks.setState(NetworkOffering.State.Enabled); + defaultNetworkOfferingForVpcNetworks = _networkOfferingDao.persistDefaultNetworkOffering(defaultNetworkOfferingForVpcNetworks); + + Map defaultVpcNetworkOfferingProviders = new 
HashMap(); + defaultVpcNetworkOfferingProviders.put(Service.Dhcp, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.Dns, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.UserData, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.NetworkACL, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.Gateway, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.Lb, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.SourceNat, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.StaticNat, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.PortForwarding, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProviders.put(Service.Vpn, Provider.VPCVirtualRouter); + + for (Service service : defaultVpcNetworkOfferingProviders.keySet()) { + NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO + (defaultNetworkOfferingForVpcNetworks.getId(), service, defaultVpcNetworkOfferingProviders.get(service)); + _ntwkOfferingServiceMapDao.persist(offService); + s_logger.trace("Added service for the network offering: " + offService); + } + + // Offering #7 + NetworkOfferingVO defaultNetworkOfferingForVpcNetworksNoLB = new NetworkOfferingVO( + NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksNoLB, + "Offering for Isolated Vpc networks with Source Nat service enabled and LB service Disabled", + TrafficType.Guest, + false, false, null, null, true, Availability.Optional, + null, Network.GuestType.Isolated, false, false, false, false, false); + + defaultNetworkOfferingForVpcNetworksNoLB.setState(NetworkOffering.State.Enabled); + defaultNetworkOfferingForVpcNetworksNoLB = _networkOfferingDao.persistDefaultNetworkOffering(defaultNetworkOfferingForVpcNetworksNoLB); + + Map defaultVpcNetworkOfferingProvidersNoLB = new HashMap(); + 
defaultVpcNetworkOfferingProvidersNoLB.put(Service.Dhcp, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProvidersNoLB.put(Service.Dns, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProvidersNoLB.put(Service.UserData, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProvidersNoLB.put(Service.NetworkACL, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProvidersNoLB.put(Service.Gateway, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProvidersNoLB.put(Service.SourceNat, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProvidersNoLB.put(Service.StaticNat, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProvidersNoLB.put(Service.PortForwarding, Provider.VPCVirtualRouter); + defaultVpcNetworkOfferingProvidersNoLB.put(Service.Vpn, Provider.VPCVirtualRouter); + + for (Service service : defaultVpcNetworkOfferingProvidersNoLB.keySet()) { + NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO + (defaultNetworkOfferingForVpcNetworksNoLB.getId(), service, defaultVpcNetworkOfferingProvidersNoLB.get(service)); + _ntwkOfferingServiceMapDao.persist(offService); + s_logger.trace("Added service for the network offering: " + offService); + } + + //offering #8 - network offering with internal lb service + NetworkOfferingVO internalLbOff = new NetworkOfferingVO( + NetworkOffering.DefaultIsolatedNetworkOfferingForVpcNetworksWithInternalLB, + "Offering for Isolated Vpc networks with Internal LB support", + TrafficType.Guest, + false, false, null, null, true, Availability.Optional, + null, Network.GuestType.Isolated, false, false, false, true, false); + + internalLbOff.setState(NetworkOffering.State.Enabled); + internalLbOff = _networkOfferingDao.persistDefaultNetworkOffering(internalLbOff); + + Map internalLbOffProviders = new HashMap(); + internalLbOffProviders.put(Service.Dhcp, Provider.VPCVirtualRouter); + internalLbOffProviders.put(Service.Dns, Provider.VPCVirtualRouter); + 
internalLbOffProviders.put(Service.UserData, Provider.VPCVirtualRouter); + internalLbOffProviders.put(Service.NetworkACL, Provider.VPCVirtualRouter); + internalLbOffProviders.put(Service.Gateway, Provider.VPCVirtualRouter); + internalLbOffProviders.put(Service.Lb, Provider.InternalLbVm); + internalLbOffProviders.put(Service.SourceNat, Provider.VPCVirtualRouter); + + for (Service service : internalLbOffProviders.keySet()) { + NetworkOfferingServiceMapVO offService = new NetworkOfferingServiceMapVO + (internalLbOff.getId(), service, internalLbOffProviders.get(service)); + _ntwkOfferingServiceMapDao.persist(offService); + s_logger.trace("Added service for the network offering: " + offService); + } + } + }); } private void createDefaultNetworks() { @@ -1243,8 +1326,8 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio List domainResourceCount = _resourceCountDao.listResourceCountByOwnerType(ResourceOwnerType.Domain); List accountResourceCount = _resourceCountDao.listResourceCountByOwnerType(ResourceOwnerType.Account); - List accountSupportedResourceTypes = new ArrayList(); - List domainSupportedResourceTypes = new ArrayList(); + final List accountSupportedResourceTypes = new ArrayList(); + final List domainSupportedResourceTypes = new ArrayList(); for (ResourceType resourceType : resourceTypes) { if (resourceType.supportsOwner(ResourceOwnerType.Account)) { @@ -1255,59 +1338,63 @@ public class ConfigurationServerImpl extends ManagerBase implements Configuratio } } - int accountExpectedCount = accountSupportedResourceTypes.size(); - int domainExpectedCount = domainSupportedResourceTypes.size(); + final int accountExpectedCount = accountSupportedResourceTypes.size(); + final int domainExpectedCount = domainSupportedResourceTypes.size(); if ((domainResourceCount.size() < domainExpectedCount * domains.size())) { s_logger.debug("resource_count table has records missing for some domains...going to insert them"); - for (DomainVO domain : 
domains) { + for (final DomainVO domain : domains) { // Lock domain - Transaction txn = Transaction.currentTxn(); - txn.start(); - _domainDao.lockRow(domain.getId(), true); - List domainCounts = _resourceCountDao.listByOwnerId(domain.getId(), ResourceOwnerType.Domain); - List domainCountStr = new ArrayList(); - for (ResourceCountVO domainCount : domainCounts) { - domainCountStr.add(domainCount.getType().toString()); - } - - if (domainCountStr.size() < domainExpectedCount) { - for (ResourceType resourceType : domainSupportedResourceTypes) { - if (!domainCountStr.contains(resourceType.toString())) { - ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, domain.getId(), ResourceOwnerType.Domain); - s_logger.debug("Inserting resource count of type " + resourceType + " for domain id=" + domain.getId()); - _resourceCountDao.persist(resourceCountVO); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _domainDao.lockRow(domain.getId(), true); + List domainCounts = _resourceCountDao.listByOwnerId(domain.getId(), ResourceOwnerType.Domain); + List domainCountStr = new ArrayList(); + for (ResourceCountVO domainCount : domainCounts) { + domainCountStr.add(domainCount.getType().toString()); + } + + if (domainCountStr.size() < domainExpectedCount) { + for (ResourceType resourceType : domainSupportedResourceTypes) { + if (!domainCountStr.contains(resourceType.toString())) { + ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, domain.getId(), ResourceOwnerType.Domain); + s_logger.debug("Inserting resource count of type " + resourceType + " for domain id=" + domain.getId()); + _resourceCountDao.persist(resourceCountVO); + } + } } } - } - txn.commit(); + }); + } } if ((accountResourceCount.size() < accountExpectedCount * accounts.size())) { s_logger.debug("resource_count table has records missing for some accounts...going to insert them"); - for (AccountVO 
account : accounts) { + for (final AccountVO account : accounts) { // lock account - Transaction txn = Transaction.currentTxn(); - txn.start(); - _accountDao.lockRow(account.getId(), true); - List accountCounts = _resourceCountDao.listByOwnerId(account.getId(), ResourceOwnerType.Account); - List accountCountStr = new ArrayList(); - for (ResourceCountVO accountCount : accountCounts) { - accountCountStr.add(accountCount.getType().toString()); - } - - if (accountCountStr.size() < accountExpectedCount) { - for (ResourceType resourceType : accountSupportedResourceTypes) { - if (!accountCountStr.contains(resourceType.toString())) { - ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, account.getId(), ResourceOwnerType.Account); - s_logger.debug("Inserting resource count of type " + resourceType + " for account id=" + account.getId()); - _resourceCountDao.persist(resourceCountVO); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + _accountDao.lockRow(account.getId(), true); + List accountCounts = _resourceCountDao.listByOwnerId(account.getId(), ResourceOwnerType.Account); + List accountCountStr = new ArrayList(); + for (ResourceCountVO accountCount : accountCounts) { + accountCountStr.add(accountCount.getType().toString()); + } + + if (accountCountStr.size() < accountExpectedCount) { + for (ResourceType resourceType : accountSupportedResourceTypes) { + if (!accountCountStr.contains(resourceType.toString())) { + ResourceCountVO resourceCountVO = new ResourceCountVO(resourceType, 0, account.getId(), ResourceOwnerType.Account); + s_logger.debug("Inserting resource count of type " + resourceType + " for account id=" + account.getId()); + _resourceCountDao.persist(resourceCountVO); + } + } } } - } - - txn.commit(); + }); } } } diff --git a/server/src/com/cloud/server/ManagementServerImpl.java b/server/src/com/cloud/server/ManagementServerImpl.java index 
114433093ab..5af088d1eee 100755 --- a/server/src/com/cloud/server/ManagementServerImpl.java +++ b/server/src/com/cloud/server/ManagementServerImpl.java @@ -173,6 +173,7 @@ import org.apache.cloudstack.api.command.admin.storage.ListSecondaryStagingStore import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStorageProvidersCmd; import org.apache.cloudstack.api.command.admin.storage.PreparePrimaryStorageForMaintenanceCmd; +import org.apache.cloudstack.api.command.admin.storage.PrepareSecondaryStorageForMigrationCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd; import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd; @@ -211,6 +212,7 @@ import org.apache.cloudstack.api.command.admin.vlan.DeleteVlanIpRangeCmd; import org.apache.cloudstack.api.command.admin.vlan.ListVlanIpRangesCmd; import org.apache.cloudstack.api.command.admin.vlan.ReleasePublicIpRangeCmd; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; +import org.apache.cloudstack.api.command.admin.vm.ExpungeVMCmd; import org.apache.cloudstack.api.command.admin.vm.ListVMsCmdByAdmin; import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; import org.apache.cloudstack.api.command.admin.vm.MigrateVirtualMachineWithVolumeCmd; @@ -354,6 +356,7 @@ import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotCmd; import org.apache.cloudstack.api.command.user.snapshot.DeleteSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotPoliciesCmd; import org.apache.cloudstack.api.command.user.snapshot.ListSnapshotsCmd; +import org.apache.cloudstack.api.command.user.snapshot.RevertSnapshotCmd; import org.apache.cloudstack.api.command.user.ssh.CreateSSHKeyPairCmd; import org.apache.cloudstack.api.command.user.ssh.DeleteSSHKeyPairCmd; import 
org.apache.cloudstack.api.command.user.ssh.ListSSHKeyPairsCmd; @@ -447,6 +450,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.config.impl.ConfigurationVO; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -532,7 +536,7 @@ import com.cloud.projects.Project; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.projects.ProjectManager; import com.cloud.resource.ResourceManager; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.server.auth.UserAuthenticator; import com.cloud.storage.DiskOfferingVO; import com.cloud.storage.GuestOS; @@ -583,6 +587,8 @@ import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.MacAddress; import com.cloud.utils.net.NetUtils; @@ -690,7 +696,6 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject private HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; private List _hostAllocators; - @Inject private List _storagePoolAllocators; @Inject private ResourceTagDao _resourceTagDao; @@ -725,6 +730,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Inject DeploymentPlanningManager _dpMgr; + LockMasterListener _lockMasterListener; + private final 
ScheduledExecutorService _eventExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("EventChecker")); private final ScheduledExecutorService _alertExecutor = Executors.newScheduledThreadPool(1, new NamedThreadFactory("AlertChecker")); @Inject private KeystoreManager _ksMgr; @@ -829,7 +836,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe public boolean start() { s_logger.info("Startup CloudStack management server..."); - _clusterMgr.registerListener(new LockMasterListener(ManagementServerNode.getManagementServerId())); + if ( _lockMasterListener == null ) { + _lockMasterListener = new LockMasterListener(ManagementServerNode.getManagementServerId()); + } + + _clusterMgr.registerListener(_lockMasterListener); enableAdminUser("password"); return true; @@ -2010,7 +2021,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.PublicIpAddress.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.PublicIpAddress.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -2181,12 +2192,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override @DB public DomainVO updateDomain(UpdateDomainCmd cmd) { - Long domainId = cmd.getId(); - String domainName = cmd.getDomainName(); - String networkDomain = cmd.getNetworkDomain(); + final Long domainId = cmd.getId(); + final String domainName = cmd.getDomainName(); + final String networkDomain = cmd.getNetworkDomain(); // check if domain exists in the system - DomainVO domain = _domainDao.findById(domainId); + final DomainVO domain = _domainDao.findById(domainId); if (domain == null) { InvalidParameterValueException ex = 
new InvalidParameterValueException("Unable to find domain with specified domain id"); ex.addProxyObject(domainId.toString(), "domainId"); @@ -2227,10 +2238,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } - Transaction txn = Transaction.currentTxn(); - - txn.start(); - + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { if (domainName != null) { String updatedDomainPath = getUpdatedDomainPath(domain.getPath(), domainName); updateDomainChildren(domain, updatedDomainPath); @@ -2246,8 +2256,8 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } _domainDao.update(domainId, domain); - - txn.commit(); + } + }); return _domainDao.findById(domainId); @@ -2385,7 +2395,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe for (SummedCapacity summedCapacity : summedCapacities) { CapacityVO capacity = new CapacityVO(summedCapacity.getDataCenterId(), summedCapacity.getPodId(), summedCapacity.getClusterId(), summedCapacity.getCapacityType(), summedCapacity.getPercentUsed()); - capacity.setUsedCapacity(summedCapacity.getUsedCapacity()); + capacity.setUsedCapacity(summedCapacity.getUsedCapacity() + summedCapacity.getReservedCapacity()); capacity.setTotalCapacity(summedCapacity.getTotalCapacity()); capacities.add(capacity); } @@ -2739,6 +2749,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(DeleteSnapshotPoliciesCmd.class); cmdList.add(ListSnapshotPoliciesCmd.class); cmdList.add(ListSnapshotsCmd.class); + cmdList.add(RevertSnapshotCmd.class); cmdList.add(CreateSSHKeyPairCmd.class); cmdList.add(DeleteSSHKeyPairCmd.class); cmdList.add(ListSSHKeyPairsCmd.class); @@ -2758,6 +2769,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(AddNicToVMCmd.class); cmdList.add(DeployVMCmd.class); 
cmdList.add(DestroyVMCmd.class); + cmdList.add(ExpungeVMCmd.class); cmdList.add(GetVMPasswordCmd.class); cmdList.add(ListVMsCmd.class); cmdList.add(ListVMsCmdByAdmin.class); @@ -2838,6 +2850,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe cmdList.add(CreateSecondaryStagingStoreCmd.class); cmdList.add(ListSecondaryStagingStoresCmd.class); cmdList.add(DeleteSecondaryStagingStoreCmd.class); + cmdList.add(PrepareSecondaryStorageForMigrationCmd.class); cmdList.add(CreateApplicationLoadBalancerCmd.class); cmdList.add(ListApplicationLoadBalancersCmd.class); cmdList.add(DeleteApplicationLoadBalancerCmd.class); @@ -2886,9 +2899,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe return cmdList; } - protected class EventPurgeTask implements Runnable { + protected class EventPurgeTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { GlobalLock lock = GlobalLock.getInternLock("EventPurge"); if (lock == null) { @@ -2920,9 +2933,9 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } } - protected class AlertPurgeTask implements Runnable { + protected class AlertPurgeTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { GlobalLock lock = GlobalLock.getInternLock("AlertPurge"); if (lock == null) { @@ -3462,7 +3475,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe // give us the same key if (_hashKey == null) { _hashKey = _configDao.getValueAndInitIfNotExist(Config.HashKey.key(), Config.HashKey.getCategory(), - getBase64EncodedRandomKey(128)); + getBase64EncodedRandomKey(128), Config.HashKey.getDescription()); } return _hashKey; } @@ -3472,7 +3485,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (_encryptionKey == null) { _encryptionKey = 
_configDao.getValueAndInitIfNotExist(Config.EncryptionKey.key(), Config.EncryptionKey.getCategory(), - getBase64EncodedRandomKey(128)); + getBase64EncodedRandomKey(128), Config.EncryptionKey.getDescription()); } return _encryptionKey; } @@ -3482,7 +3495,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe if (_encryptionIV == null) { _encryptionIV = _configDao.getValueAndInitIfNotExist(Config.EncryptionIV.key(), Config.EncryptionIV.getCategory(), - getBase64EncodedRandomKey(128)); + getBase64EncodedRandomKey(128), Config.EncryptionIV.getDescription()); } return _encryptionIV; } @@ -3673,7 +3686,7 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe @Override @DB - public boolean updateHostPassword(UpdateHostPasswordCmd cmd) { + public boolean updateHostPassword(final UpdateHostPasswordCmd cmd) { if (cmd.getClusterId() == null && cmd.getHostId() == null) { throw new InvalidParameterValueException("You should provide one of cluster id or a host id."); } else if (cmd.getClusterId() == null) { @@ -3690,10 +3703,11 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe throw new InvalidParameterValueException("This operation is not supported for this hypervisor type"); } // get all the hosts in this cluster - List hosts = _resourceMgr.listAllHostsInCluster(cmd.getClusterId()); - Transaction txn = Transaction.currentTxn(); - try { - txn.start(); + final List hosts = _resourceMgr.listAllHostsInCluster(cmd.getClusterId()); + + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { for (HostVO h : hosts) { if (s_logger.isDebugEnabled()) { s_logger.debug("Changing password for host name = " + h.getName()); @@ -3707,18 +3721,12 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe } else { // if one host in the cluster has diff username then // rollback to maintain 
consistency - txn.rollback(); throw new InvalidParameterValueException( "The username is not same for all hosts, please modify passwords for individual hosts."); } } - txn.commit(); - // if hypervisor is xenserver then we update it in - // CitrixResourceBase - } catch (Exception e) { - txn.rollback(); - throw new CloudRuntimeException("Failed to update password " + e.getMessage()); } + }); } return true; @@ -3893,4 +3901,21 @@ public class ManagementServerImpl extends ManagerBase implements ManagementServe _dpMgr.cleanupVMReservations(); } + + public List getStoragePoolAllocators() { + return _storagePoolAllocators; + } + + @Inject + public void setStoragePoolAllocators(List storagePoolAllocators) { + _storagePoolAllocators = storagePoolAllocators; + } + + public LockMasterListener getLockMasterListener() { + return _lockMasterListener; + } + + public void setLockMasterListener(LockMasterListener lockMasterListener) { + _lockMasterListener = lockMasterListener; + } } diff --git a/server/src/com/cloud/server/StatsCollector.java b/server/src/com/cloud/server/StatsCollector.java index 5e110aa53d5..699c3c0f55c 100755 --- a/server/src/com/cloud/server/StatsCollector.java +++ b/server/src/com/cloud/server/StatsCollector.java @@ -37,6 +37,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; @@ -84,6 +85,8 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import 
com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.net.MacAddress; import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; @@ -230,9 +233,9 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } - class HostCollector implements Runnable { + class HostCollector extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { s_logger.debug("HostStatsCollector is running..."); @@ -273,9 +276,9 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } } - class VmStatsCollector implements Runnable { + class VmStatsCollector extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { s_logger.debug("VmStatsCollector is running..."); @@ -350,9 +353,9 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc return _VmStats.get(id); } - class VmDiskStatsUpdaterTask implements Runnable { + class VmDiskStatsUpdaterTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { GlobalLock scanLock = GlobalLock.getInternLock("vm.disk.stats"); try { if(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { @@ -364,29 +367,29 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc scanLock.unlock(); return; } - Transaction txn = Transaction.open(Transaction.CLOUD_DB); try { - txn.start(); - //get all stats with delta > 0 - List updatedVmNetStats = _vmDiskStatsDao.listUpdatedStats(); - for(VmDiskStatisticsVO stat : updatedVmNetStats){ - if (_dailyOrHourly) { - //update agg bytes - stat.setAggBytesRead(stat.getCurrentBytesRead() + stat.getNetBytesRead()); - stat.setAggBytesWrite(stat.getCurrentBytesWrite() + stat.getNetBytesWrite()); - stat.setAggIORead(stat.getCurrentIORead() + stat.getNetIORead()); - stat.setAggIOWrite(stat.getCurrentIOWrite() + 
stat.getNetIOWrite()); - _vmDiskStatsDao.update(stat.getId(), stat); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + //get all stats with delta > 0 + List updatedVmNetStats = _vmDiskStatsDao.listUpdatedStats(); + for(VmDiskStatisticsVO stat : updatedVmNetStats){ + if (_dailyOrHourly) { + //update agg bytes + stat.setAggBytesRead(stat.getCurrentBytesRead() + stat.getNetBytesRead()); + stat.setAggBytesWrite(stat.getCurrentBytesWrite() + stat.getNetBytesWrite()); + stat.setAggIORead(stat.getCurrentIORead() + stat.getNetIORead()); + stat.setAggIOWrite(stat.getCurrentIOWrite() + stat.getNetIOWrite()); + _vmDiskStatsDao.update(stat.getId(), stat); + } + } + s_logger.debug("Successfully updated aggregate vm disk stats"); } - } - s_logger.debug("Successfully updated aggregate vm disk stats"); - txn.commit(); + }); } catch (Exception e){ - txn.rollback(); s_logger.debug("Failed to update aggregate disk stats", e); } finally { scanLock.unlock(); - txn.close(); } } } catch (Exception e){ @@ -397,132 +400,131 @@ public class StatsCollector extends ManagerBase implements ComponentMethodInterc } } - class VmDiskStatsTask implements Runnable { + class VmDiskStatsTask extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { // collect the vm disk statistics(total) from hypervisor. added by weizhou, 2013.03. 
- Transaction txn = Transaction.open(Transaction.CLOUD_DB); try { - txn.start(); - SearchCriteria sc = _hostDao.createSearchCriteria(); - sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString()); - sc.addAnd("resourceState", SearchCriteria.Op.NIN, ResourceState.Maintenance, ResourceState.PrepareForMaintenance, ResourceState.ErrorInMaintenance); - sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.Routing.toString()); - sc.addAnd("hypervisorType", SearchCriteria.Op.EQ, HypervisorType.KVM); // support KVM only util 2013.06.25 - List hosts = _hostDao.search(sc, null); - - for (HostVO host : hosts) { - List vms = _userVmDao.listRunningByHostId(host.getId()); - List vmIds = new ArrayList(); - - for (UserVmVO vm : vms) { - if (vm.getType() == VirtualMachine.Type.User) // user vm - vmIds.add(vm.getId()); - } - - HashMap> vmDiskStatsById = _userVmMgr.getVmDiskStatistics(host.getId(), host.getName(), vmIds); - if (vmDiskStatsById == null) - continue; - - Set vmIdSet = vmDiskStatsById.keySet(); - for(Long vmId : vmIdSet) - { - List vmDiskStats = vmDiskStatsById.get(vmId); - if (vmDiskStats == null) + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + SearchCriteria sc = _hostDao.createSearchCriteria(); + sc.addAnd("status", SearchCriteria.Op.EQ, Status.Up.toString()); + sc.addAnd("resourceState", SearchCriteria.Op.NIN, ResourceState.Maintenance, ResourceState.PrepareForMaintenance, ResourceState.ErrorInMaintenance); + sc.addAnd("type", SearchCriteria.Op.EQ, Host.Type.Routing.toString()); + sc.addAnd("hypervisorType", SearchCriteria.Op.EQ, HypervisorType.KVM); // support KVM only util 2013.06.25 + List hosts = _hostDao.search(sc, null); + + for (HostVO host : hosts) { + List vms = _userVmDao.listRunningByHostId(host.getId()); + List vmIds = new ArrayList(); + + for (UserVmVO vm : vms) { + if (vm.getType() == VirtualMachine.Type.User) // user vm + vmIds.add(vm.getId()); + } + + 
HashMap> vmDiskStatsById = _userVmMgr.getVmDiskStatistics(host.getId(), host.getName(), vmIds); + if (vmDiskStatsById == null) continue; - UserVmVO userVm = _userVmDao.findById(vmId); - for (VmDiskStatsEntry vmDiskStat:vmDiskStats) { - SearchCriteria sc_volume = _volsDao.createSearchCriteria(); - sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStat.getPath()); - VolumeVO volume = _volsDao.search(sc_volume, null).get(0); - VmDiskStatisticsVO previousVmDiskStats = _vmDiskStatsDao.findBy(userVm.getAccountId(), userVm.getDataCenterId(), vmId, volume.getId()); - VmDiskStatisticsVO vmDiskStat_lock = _vmDiskStatsDao.lock(userVm.getAccountId(), userVm.getDataCenterId(), vmId, volume.getId()); - - if ((vmDiskStat.getBytesRead() == 0) && (vmDiskStat.getBytesWrite() == 0) - && (vmDiskStat.getIORead() == 0) && (vmDiskStat.getIOWrite() == 0)) { - s_logger.debug("IO/bytes read and write are all 0. Not updating vm_disk_statistics"); - continue; - } - - if (vmDiskStat_lock == null) { - s_logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId()+ " and volumeId:" + volume.getId()); - continue; - } - - if (previousVmDiskStats != null - && ((previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) - || (previousVmDiskStats.getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite()) - || (previousVmDiskStats.getCurrentIORead() != vmDiskStat_lock.getCurrentIORead()) - || (previousVmDiskStats.getCurrentIOWrite() != vmDiskStat_lock.getCurrentIOWrite()))) { - s_logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + - "Ignoring current answer. Host: " + host.getName() + " . 
VM: " + vmDiskStat.getVmName() + - " Read(Bytes): " + vmDiskStat.getBytesRead() + " write(Bytes): " + vmDiskStat.getBytesWrite() + - " Read(IO): " + vmDiskStat.getIORead() + " write(IO): " + vmDiskStat.getIOWrite()); - continue; - } - - if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Read # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + - " Reported: " + vmDiskStat.getBytesRead() + " Stored: " + vmDiskStat_lock.getCurrentBytesRead()); + + Set vmIdSet = vmDiskStatsById.keySet(); + for(Long vmId : vmIdSet) + { + List vmDiskStats = vmDiskStatsById.get(vmId); + if (vmDiskStats == null) + continue; + UserVmVO userVm = _userVmDao.findById(vmId); + for (VmDiskStatsEntry vmDiskStat:vmDiskStats) { + SearchCriteria sc_volume = _volsDao.createSearchCriteria(); + sc_volume.addAnd("path", SearchCriteria.Op.EQ, vmDiskStat.getPath()); + VolumeVO volume = _volsDao.search(sc_volume, null).get(0); + VmDiskStatisticsVO previousVmDiskStats = _vmDiskStatsDao.findBy(userVm.getAccountId(), userVm.getDataCenterId(), vmId, volume.getId()); + VmDiskStatisticsVO vmDiskStat_lock = _vmDiskStatsDao.lock(userVm.getAccountId(), userVm.getDataCenterId(), vmId, volume.getId()); + + if ((vmDiskStat.getBytesRead() == 0) && (vmDiskStat.getBytesWrite() == 0) + && (vmDiskStat.getIORead() == 0) && (vmDiskStat.getIOWrite() == 0)) { + s_logger.debug("IO/bytes read and write are all 0. 
Not updating vm_disk_statistics"); + continue; + } + + if (vmDiskStat_lock == null) { + s_logger.warn("unable to find vm disk stats from host for account: " + userVm.getAccountId() + " with vmId: " + userVm.getId()+ " and volumeId:" + volume.getId()); + continue; + } + + if (previousVmDiskStats != null + && ((previousVmDiskStats.getCurrentBytesRead() != vmDiskStat_lock.getCurrentBytesRead()) + || (previousVmDiskStats.getCurrentBytesWrite() != vmDiskStat_lock.getCurrentBytesWrite()) + || (previousVmDiskStats.getCurrentIORead() != vmDiskStat_lock.getCurrentIORead()) + || (previousVmDiskStats.getCurrentIOWrite() != vmDiskStat_lock.getCurrentIOWrite()))) { + s_logger.debug("vm disk stats changed from the time GetVmDiskStatsCommand was sent. " + + "Ignoring current answer. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Read(Bytes): " + vmDiskStat.getBytesRead() + " write(Bytes): " + vmDiskStat.getBytesWrite() + + " Read(IO): " + vmDiskStat.getIORead() + " write(IO): " + vmDiskStat.getIOWrite()); + continue; + } + + if (vmDiskStat_lock.getCurrentBytesRead() > vmDiskStat.getBytesRead()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Read # of bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getBytesRead() + " Stored: " + vmDiskStat_lock.getCurrentBytesRead()); + } + vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); + } + vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); + if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Write # of bytes that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . 
VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getBytesWrite() + " Stored: " + vmDiskStat_lock.getCurrentBytesWrite()); + } + vmDiskStat_lock.setNetBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); + } + vmDiskStat_lock.setCurrentBytesWrite(vmDiskStat.getBytesWrite()); + if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Read # of IO that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); + } + vmDiskStat_lock.setNetIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); + } + vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); + if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { + if (s_logger.isDebugEnabled()) { + s_logger.debug("Write # of IO that's less than the last one. " + + "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + + " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); + } + vmDiskStat_lock.setNetIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); + } + vmDiskStat_lock.setCurrentIOWrite(vmDiskStat.getIOWrite()); + + if (! 
_dailyOrHourly) { + //update agg bytes + vmDiskStat_lock.setAggBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); + vmDiskStat_lock.setAggBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); + vmDiskStat_lock.setAggIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); + vmDiskStat_lock.setAggIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); + } + + _vmDiskStatsDao.update(vmDiskStat_lock.getId(), vmDiskStat_lock); } - vmDiskStat_lock.setNetBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); } - vmDiskStat_lock.setCurrentBytesRead(vmDiskStat.getBytesRead()); - if (vmDiskStat_lock.getCurrentBytesWrite() > vmDiskStat.getBytesWrite()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Write # of bytes that's less than the last one. " + - "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + - " Reported: " + vmDiskStat.getBytesWrite() + " Stored: " + vmDiskStat_lock.getCurrentBytesWrite()); - } - vmDiskStat_lock.setNetBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); - } - vmDiskStat_lock.setCurrentBytesWrite(vmDiskStat.getBytesWrite()); - if (vmDiskStat_lock.getCurrentIORead() > vmDiskStat.getIORead()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Read # of IO that's less than the last one. " + - "Assuming something went wrong and persisting it. Host: " + host.getName() + " . 
VM: " + vmDiskStat.getVmName() + - " Reported: " + vmDiskStat.getIORead() + " Stored: " + vmDiskStat_lock.getCurrentIORead()); - } - vmDiskStat_lock.setNetIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); - } - vmDiskStat_lock.setCurrentIORead(vmDiskStat.getIORead()); - if (vmDiskStat_lock.getCurrentIOWrite() > vmDiskStat.getIOWrite()) { - if (s_logger.isDebugEnabled()) { - s_logger.debug("Write # of IO that's less than the last one. " + - "Assuming something went wrong and persisting it. Host: " + host.getName() + " . VM: " + vmDiskStat.getVmName() + - " Reported: " + vmDiskStat.getIOWrite() + " Stored: " + vmDiskStat_lock.getCurrentIOWrite()); - } - vmDiskStat_lock.setNetIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); - } - vmDiskStat_lock.setCurrentIOWrite(vmDiskStat.getIOWrite()); - - if (! _dailyOrHourly) { - //update agg bytes - vmDiskStat_lock.setAggBytesWrite(vmDiskStat_lock.getNetBytesWrite() + vmDiskStat_lock.getCurrentBytesWrite()); - vmDiskStat_lock.setAggBytesRead(vmDiskStat_lock.getNetBytesRead() + vmDiskStat_lock.getCurrentBytesRead()); - vmDiskStat_lock.setAggIOWrite(vmDiskStat_lock.getNetIOWrite() + vmDiskStat_lock.getCurrentIOWrite()); - vmDiskStat_lock.setAggIORead(vmDiskStat_lock.getNetIORead() + vmDiskStat_lock.getCurrentIORead()); - } - - _vmDiskStatsDao.update(vmDiskStat_lock.getId(), vmDiskStat_lock); } } - } - txn.commit(); + }); } catch (Exception e) { s_logger.warn("Error while collecting vm disk stats from hosts", e); - } finally { - txn.close(); } - } } - class StorageCollector implements Runnable { + class StorageCollector extends ManagedContextRunnable { @Override - public void run() { + protected void runInContext() { try { if (s_logger.isDebugEnabled()) { s_logger.debug("StorageCollector is running..."); diff --git a/server/src/com/cloud/server/auth/DefaultUserAuthenticator.java b/server/src/com/cloud/server/auth/DefaultUserAuthenticator.java index 952f724bac9..253b0ac3ec4 
100644 --- a/server/src/com/cloud/server/auth/DefaultUserAuthenticator.java +++ b/server/src/com/cloud/server/auth/DefaultUserAuthenticator.java @@ -16,10 +16,7 @@ // under the License. package com.cloud.server.auth; -import java.util.Map; - import javax.ejb.Local; -import javax.naming.ConfigurationException; import com.cloud.utils.component.AdapterBase; @@ -29,28 +26,4 @@ import com.cloud.utils.component.AdapterBase; */ @Local(value={UserAuthenticator.class}) public abstract class DefaultUserAuthenticator extends AdapterBase implements UserAuthenticator { - private String _name = null; - - @Override - public boolean configure(String name, Map params) - throws ConfigurationException { - _name = name; - return true; - } - - @Override - public String getName() { - return _name; - } - - @Override - public boolean start() { - return true; - } - - @Override - public boolean stop() { - return true; - } - } diff --git a/server/src/com/cloud/servlet/CloudStartupServlet.java b/server/src/com/cloud/servlet/CloudStartupServlet.java index 8fbae924529..4fe96aa505e 100755 --- a/server/src/com/cloud/servlet/CloudStartupServlet.java +++ b/server/src/com/cloud/servlet/CloudStartupServlet.java @@ -30,6 +30,7 @@ import com.cloud.utils.LogUtils; import com.cloud.utils.SerialVersionUID; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class CloudStartupServlet extends HttpServlet { public static final Logger s_logger = Logger.getLogger(CloudStartupServlet.class.getName()); @@ -49,7 +50,7 @@ public class CloudStartupServlet extends HttpServlet { if(ComponentContext.getApplicationContext() != null) { _timer.cancel(); - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { ComponentContext.initComponentsLifeCycle(); } finally { diff --git a/server/src/com/cloud/servlet/ConsoleProxyServlet.java 
b/server/src/com/cloud/servlet/ConsoleProxyServlet.java index 3665486e2ad..b7f4b40d742 100644 --- a/server/src/com/cloud/servlet/ConsoleProxyServlet.java +++ b/server/src/com/cloud/servlet/ConsoleProxyServlet.java @@ -57,6 +57,7 @@ import com.cloud.utils.Pair; import com.cloud.utils.Ternary; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.vm.VirtualMachine; import com.cloud.vm.VirtualMachineManager; @@ -573,7 +574,7 @@ public class ConsoleProxyServlet extends HttpServlet { return false; // no signature, bad request } - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); txn.close(); User user = null; // verify there is a user with this api key diff --git a/server/src/com/cloud/storage/OCFS2ManagerImpl.java b/server/src/com/cloud/storage/OCFS2ManagerImpl.java index 5eb9a4a5c44..505a18dc2ad 100755 --- a/server/src/com/cloud/storage/OCFS2ManagerImpl.java +++ b/server/src/com/cloud/storage/OCFS2ManagerImpl.java @@ -25,11 +25,12 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.PrepareOCFS2NodesCommand; @@ -47,9 +48,8 @@ import com.cloud.storage.Storage.StoragePoolType; import com.cloud.storage.dao.StoragePoolHostDao; import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import 
com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.exception.CloudRuntimeException; @Component @@ -141,11 +141,11 @@ public class OCFS2ManagerImpl extends ManagerBase implements OCFS2Manager, Resou throw new CloudRuntimeException("Cannot find cluster for ID " + clusterId); } - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getClusterId(), Op.EQ, clusterId); - sc.addAnd(sc.getEntity().getPodId(), Op.EQ, cluster.getPodId()); - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, cluster.getDataCenterId()); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.Routing); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getClusterId(), Op.EQ, clusterId); + sc.and(sc.entity().getPodId(), Op.EQ, cluster.getPodId()); + sc.and(sc.entity().getDataCenterId(), Op.EQ, cluster.getDataCenterId()); + sc.and(sc.entity().getType(), Op.EQ, Host.Type.Routing); List hosts = sc.list(); if (hosts.isEmpty()) { s_logger.debug("There is no host in cluster " + clusterId + ", no need to prepare OCFS2 nodes"); diff --git a/server/src/com/cloud/storage/StorageManagerImpl.java b/server/src/com/cloud/storage/StorageManagerImpl.java index 9bbfe989b07..adbae0544cc 100755 --- a/server/src/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/com/cloud/storage/StorageManagerImpl.java @@ -74,6 +74,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeAp import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; @@ -163,6 +164,9 @@ import 
com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine.State; @@ -459,7 +463,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _serverId = _msServer.getId(); UpHostsInPoolSearch = _storagePoolHostDao.createSearchBuilder(Long.class); - UpHostsInPoolSearch.selectField(UpHostsInPoolSearch.entity().getHostId()); + UpHostsInPoolSearch.selectFields(UpHostsInPoolSearch.entity().getHostId()); SearchBuilder hostSearch = _hostDao.createSearchBuilder(); hostSearch.and("status", hostSearch.entity().getStatus(), Op.EQ); hostSearch.and("resourceState", hostSearch.entity().getResourceState(), Op.EQ); @@ -702,7 +706,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C Map updatedDetails = new HashMap(); if (tags != null) { - Map existingDetails = _storagePoolDetailsDao.getDetails(id); + Map existingDetails = _storagePoolDetailsDao.listDetailsKeyPairs(id); Set existingKeys = existingDetails.keySet(); Map existingDetailsToKeep = new HashMap(); @@ -1042,6 +1046,10 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C List snapshots = _snapshotDao.listAllByStatus(Snapshot.State.Error); for (SnapshotVO snapshotVO : snapshots) { try { + List storeRefs = _snapshotStoreDao.findBySnapshotId(snapshotVO.getId()); + for(SnapshotDataStoreVO ref : storeRefs) { + _snapshotStoreDao.expunge(ref.getId()); + } _snapshotDao.expunge(snapshotVO.getId()); } catch (Exception e) { s_logger.warn("Unable to destroy " + snapshotVO.getId(), e); @@ -1062,7 +1070,7 @@ public class StorageManagerImpl extends ManagerBase implements 
StorageManager, C String sql = "SELECT volume_id from snapshots, snapshot_store_ref WHERE snapshots.id = snapshot_store_ref.snapshot_id and store_id=? GROUP BY volume_id"; List list = new ArrayList(); try { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); ResultSet rs = null; PreparedStatement pstmt = null; pstmt = txn.prepareAutoCloseStatement(sql); @@ -1082,7 +1090,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C List findAllSnapshotForVolume(Long volumeId) { String sql = "SELECT backup_snap_id FROM snapshots WHERE volume_id=? and backup_snap_id is not NULL"; try { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); ResultSet rs = null; PreparedStatement pstmt = null; pstmt = txn.prepareAutoCloseStatement(sql); @@ -1239,13 +1247,36 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C return (PrimaryDataStoreInfo) dataStoreMgr.getDataStore(primaryStorage.getId(), DataStoreRole.Primary); } - protected class StorageGarbageCollector implements Runnable { + @Override + @DB + public ImageStore prepareSecondaryStorageForObjectStoreMigration(Long storeId) throws ResourceUnavailableException, InsufficientCapacityException { + // Verify that image store exists + ImageStoreVO store = _imageStoreDao.findById(storeId); + if (store == null) { + throw new InvalidParameterValueException("Image store with id " + storeId + " doesn't exist"); + } else if (!store.getProviderName().equals(DataStoreProvider.NFS_IMAGE)) { + throw new InvalidParameterValueException("We only support migrate NFS secondary storage to use object store!"); + } + _accountMgr.checkAccessAndSpecifyAuthority(CallContext.current().getCallingAccount(), store.getDataCenterId()); + + DataStoreProvider provider = dataStoreProviderMgr.getDataStoreProvider(store.getProviderName()); + DataStoreLifeCycle lifeCycle = provider.getDataStoreLifeCycle(); 
+ DataStore secStore = dataStoreMgr.getDataStore(storeId, DataStoreRole.Image); + lifeCycle.migrateToObjectStore(secStore); + // update store_role in template_store_ref and snapshot_store_ref to ImageCache + _templateStoreDao.updateStoreRoleToCachce(storeId); + _snapshotStoreDao.updateStoreRoleToCache(storeId); + // converted to an image cache store + return (ImageStore)_dataStoreMgr.getDataStore(storeId, DataStoreRole.ImageCache); + } + + protected class StorageGarbageCollector extends ManagedContextRunnable { public StorageGarbageCollector() { } @Override - public void run() { + protected void runInContext() { try { s_logger.trace("Storage Garbage Collection Thread is running."); @@ -1715,9 +1746,20 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C // store associateCrosszoneTemplatesToZone(dcId); + // duplicate cache store records to region wide storage + if (scopeType == ScopeType.REGION) { + duplicateCacheStoreRecordsToRegionStore(store.getId()); + } + return (ImageStore) _dataStoreMgr.getDataStore(store.getId(), DataStoreRole.Image); } + private void duplicateCacheStoreRecordsToRegionStore(long storeId) { + _templateStoreDao.duplicateCacheRecordsOnRegionStore(storeId); + _snapshotStoreDao.duplicateCacheRecordsOnRegionStore(storeId); + _volumeStoreDao.duplicateCacheRecordsOnRegionStore(storeId); + } + private void associateCrosszoneTemplatesToZone(Long zoneId) { VMTemplateZoneVO tmpltZone; @@ -1749,7 +1791,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean deleteImageStore(DeleteImageStoreCmd cmd) { - long storeId = cmd.getId(); + final long storeId = cmd.getId(); // Verify that image store exists ImageStoreVO store = _imageStoreDao.findById(storeId); if (store == null) { @@ -1775,8 +1817,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } // ready to delete - Transaction txn = Transaction.currentTxn(); - txn.start(); + 
Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { // first delete from image_store_details table, we need to do that since // we are not actually deleting record from main // image_data_store table, so delete cascade will not work @@ -1785,7 +1828,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _volumeStoreDao.deletePrimaryRecordsForStore(storeId); _templateStoreDao.deletePrimaryRecordsForStore(storeId); _imageStoreDao.remove(storeId); - txn.commit(); + } + }); + return true; } @@ -1859,7 +1904,7 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C @Override public boolean deleteSecondaryStagingStore(DeleteSecondaryStagingStoreCmd cmd) { - long storeId = cmd.getId(); + final long storeId = cmd.getId(); // Verify that cache store exists ImageStoreVO store = _imageStoreDao.findById(storeId); if (store == null) { @@ -1884,8 +1929,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C } // ready to delete - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { // first delete from image_store_details table, we need to do that since // we are not actually deleting record from main // image_data_store table, so delete cascade will not work @@ -1894,7 +1940,9 @@ public class StorageManagerImpl extends ManagerBase implements StorageManager, C _volumeStoreDao.deletePrimaryRecordsForStore(storeId); _templateStoreDao.deletePrimaryRecordsForStore(storeId); _imageStoreDao.remove(storeId); - txn.commit(); + } + }); + return true; } diff --git a/server/src/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/com/cloud/storage/VolumeApiServiceImpl.java index dbcb9618032..61422d13802 100644 --- 
a/server/src/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/com/cloud/storage/VolumeApiServiceImpl.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.Date; import java.util.List; import java.util.Map; +import java.util.HashMap; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -34,7 +35,6 @@ import org.apache.cloudstack.api.command.user.volume.DetachVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ExtractVolumeCmd; import org.apache.cloudstack.api.command.user.volume.MigrateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.ResizeVolumeCmd; -import org.apache.cloudstack.api.command.user.volume.UpdateVolumeCmd; import org.apache.cloudstack.api.command.user.volume.UploadVolumeCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; @@ -141,6 +141,8 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.DB; import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.NoTransitionException; import com.cloud.utils.fsm.StateMachine2; @@ -275,7 +277,6 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic protected VmDiskStatisticsDao _vmDiskStatsDao; @Inject protected VMSnapshotDao _vmSnapshotDao; - @Inject protected List _storagePoolAllocators; @Inject ConfigurationDao _configDao; @@ -406,11 +407,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } @DB - protected VolumeVO persistVolume(Account owner, Long zoneId, String volumeName, String url, String format) { - - Transaction txn = Transaction.currentTxn(); - txn.start(); - + protected VolumeVO persistVolume(final Account owner, final Long zoneId, final String volumeName, final 
String url, final String format) { + return Transaction.execute(new TransactionCallback() { + @Override + public VolumeVO doInTransaction(TransactionStatus status) { VolumeVO volume = new VolumeVO(volumeName, zoneId, -1, -1, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK); volume.setPoolId(null); volume.setDataCenterId(zoneId); @@ -432,9 +432,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume); _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.secondary_storage, UriUtils.getRemoteSize(url)); - txn.commit(); return volume; } + }); + } /* * Just allocate a volume in the database, don't send the createvolume cmd @@ -602,9 +603,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic userSpecifiedName = getRandomVolumeName(); } - Transaction txn = Transaction.currentTxn(); - txn.start(); + VolumeVO volume = commitVolume(cmd, caller, ownerId, displayVolumeEnabled, zoneId, diskOfferingId, size, + minIops, maxIops, parentVolume, userSpecifiedName); + return volume; + } + + private VolumeVO commitVolume(final CreateVolumeCmd cmd, final Account caller, final long ownerId, final Boolean displayVolumeEnabled, + final Long zoneId, final Long diskOfferingId, final Long size, final Long minIops, final Long maxIops, final VolumeVO parentVolume, + final String userSpecifiedName) { + return Transaction.execute(new TransactionCallback() { + @Override + public VolumeVO doInTransaction(TransactionStatus status) { VolumeVO volume = new VolumeVO(userSpecifiedName, -1, -1, -1, -1, new Long(-1), null, null, 0, Volume.Type.DATADISK); volume.setPoolId(null); volume.setDataCenterId(zoneId); @@ -639,11 +649,10 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic // decrement it _resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.volume); 
_resourceLimitMgr.incrementResourceCount(volume.getAccountId(), ResourceType.primary_storage, new Long(volume.getSize())); - - txn.commit(); - return volume; } + }); + } public boolean validateVolumeSizeRange(long size) { if (size < 0 || (size > 0 && size < (1024 * 1024 * 1024))) { @@ -757,7 +766,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } if (diskOffering.getTags() != null) { - if (!newDiskOffering.getTags().equals(diskOffering.getTags())) { + if (newDiskOffering.getTags() == null || !newDiskOffering.getTags().equals(diskOffering.getTags())) { throw new InvalidParameterValueException("Tags on new and old disk offerings must match"); } } else if (newDiskOffering.getTags() != null) { @@ -948,6 +957,13 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic AsyncCallFuture future2 = volService.expungeVolumeAsync(volOnSecondary); future2.get(); } + // delete all cache entries for this volume + List cacheVols = volFactory.listVolumeOnCache(volume.getId()); + for (VolumeInfo volOnCache : cacheVols) { + s_logger.info("Delete volume from image cache store: " + volOnCache.getDataStore().getName()); + volOnCache.delete(); + } + } catch (Exception e) { s_logger.warn("Failed to expunge volume:", e); return false; @@ -1050,8 +1066,17 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic HypervisorType rootDiskHyperType = vm.getHypervisorType(); HypervisorType dataDiskHyperType = _volsDao.getHypervisorType(volume.getId()); + + VolumeVO dataDiskVol = _volsDao.findById(volume.getId()); + StoragePoolVO dataDiskStoragePool = _storagePoolDao.findById(dataDiskVol.getPoolId()); + + // managed storage can be used for different types of hypervisors + // only perform this check if the volume's storage pool is not null and not managed + if (dataDiskStoragePool != null && !dataDiskStoragePool.isManaged()) { if (dataDiskHyperType != HypervisorType.None && rootDiskHyperType != 
dataDiskHyperType) { - throw new InvalidParameterValueException("Can't attach a volume created by: " + dataDiskHyperType + " to a " + rootDiskHyperType + " vm"); + throw new InvalidParameterValueException("Can't attach a volume created by: " + dataDiskHyperType + + " to a " + rootDiskHyperType + " vm"); + } } deviceId = getDeviceId(vmId, deviceId); @@ -1108,16 +1133,36 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } @Override - public Volume updateVolume(UpdateVolumeCmd cmd) { - Long volumeId = cmd.getId(); - String path = cmd.getPath(); + @ActionEvent(eventType = EventTypes.EVENT_VOLUME_UPDATE, eventDescription = "updating volume", async = true) + public Volume updateVolume(long volumeId, String path, String state, Long storageId, Boolean displayVolume) { + VolumeVO volume = _volumeDao.findById(volumeId); - if (path == null) { - throw new InvalidParameterValueException("Failed to update the volume as path was null"); + if (path != null) { + volume.setPath(path); + } + + if (displayVolume != null) { + volume.setDisplayVolume(displayVolume); + } + + if (state != null) { + try { + Volume.State volumeState = Volume.State.valueOf(state); + volume.setState(volumeState); + } + catch(IllegalArgumentException ex) { + throw new InvalidParameterValueException("Invalid volume state specified"); + } + } + + if (storageId != null) { + StoragePool pool = _storagePoolDao.findById(storageId); + if (pool.getDataCenterId() != volume.getDataCenterId()) { + throw new InvalidParameterValueException("Invalid storageId specified; refers to the pool outside of the volume's zone"); + } + volume.setPoolId(pool.getId()); } - VolumeVO volume = ApiDBUtils.findVolumeById(volumeId); - volume.setPath(path); _volumeDao.update(volumeId, volume); return volume; @@ -1194,7 +1239,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic StoragePoolVO volumePool = _storagePoolDao.findById(volume.getPoolId()); DataTO volTO = 
volFactory.getVolume(volume.getId()).getTO(); - DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), null, volume.getVolumeType()); + DiskTO disk = new DiskTO(volTO, volume.getDeviceId(), volume.getPath(), volume.getVolumeType()); DettachCommand cmd = new DettachCommand(disk, vm.getInstanceName()); @@ -1283,7 +1328,11 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic throw new InvalidParameterValueException("Failed to find the destination storage pool: " + storagePoolId); } - if (!_volumeMgr.volumeOnSharedStoragePool(vol)) { + if (_volumeMgr.volumeOnSharedStoragePool(vol)) { + if (destPool.isLocal()) { + throw new InvalidParameterValueException("Migration of volume from shared to local storage pool is not supported"); + } + } else { throw new InvalidParameterValueException("Migration of volume from local storage pool is not supported"); } @@ -1519,15 +1568,25 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic } if (storeForRootStoreScope.getScopeType() != storeForDataStoreScope.getScopeType()) { - if (storeForDataStoreScope.getScopeType() == ScopeType.CLUSTER && storeForRootStoreScope.getScopeType() == ScopeType.HOST) { + if (storeForDataStoreScope.getScopeType() == ScopeType.CLUSTER) { + Long vmClusterId = null; + if (storeForRootStoreScope.getScopeType() == ScopeType.HOST) { HostScope hs = (HostScope)storeForRootStoreScope; - if (storeForDataStoreScope.getScopeId().equals(hs.getClusterId())) { - return false; + vmClusterId = hs.getClusterId(); + } else if (storeForRootStoreScope.getScopeType() == ScopeType.ZONE) { + Long hostId = _vmInstanceDao.findById(rootVolumeOfVm.getInstanceId()).getHostId(); + if (hostId != null) { + HostVO host = _hostDao.findById(hostId); + vmClusterId = host.getClusterId(); + } } + if (storeForDataStoreScope.getScopeId().equals(vmClusterId)) { + return false; } - if (storeForRootStoreScope.getScopeType() == ScopeType.CLUSTER && storeForDataStoreScope.getScopeType() == 
ScopeType.HOST) { - HostScope hs = (HostScope)storeForDataStoreScope; - if (storeForRootStoreScope.getScopeId().equals(hs.getClusterId())) { + } else if (storeForDataStoreScope.getScopeType() == ScopeType.HOST && + (storeForRootStoreScope.getScopeType() == ScopeType.CLUSTER || storeForRootStoreScope.getScopeType() == ScopeType.ZONE)) { + Long hostId = _vmInstanceDao.findById(rootVolumeOfVm.getInstanceId()).getHostId(); + if (storeForDataStoreScope.getScopeId().equals(hostId)) { return false; } } @@ -1554,29 +1613,41 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic if (sendCommand) { volumeToAttachStoragePool = _storagePoolDao.findById(volumeToAttach.getPoolId()); - long storagePoolId = volumeToAttachStoragePool.getId(); + + HostVO host = _hostDao.findById(hostId); + + if (host.getHypervisorType() == HypervisorType.KVM && + volumeToAttachStoragePool.isManaged() && + volumeToAttach.getPath() == null) { + volumeToAttach.setPath(volumeToAttach.get_iScsiName()); + + _volsDao.update(volumeToAttach.getId(), volumeToAttach); + } DataTO volTO = volFactory.getVolume(volumeToAttach.getId()).getTO(); - DiskTO disk = new DiskTO(volTO, deviceId, null, volumeToAttach.getVolumeType()); + DiskTO disk = new DiskTO(volTO, deviceId, volumeToAttach.getPath(), volumeToAttach.getVolumeType()); AttachCommand cmd = new AttachCommand(disk, vm.getInstanceName()); - cmd.setManaged(volumeToAttachStoragePool.isManaged()); - - cmd.setStorageHost(volumeToAttachStoragePool.getHostAddress()); - cmd.setStoragePort(volumeToAttachStoragePool.getPort()); - - cmd.set_iScsiName(volumeToAttach.get_iScsiName()); - VolumeInfo volumeInfo = volFactory.getVolume(volumeToAttach.getId()); - DataStore dataStore = dataStoreMgr.getDataStore(storagePoolId, DataStoreRole.Primary); + DataStore dataStore = dataStoreMgr.getDataStore(volumeToAttachStoragePool.getId(), DataStoreRole.Primary); ChapInfo chapInfo = volService.getChapInfo(volumeInfo, dataStore); + Map details = new 
HashMap(); + + disk.setDetails(details); + + details.put(DiskTO.MANAGED, String.valueOf(volumeToAttachStoragePool.isManaged())); + details.put(DiskTO.STORAGE_HOST, volumeToAttachStoragePool.getHostAddress()); + details.put(DiskTO.STORAGE_PORT, String.valueOf(volumeToAttachStoragePool.getPort())); + details.put(DiskTO.VOLUME_SIZE, String.valueOf(volumeToAttach.getSize())); + details.put(DiskTO.IQN, volumeToAttach.get_iScsiName()); + if (chapInfo != null) { - cmd.setChapInitiatorUsername(chapInfo.getInitiatorUsername()); - cmd.setChapInitiatorPassword(chapInfo.getInitiatorSecret()); - cmd.setChapTargetUsername(chapInfo.getTargetUsername()); - cmd.setChapTargetPassword(chapInfo.getTargetSecret()); + details.put(DiskTO.CHAP_INITIATOR_USERNAME, chapInfo.getInitiatorUsername()); + details.put(DiskTO.CHAP_INITIATOR_SECRET, chapInfo.getInitiatorSecret()); + details.put(DiskTO.CHAP_TARGET_USERNAME, chapInfo.getTargetUsername()); + details.put(DiskTO.CHAP_TARGET_SECRET, chapInfo.getTargetSecret()); } try { @@ -1595,7 +1666,7 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic volumeToAttach = _volsDao.findById(volumeToAttach.getId()); if (volumeToAttachStoragePool.isManaged() && volumeToAttach.getPath() == null) { - volumeToAttach.setPath(answer.getDisk().getVdiUuid()); + volumeToAttach.setPath(answer.getDisk().getPath()); _volsDao.update(volumeToAttach.getId(), volumeToAttach); } @@ -1676,7 +1747,18 @@ public class VolumeApiServiceImpl extends ManagerBase implements VolumeApiServic String _customDiskOfferingMinSizeStr = _configDao.getValue(Config.CustomDiskOfferingMinSize.toString()); _customDiskOfferingMinSize = NumbersUtil.parseInt(_customDiskOfferingMinSizeStr, Integer.parseInt(Config.CustomDiskOfferingMinSize.getDefaultValue())); + String maxVolumeSizeInGbString = _configDao.getValue(Config.MaxVolumeSize.toString()); + _maxVolumeSizeInGb = NumbersUtil.parseLong(maxVolumeSizeInGbString, 2000); return true; } + public List 
getStoragePoolAllocators() { + return _storagePoolAllocators; + } + + @Inject + public void setStoragePoolAllocators(List storagePoolAllocators) { + _storagePoolAllocators = storagePoolAllocators; + } + } diff --git a/server/src/com/cloud/storage/download/DownloadListener.java b/server/src/com/cloud/storage/download/DownloadListener.java index e5efcb2bbd7..91ae0ae786a 100755 --- a/server/src/com/cloud/storage/download/DownloadListener.java +++ b/server/src/com/cloud/storage/download/DownloadListener.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import org.apache.log4j.Level; import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; @@ -36,6 +35,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import org.apache.cloudstack.storage.command.DownloadCommand; import org.apache.cloudstack.storage.command.DownloadCommand.ResourceType; import org.apache.cloudstack.storage.command.DownloadProgressCommand; @@ -68,7 +68,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class DownloadListener implements Listener { - private static final class StatusTask extends TimerTask { + private static final class StatusTask extends ManagedContextTimerTask { private final DownloadListener dl; private final RequestType reqType; @@ -78,13 +78,13 @@ public class DownloadListener implements Listener { } @Override - public void run() { + protected void runInContext() { dl.sendCommand(reqType); } } - private static final class TimeoutTask extends TimerTask { + private static 
final class TimeoutTask extends ManagedContextTimerTask { private final DownloadListener dl; public TimeoutTask( DownloadListener dl) { @@ -92,7 +92,7 @@ public class DownloadListener implements Listener { } @Override - public void run() { + protected void runInContext() { dl.checkProgress(); } } diff --git a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java index 2be0c65fb4c..e1d0e08a2f1 100755 --- a/server/src/com/cloud/storage/listener/StoragePoolMonitor.java +++ b/server/src/com/cloud/storage/listener/StoragePoolMonitor.java @@ -81,6 +81,9 @@ public class StoragePoolMonitor implements Listener { List zoneStoragePoolsByHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), scCmd.getHypervisorType()); zoneStoragePoolsByTags.retainAll(zoneStoragePoolsByHypervisor); pools.addAll(zoneStoragePoolsByTags); + List zoneStoragePoolsByAnyHypervisor = _poolDao.findZoneWideStoragePoolsByHypervisor(host.getDataCenterId(), HypervisorType.Any); + pools.addAll(zoneStoragePoolsByAnyHypervisor); + for (StoragePoolVO pool : pools) { if (pool.getStatus() != StoragePoolStatus.Up) { continue; diff --git a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java index d4463d91414..7743dca827e 100755 --- a/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java +++ b/server/src/com/cloud/storage/secondary/SecondaryStorageManagerImpl.java @@ -75,6 +75,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.Host; import com.cloud.host.HostVO; +import com.cloud.host.Status; import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.info.RunningHostCountInfo; @@ -114,9 +115,8 @@ import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import 
com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.GlobalLock; +import com.cloud.utils.db.QueryBuilder; import com.cloud.utils.db.SearchCriteria.Op; -import com.cloud.utils.db.SearchCriteria2; -import com.cloud.utils.db.SearchCriteriaService; import com.cloud.utils.events.SubscriptionMgr; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; @@ -169,7 +169,6 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar private int _mgmt_port = 8250; - @Inject private List _ssVmAllocators; @Inject @@ -420,9 +419,9 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar SecStorageFirewallCfgCommand thiscpc = new SecStorageFirewallCfgCommand(true); thiscpc.addPortConfig(thisSecStorageVm.getPublicIpAddress(), copyPort, true, TemplateConstants.DEFAULT_TMPLT_COPY_INTF); - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.SecondaryStorageVM); - sc.addAnd(sc.getEntity().getStatus(), Op.IN, com.cloud.host.Status.Up, com.cloud.host.Status.Connecting); + QueryBuilder sc = QueryBuilder.create(HostVO.class); + sc.and(sc.entity().getType(), Op.EQ,Host.Type.SecondaryStorageVM); + sc.and(sc.entity().getStatus(), Op.IN, Status.Up, Status.Connecting); List ssvms = sc.list(); for (HostVO ssvm : ssvms) { if (ssvm.getId() == ssAHostId) { @@ -1007,7 +1006,7 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { SecondaryStorageVmVO vm = _secStorageVmDao.findById(profile.getId()); - Map details = _vmDetailsDao.findDetails(vm.getId()); + Map details = _vmDetailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); DataStore secStore = _dataStoreMgr.getImageStore(dest.getDataCenter().getId()); @@ -1345,12 +1344,12 @@ public class SecondaryStorageManagerImpl 
extends ManagerBase implements Secondar @Override public List listUpAndConnectingSecondaryStorageVmHost(Long dcId) { - SearchCriteriaService sc = SearchCriteria2.create(HostVO.class); + QueryBuilder sc = QueryBuilder.create(HostVO.class); if (dcId != null) { - sc.addAnd(sc.getEntity().getDataCenterId(), Op.EQ, dcId); + sc.and(sc.entity().getDataCenterId(), Op.EQ,dcId); } - sc.addAnd(sc.getEntity().getState(), Op.IN, com.cloud.host.Status.Up, com.cloud.host.Status.Connecting); - sc.addAnd(sc.getEntity().getType(), Op.EQ, Host.Type.SecondaryStorageVM); + sc.and(sc.entity().getState(), Op.IN, Status.Up, Status.Connecting); + sc.and(sc.entity().getType(), Op.EQ,Host.Type.SecondaryStorageVM); return sc.list(); } @@ -1374,4 +1373,13 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar public void prepareStop(VirtualMachineProfile profile) { } + + public List getSecondaryStorageVmAllocators() { + return _ssVmAllocators; + } + + @Inject + public void setSecondaryStorageVmAllocators(List ssVmAllocators) { + this._ssVmAllocators = ssVmAllocators; + } } diff --git a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java index 69ed16e026f..464f7f8b2f3 100755 --- a/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotManagerImpl.java @@ -40,6 +40,11 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotService; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; +import 
org.apache.cloudstack.engine.subsystem.api.storage.SnapshotStrategy.SnapshotOperation; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; @@ -48,7 +53,6 @@ import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -69,17 +73,15 @@ import com.cloud.event.ActionEventUtils; import com.cloud.event.EventTypes; import com.cloud.event.EventVO; import com.cloud.event.UsageEventUtils; -import com.cloud.event.dao.EventDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.StorageUnavailableException; import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.projects.Project.ListProjectResourcesCriteria; import com.cloud.resource.ResourceManager; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.storage.CreateSnapshotPayload; import com.cloud.storage.DataStoreRole; import com.cloud.storage.ScopeType; @@ -140,8 +142,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject protected VMTemplateDao _templateDao; @Inject - protected HostDao _hostDao; - @Inject protected UserVmDao _vmDao; @Inject protected VolumeDao _volsDao; @@ -158,8 +158,6 @@ public class SnapshotManagerImpl extends ManagerBase 
implements SnapshotManager, @Inject protected PrimaryDataStoreDao _storagePoolDao; @Inject - protected EventDao _eventDao; - @Inject protected SnapshotPolicyDao _snapshotPolicyDao = null; @Inject protected SnapshotScheduleDao _snapshotScheduleDao; @@ -199,8 +197,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, @Inject EndPointSelector _epSelector; @Inject private ResourceManager _resourceMgr; - @Inject - protected List snapshotStrategies; + @Inject StorageStrategyFactory _storageStrategyFactory; private int _totalRetries; @@ -264,6 +261,35 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return null; } + @Override + public boolean revertSnapshot(Long snapshotId) { + Snapshot snapshot = _snapshotDao.findById(snapshotId); + if (snapshot == null) { + throw new InvalidParameterValueException("No such snapshot"); + } + + Volume volume = _volsDao.findById(snapshot.getVolumeId()); + Long instanceId = volume.getInstanceId(); + + // If this volume is attached to an VM, then the VM needs to be in the stopped state + // in order to revert the volume + if (instanceId != null) { + UserVmVO vm = _vmDao.findById(instanceId); + if (vm.getState() != State.Stopped && vm.getState() != State.Shutdowned) { + throw new InvalidParameterValueException("The VM the specified disk is attached to is not in the shutdown state."); + } + } + + SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.REVERT); + + if (snapshotStrategy == null) { + s_logger.error("Unable to find snaphot strategy to handle snapshot with id '"+snapshotId+"'"); + return false; + } + + return snapshotStrategy.revertSnapshot(snapshotId); + } + @Override @DB @ActionEvent(eventType = EventTypes.EVENT_SNAPSHOT_CREATE, eventDescription = "creating snapshot", async = true) @@ -311,8 +337,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return snapshot; } - - @Override 
public Snapshot backupSnapshot(Long snapshotId) { SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, DataStoreRole.Image); @@ -323,97 +347,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return snapshotSrv.backupSnapshot(snapshot); } - /* - @Override - public void downloadSnapshotsFromSwift(SnapshotVO ss) { - - long volumeId = ss.getVolumeId(); - VolumeVO volume = _volsDao.findById(volumeId); - Long dcId = volume.getDataCenterId(); - Long accountId = volume.getAccountId(); - DataStore secStore = this.dataStoreMgr.getImageStore(dcId); - - Long swiftId = ss.getSwiftId(); - SwiftTO swift = _swiftMgr.getSwiftTO(swiftId); - SnapshotVO tss = ss; - List BackupUuids = new ArrayList(30); - while (true) { - BackupUuids.add(0, tss.getBackupSnapshotId()); - if (tss.getPrevSnapshotId() == 0) - break; - Long id = tss.getPrevSnapshotId(); - tss = _snapshotDao.findById(id); - assert tss != null : " can not find snapshot " + id; - } - String parent = null; - try { - for (String backupUuid : BackupUuids) { -<<<<<<< HEAD - downloadSnapshotFromSwiftCommand cmd = new downloadSnapshotFromSwiftCommand(swift, secStore.getUri(), dcId, accountId, volumeId, parent, backupUuid, _backupsnapshotwait); -======= - DownloadSnapshotFromSwiftCommand cmd = new DownloadSnapshotFromSwiftCommand(swift, secondaryStoragePoolUrl, dcId, accountId, volumeId, parent, backupUuid, _backupsnapshotwait); ->>>>>>> master - Answer answer = _agentMgr.sendToSSVM(dcId, cmd); - if ((answer == null) || !answer.getResult()) { - throw new CloudRuntimeException("downloadSnapshotsFromSwift failed "); - } - parent = backupUuid; - } - } catch (Exception e) { - throw new CloudRuntimeException("downloadSnapshotsFromSwift failed due to " + e.toString()); - } - - } - - private List determineBackupUuids(final SnapshotVO snapshot) { - - final List backupUuids = new ArrayList(); - backupUuids.add(0, snapshot.getBackupSnapshotId()); - - SnapshotVO tempSnapshot = snapshot; - while 
(tempSnapshot.getPrevSnapshotId() != 0) { - tempSnapshot = _snapshotDao.findById(tempSnapshot - .getPrevSnapshotId()); - backupUuids.add(0, tempSnapshot.getBackupSnapshotId()); - } - - return Collections.unmodifiableList(backupUuids); - } - - @Override - public void downloadSnapshotsFromS3(final SnapshotVO snapshot) { - - final VolumeVO volume = _volsDao.findById(snapshot.getVolumeId()); - final Long zoneId = volume.getDataCenterId(); - final DataStore secStore = this.dataStoreMgr.getImageStore(zoneId); - - final S3TO s3 = _s3Mgr.getS3TO(snapshot.getS3Id()); - final List backupUuids = determineBackupUuids(snapshot); - - try { - String parent = null; - for (final String backupUuid : backupUuids) { - final DownloadSnapshotFromS3Command cmd = new DownloadSnapshotFromS3Command( - s3, parent, secStore.getUri(), zoneId, - volume.getAccountId(), volume.getId(), backupUuid, - _backupsnapshotwait); - final Answer answer = _agentMgr.sendToSSVM(zoneId, cmd); - if ((answer == null) || !answer.getResult()) { - throw new CloudRuntimeException(String.format( - "S3 snapshot download failed due to %1$s.", - answer != null ? answer.getDetails() - : "unspecified error")); - } - parent = backupUuid; - } - } catch (Exception e) { - throw new CloudRuntimeException( - "Snapshot download from S3 failed due to " + e.toString(), - e); - } - - }*/ - @Override public SnapshotVO getParentSnapshot(VolumeInfo volume) { long preId = _snapshotDao.getLastSnapshot(volume.getId(), DataStoreRole.Primary); @@ -463,7 +396,9 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, while (snaps.size() > maxSnaps && snaps.size() > 1) { SnapshotVO oldestSnapshot = snaps.get(0); long oldSnapId = oldestSnapshot.getId(); + if (policy != null) { s_logger.debug("Max snaps: " + policy.getMaxSnaps() + " exceeded for snapshot policy with Id: " + policyId + ". 
Deleting oldest snapshot: " + oldSnapId); + } if(deleteSnapshot(oldSnapId)){ //log Snapshot delete event ActionEventUtils.onCompletedActionEvent(User.UID_SYSTEM, oldestSnapshot.getAccountId(), EventVO.LEVEL_INFO, EventTypes.EVENT_SNAPSHOT_DELETE, "Successfully deleted oldest snapshot: " + oldSnapId, 0); @@ -485,13 +420,12 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } _accountMgr.checkAccess(caller, null, true, snapshotCheck); - SnapshotStrategy snapshotStrategy = null; - for (SnapshotStrategy strategy : snapshotStrategies) { - if (strategy.canHandle(snapshotCheck)) { - snapshotStrategy = strategy; - break; - } + SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshotCheck, SnapshotOperation.DELETE); + if (snapshotStrategy == null) { + s_logger.error("Unable to find snaphot strategy to handle snapshot with id '"+snapshotId+"'"); + return false; } + try { boolean result = snapshotStrategy.deleteSnapshot(snapshotId); if (result) { @@ -511,7 +445,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } } - @Override public String getSecondaryStorageURL(SnapshotVO snapshot) { SnapshotDataStoreVO snapshotStore = _snapshotStoreDao.findBySnapshot(snapshot.getId(), DataStoreRole.Image); @@ -586,7 +519,7 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.Snapshot.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.Snapshot.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); @@ -637,7 +570,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return new Pair, Integer>(result.first(), result.second()); } - 
@Override public boolean deleteSnapshotDirsForAccount(long accountId) { @@ -676,14 +608,12 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, // Either way delete the snapshots for this volume. List snapshots = listSnapsforVolume(volumeId); for (SnapshotVO snapshot : snapshots) { - SnapshotVO snap = _snapshotDao.findById(snapshot.getId()); - SnapshotStrategy snapshotStrategy = null; - for (SnapshotStrategy strategy : snapshotStrategies) { - if (strategy.canHandle(snap)) { - snapshotStrategy = strategy; - break; - } + SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.DELETE); + if (snapshotStrategy == null) { + s_logger.error("Unable to find snaphot strategy to handle snapshot with id '"+snapshot.getId()+"'"); + continue; } + if (snapshotStrategy.deleteSnapshot(snapshot.getId())) { if (snapshot.getRecurringType() == Type.MANUAL) { _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.snapshot); @@ -904,8 +834,6 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return null; } - - private boolean hostSupportSnapsthotForVolume(HostVO host, VolumeInfo volume) { if (host.getHypervisorType() != HypervisorType.KVM) { return true; @@ -1001,33 +929,32 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, return true; } @Override + @DB public SnapshotInfo takeSnapshot(VolumeInfo volume) throws ResourceAllocationException { CreateSnapshotPayload payload = (CreateSnapshotPayload)volume.getpayload(); Long snapshotId = payload.getSnapshotId(); Account snapshotOwner = payload.getAccount(); SnapshotInfo snapshot = snapshotFactory.getSnapshot(snapshotId, volume.getDataStore()); - boolean processed = false; try { - for (SnapshotStrategy strategy : snapshotStrategies) { - if (strategy.canHandle(snapshot)) { - processed = true; - snapshot = strategy.takeSnapshot(snapshot); - break; - } - } - if (!processed) { + 
SnapshotStrategy snapshotStrategy = _storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.TAKE); + + if (snapshotStrategy == null) { throw new CloudRuntimeException("Can't find snapshot strategy to deal with snapshot:" + snapshotId); } + + snapshotStrategy.takeSnapshot(snapshot); + + try { postCreateSnapshot(volume.getId(), snapshotId, payload.getSnapshotPolicyId()); UsageEventUtils.publishUsageEvent(EventTypes.EVENT_SNAPSHOT_CREATE, snapshot.getAccountId(), snapshot.getDataCenterId(), snapshotId, snapshot.getName(), null, null, volume.getSize(), snapshot.getClass().getName(), snapshot.getUuid()); - - _resourceLimitMgr.incrementResourceCount(snapshotOwner.getId(), ResourceType.snapshot); - + } catch (Exception e) { + s_logger.debug("post process snapshot failed", e); + } } catch(Exception e) { s_logger.debug("Failed to create snapshot", e); if (backup) { @@ -1192,4 +1119,5 @@ public class SnapshotManagerImpl extends ManagerBase implements SnapshotManager, } return snapshot; } + } diff --git a/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java b/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java index 52e20f02c08..15e9cd3438a 100644 --- a/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java +++ b/server/src/com/cloud/storage/snapshot/SnapshotSchedulerImpl.java @@ -33,11 +33,11 @@ import org.springframework.stereotype.Component; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.user.snapshot.CreateSnapshotCmd; -import org.apache.cloudstack.context.ServerContexts; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.framework.jobs.dao.AsyncJobDao; import org.apache.cloudstack.framework.jobs.impl.AsyncJobVO; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import com.cloud.api.ApiDispatcher; import com.cloud.api.ApiGsonHelper; @@ -371,17 +371,14 @@ 
public class SnapshotSchedulerImpl extends ManagerBase implements SnapshotSchedu _testClockTimer.schedule(_testTimerTask, 100*1000L, 60*1000L); } else { - TimerTask timerTask = new TimerTask() { + TimerTask timerTask = new ManagedContextTimerTask() { @Override - public void run() { - ServerContexts.registerSystemContext(); + protected void runInContext() { try { Date currentTimestamp = new Date(); poll(currentTimestamp); } catch (Throwable t) { s_logger.warn("Catch throwable in snapshot scheduler ", t); - } finally { - ServerContexts.unregisterSystemContext(); } } }; diff --git a/server/src/com/cloud/storage/upload/UploadListener.java b/server/src/com/cloud/storage/upload/UploadListener.java index 09db421617f..add58774c28 100755 --- a/server/src/com/cloud/storage/upload/UploadListener.java +++ b/server/src/com/cloud/storage/upload/UploadListener.java @@ -27,7 +27,6 @@ import javax.inject.Inject; import org.apache.log4j.Level; import org.apache.log4j.Logger; - import org.apache.cloudstack.api.command.user.iso.ExtractIsoCmd; import org.apache.cloudstack.api.command.user.template.ExtractTemplateCmd; import org.apache.cloudstack.api.command.user.volume.ExtractVolumeCmd; @@ -38,6 +37,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.jobs.JobInfo; +import org.apache.cloudstack.managed.context.ManagedContextTimerTask; import com.cloud.agent.Listener; import com.cloud.agent.api.AgentControlAnswer; @@ -63,7 +63,7 @@ import com.cloud.utils.exception.CloudRuntimeException; public class UploadListener implements Listener { - private static final class StatusTask extends TimerTask { + private static final class StatusTask extends ManagedContextTimerTask { private final UploadListener ul; private final RequestType reqType; @@ -73,13 +73,13 @@ public class UploadListener implements Listener { 
} @Override - public void run() { + protected void runInContext() { ul.sendCommand(reqType); } } - private static final class TimeoutTask extends TimerTask { + private static final class TimeoutTask extends ManagedContextTimerTask { private final UploadListener ul; public TimeoutTask(UploadListener ul) { @@ -87,7 +87,7 @@ public class UploadListener implements Listener { } @Override - public void run() { + protected void runInContext() { ul.checkProgress(); } } diff --git a/server/src/com/cloud/storage/upload/UploadMonitorImpl.java b/server/src/com/cloud/storage/upload/UploadMonitorImpl.java index 12378de870d..4eb4900e67d 100755 --- a/server/src/com/cloud/storage/upload/UploadMonitorImpl.java +++ b/server/src/com/cloud/storage/upload/UploadMonitorImpl.java @@ -34,13 +34,13 @@ import javax.naming.ConfigurationException; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; @@ -441,13 +441,13 @@ public class UploadMonitorImpl extends ManagerBase implements UploadMonitor { } - protected class StorageGarbageCollector implements Runnable { + protected class StorageGarbageCollector extends ManagedContextRunnable { public StorageGarbageCollector() { } @Override - public void run() { + protected void runInContext() { try { GlobalLock scanLock = 
GlobalLock.getInternLock("uploadmonitor.storageGC"); try { diff --git a/server/src/com/cloud/tags/TaggedResourceManagerImpl.java b/server/src/com/cloud/tags/TaggedResourceManagerImpl.java index e90481c9d79..d8d26891d86 100644 --- a/server/src/com/cloud/tags/TaggedResourceManagerImpl.java +++ b/server/src/com/cloud/tags/TaggedResourceManagerImpl.java @@ -25,15 +25,13 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.vm.dao.NicDao; -import com.cloud.network.vpc.NetworkACLItemDao; - +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import org.apache.cloudstack.context.CallContext; - import com.cloud.api.query.dao.ResourceTagJoinDao; +import com.cloud.dc.dao.DataCenterDao; import com.cloud.domain.Domain; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; @@ -46,12 +44,14 @@ import com.cloud.network.dao.NetworkDao; import com.cloud.network.dao.RemoteAccessVpnDao; import com.cloud.network.rules.dao.PortForwardingRulesDao; import com.cloud.network.security.dao.SecurityGroupDao; +import com.cloud.network.vpc.NetworkACLItemDao; import com.cloud.network.vpc.dao.StaticRouteDao; import com.cloud.network.vpc.dao.VpcDao; import com.cloud.projects.dao.ProjectDao; import com.cloud.server.ResourceTag; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.server.TaggedResourceService; +import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VolumeDao; @@ -67,8 +67,11 @@ import com.cloud.utils.db.GenericDao; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import 
com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.uuididentity.dao.IdentityDao; +import com.cloud.vm.dao.NicDao; import com.cloud.vm.dao.UserVmDao; import com.cloud.vm.snapshot.dao.VMSnapshotDao; @@ -78,8 +81,8 @@ import com.cloud.vm.snapshot.dao.VMSnapshotDao; public class TaggedResourceManagerImpl extends ManagerBase implements TaggedResourceService { public static final Logger s_logger = Logger.getLogger(TaggedResourceManagerImpl.class); - private static Map> _daoMap= - new HashMap>(); + private static Map> _daoMap= + new HashMap>(); @Inject AccountManager _accountMgr; @@ -125,28 +128,38 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso NicDao _nicDao; @Inject NetworkACLItemDao _networkACLItemDao; + @Inject + DataCenterDao _dataCenterDao; + @Inject + ServiceOfferingDao _serviceOffDao; + @Inject + PrimaryDataStoreDao _storagePoolDao; @Override public boolean configure(String name, Map params) throws ConfigurationException { - _daoMap.put(TaggedResourceType.UserVm, _userVmDao); - _daoMap.put(TaggedResourceType.Volume, _volumeDao); - _daoMap.put(TaggedResourceType.Template, _templateDao); - _daoMap.put(TaggedResourceType.ISO, _templateDao); - _daoMap.put(TaggedResourceType.Snapshot, _snapshotDao); - _daoMap.put(TaggedResourceType.Network, _networkDao); - _daoMap.put(TaggedResourceType.LoadBalancer, _lbDao); - _daoMap.put(TaggedResourceType.PortForwardingRule, _pfDao); - _daoMap.put(TaggedResourceType.FirewallRule, _firewallDao); - _daoMap.put(TaggedResourceType.SecurityGroup, _securityGroupDao); - _daoMap.put(TaggedResourceType.PublicIpAddress, _publicIpDao); - _daoMap.put(TaggedResourceType.Project, _projectDao); - _daoMap.put(TaggedResourceType.Vpc, _vpcDao); - _daoMap.put(TaggedResourceType.Nic, _nicDao); - _daoMap.put(TaggedResourceType.NetworkACL, _networkACLItemDao); - 
_daoMap.put(TaggedResourceType.StaticRoute, _staticRouteDao); - _daoMap.put(TaggedResourceType.VMSnapshot, _vmSnapshotDao); - _daoMap.put(TaggedResourceType.RemoteAccessVpn, _vpnDao); + _daoMap.put(ResourceObjectType.UserVm, _userVmDao); + _daoMap.put(ResourceObjectType.Volume, _volumeDao); + _daoMap.put(ResourceObjectType.Template, _templateDao); + _daoMap.put(ResourceObjectType.ISO, _templateDao); + _daoMap.put(ResourceObjectType.Snapshot, _snapshotDao); + _daoMap.put(ResourceObjectType.Network, _networkDao); + _daoMap.put(ResourceObjectType.LoadBalancer, _lbDao); + _daoMap.put(ResourceObjectType.PortForwardingRule, _pfDao); + _daoMap.put(ResourceObjectType.FirewallRule, _firewallDao); + _daoMap.put(ResourceObjectType.SecurityGroup, _securityGroupDao); + _daoMap.put(ResourceObjectType.PublicIpAddress, _publicIpDao); + _daoMap.put(ResourceObjectType.Project, _projectDao); + _daoMap.put(ResourceObjectType.Vpc, _vpcDao); + _daoMap.put(ResourceObjectType.Nic, _nicDao); + _daoMap.put(ResourceObjectType.NetworkACL, _networkACLItemDao); + _daoMap.put(ResourceObjectType.StaticRoute, _staticRouteDao); + _daoMap.put(ResourceObjectType.VMSnapshot, _vmSnapshotDao); + _daoMap.put(ResourceObjectType.RemoteAccessVpn, _vpnDao); + _daoMap.put(ResourceObjectType.Zone, _dataCenterDao); + _daoMap.put(ResourceObjectType.ServiceOffering, _serviceOffDao); + _daoMap.put(ResourceObjectType.Storage, _storagePoolDao); + return true; } @@ -162,7 +175,7 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso } @Override - public Long getResourceId(String resourceId, TaggedResourceType resourceType) { + public long getResourceId(String resourceId, ResourceObjectType resourceType) { GenericDao dao = _daoMap.get(resourceType); if (dao == null) { throw new CloudRuntimeException("Dao is not loaded for the resource type " + resourceType); @@ -192,14 +205,9 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso } return identityId; } - - 
protected String getTableName(TaggedResourceType resourceType) { - GenericDao dao = _daoMap.get(resourceType); - Class claz = DbUtil.getEntityBeanType(dao); - return DbUtil.getTableName(claz); - } - private Pair getAccountDomain(long resourceId, TaggedResourceType resourceType) { + + private Pair getAccountDomain(long resourceId, ResourceObjectType resourceType) { Pair pair = null; GenericDao dao = _daoMap.get(resourceType); @@ -235,9 +243,9 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso } @Override - public TaggedResourceType getResourceType(String resourceTypeStr) { + public ResourceObjectType getResourceType(String resourceTypeStr) { - for (TaggedResourceType type : ResourceTag.TaggedResourceType.values()) { + for (ResourceObjectType type : ResourceTag.ResourceObjectType.values()) { if (type.toString().equalsIgnoreCase(resourceTypeStr)) { return type; } @@ -248,60 +256,59 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso @Override @DB @ActionEvent(eventType = EventTypes.EVENT_TAGS_CREATE, eventDescription = "creating resource tags") - public List createTags(List resourceIds, TaggedResourceType resourceType, - Map tags, String customer) { - Account caller = CallContext.current().getCallingAccount(); + public List createTags(final List resourceIds, final ResourceObjectType resourceType, + final Map tags, final String customer) { + final Account caller = CallContext.current().getCallingAccount(); - List resourceTags = new ArrayList(tags.size()); + final List resourceTags = new ArrayList(tags.size()); - Transaction txn = Transaction.currentTxn(); - txn.start(); - - for (String key : tags.keySet()) { - for (String resourceId : resourceIds) { - Long id = getResourceId(resourceId, resourceType); - String resourceUuid = getUuid(resourceId, resourceType); - - //check if object exists - if (_daoMap.get(resourceType).findById(id) == null) { - throw new InvalidParameterValueException("Unable to find 
resource by id " + resourceId + - " and type " + resourceType); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (String key : tags.keySet()) { + for (String resourceId : resourceIds) { + if (!resourceType.resourceTagsSupport()) { + throw new InvalidParameterValueException("The resource type " + resourceType + " doesn't support resource tags"); + } + + long id = getResourceId(resourceId, resourceType); + String resourceUuid = getUuid(resourceId, resourceType); + + Pair accountDomainPair = getAccountDomain(id, resourceType); + Long domainId = accountDomainPair.second(); + Long accountId = accountDomainPair.first(); + if (accountId != null) { + _accountMgr.checkAccess(caller, null, false, _accountMgr.getAccount(accountId)); + } else if (domainId != null && caller.getType() != Account.ACCOUNT_TYPE_NORMAL) { + //check permissions; + _accountMgr.checkAccess(caller, _domainMgr.getDomain(domainId)); + } else { + throw new PermissionDeniedException("Account " + caller + " doesn't have permissions to create tags" + + " for resource " + key); + } + + String value = tags.get(key); + + if (value == null || value.isEmpty()) { + throw new InvalidParameterValueException("Value for the key " + key + " is either null or empty"); + } + + ResourceTagVO resourceTag = new ResourceTagVO(key, value, accountDomainPair.first(), + accountDomainPair.second(), + id, resourceType, customer, resourceUuid); + resourceTag = _resourceTagDao.persist(resourceTag); + resourceTags.add(resourceTag); + } } - - Pair accountDomainPair = getAccountDomain(id, resourceType); - Long domainId = accountDomainPair.second(); - Long accountId = accountDomainPair.first(); - if (accountId != null) { - _accountMgr.checkAccess(caller, null, false, _accountMgr.getAccount(accountId)); - } else if (domainId != null && caller.getType() != Account.ACCOUNT_TYPE_NORMAL) { - //check permissions; - _accountMgr.checkAccess(caller, 
_domainMgr.getDomain(domainId)); - } else { - throw new PermissionDeniedException("Account " + caller + " doesn't have permissions to create tags" + - " for resource " + key); - } - - String value = tags.get(key); - - if (value == null || value.isEmpty()) { - throw new InvalidParameterValueException("Value for the key " + key + " is either null or empty"); - } - - ResourceTagVO resourceTag = new ResourceTagVO(key, value, accountDomainPair.first(), - accountDomainPair.second(), - id, resourceType, customer, resourceUuid); - resourceTag = _resourceTagDao.persist(resourceTag); - resourceTags.add(resourceTag); } - } - - txn.commit(); - + }); + return resourceTags; } + @Override - public String getUuid(String resourceId, TaggedResourceType resourceType) { + public String getUuid(String resourceId, ResourceObjectType resourceType) { GenericDao dao = _daoMap.get(resourceType); Class claz = DbUtil.getEntityBeanType(dao); @@ -333,7 +340,7 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso @Override @DB @ActionEvent(eventType = EventTypes.EVENT_TAGS_DELETE, eventDescription = "deleting resource tags") - public boolean deleteTags(List resourceIds, TaggedResourceType resourceType, Map tags) { + public boolean deleteTags(List resourceIds, ResourceObjectType resourceType, Map tags) { Account caller = CallContext.current().getCallingAccount(); SearchBuilder sb = _resourceTagDao.createSearchBuilder(); @@ -348,7 +355,7 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso sc.setParameters("resourceType", resourceType); List resourceTags = _resourceTagDao.search(sc, null);; - List tagsToRemove = new ArrayList(); + final List tagsToRemove = new ArrayList(); // Finalize which tags should be removed for (ResourceTag resourceTag : resourceTags) { @@ -384,20 +391,22 @@ public class TaggedResourceManagerImpl extends ManagerBase implements TaggedReso } //Remove the tags - Transaction txn = Transaction.currentTxn(); - 
txn.start(); - for (ResourceTag tagToRemove : tagsToRemove) { - _resourceTagDao.remove(tagToRemove.getId()); - s_logger.debug("Removed the tag " + tagToRemove); - } - txn.commit(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (ResourceTag tagToRemove : tagsToRemove) { + _resourceTagDao.remove(tagToRemove.getId()); + s_logger.debug("Removed the tag " + tagToRemove); + } + } + }); return true; } @Override - public List listByResourceTypeAndId(TaggedResourceType type, long resourceId) { + public List listByResourceTypeAndId(ResourceObjectType type, long resourceId) { return _resourceTagDao.listBy(resourceId, type); } } diff --git a/server/src/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/com/cloud/template/HypervisorTemplateAdapter.java index 00e62225b62..b33a19225ef 100755 --- a/server/src/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/com/cloud/template/HypervisorTemplateAdapter.java @@ -25,6 +25,8 @@ import java.util.concurrent.ExecutionException; import javax.ejb.Local; import javax.inject.Inject; +import org.apache.log4j.Logger; + import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; @@ -44,7 +46,6 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; -import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; import com.cloud.alert.AlertManager; @@ -56,11 +57,11 @@ import com.cloud.event.UsageEventUtils; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; import com.cloud.org.Grouping; 
+import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; import com.cloud.storage.TemplateProfile; import com.cloud.storage.VMTemplateStorageResourceAssoc.Status; -import com.cloud.storage.ScopeType; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VMTemplateZoneVO; import com.cloud.storage.dao.VMTemplateZoneDao; @@ -182,7 +183,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { } // find all eligible image stores for this zone scope - List imageStores = this.storeMgr.getImageStoresByScope(new ZoneScope(profile.getZoneId())); + List imageStores = storeMgr.getImageStoresByScope(new ZoneScope(profile.getZoneId())); if ( imageStores == null || imageStores.size() == 0 ){ throw new CloudRuntimeException("Unable to find image store to download template "+ profile.getTemplate()); } @@ -205,12 +206,12 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { } } - TemplateInfo tmpl = this.imageFactory.getTemplate(template.getId(), imageStore); + TemplateInfo tmpl = imageFactory.getTemplate(template.getId(), imageStore); CreateTemplateContext context = new CreateTemplateContext(null, tmpl); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().createTemplateAsyncCallBack(null, null)); caller.setContext(context); - this.imageService.createTemplateAsync(tmpl, imageStore, caller); + imageService.createTemplateAsync(tmpl, imageStore, caller); if( !(profile.getIsPublic() || profile.getFeatured()) ){ // If private template then break break; } @@ -237,7 +238,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { // populated template entry _tmpltDao.remove(template.getId()); } else { - VMTemplateVO tmplt = this._tmpltDao.findById(template.getId()); + VMTemplateVO tmplt = _tmpltDao.findById(template.getId()); long accountId = tmplt.getAccountId(); if (template.getSize() != null) { // 
publish usage event @@ -283,7 +284,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { VMTemplateVO template = profile.getTemplate(); // find all eligible image stores for this template - List imageStores = this.templateMgr.getImageStoreByTemplate(template.getId(), profile.getZoneId()); + List imageStores = templateMgr.getImageStoreByTemplate(template.getId(), profile.getZoneId()); if (imageStores == null || imageStores.size() == 0) { // already destroyed on image stores s_logger.info("Unable to find image store still having template: " + template.getName() @@ -321,7 +322,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { } s_logger.info("Delete template from image store: " + imageStore.getName()); - AsyncCallFuture future = this.imageService.deleteTemplateAsync(this.imageFactory + AsyncCallFuture future = imageService.deleteTemplateAsync(imageFactory .getTemplate(template.getId(), imageStore)); try { TemplateApiResult result = future.get(); @@ -350,9 +351,15 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { } } if (success) { + // delete all cache entries for this template + List cacheTmpls = imageFactory.listTemplateOnCache(template.getId()); + for (TemplateInfo tmplOnCache : cacheTmpls) { + s_logger.info("Delete template from image cache store: " + tmplOnCache.getDataStore().getName()); + tmplOnCache.delete(); + } // find all eligible image stores for this template - List iStores = this.templateMgr.getImageStoreByTemplate(template.getId(), null); + List iStores = templateMgr.getImageStoreByTemplate(template.getId(), null); if (iStores == null || iStores.size() == 0) { // remove template from vm_templates table if (_tmpltDao.remove(template.getId())) { @@ -380,7 +387,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { throw new InvalidParameterValueException("The DomR template cannot be deleted."); } - if (zoneId != null && (this.storeMgr.getImageStore(zoneId) == null)) { + 
if (zoneId != null && (storeMgr.getImageStore(zoneId) == null)) { throw new InvalidParameterValueException("Failed to find a secondary storage in the specified zone."); } @@ -392,7 +399,7 @@ public class HypervisorTemplateAdapter extends TemplateAdapterBase { TemplateProfile profile = super.prepareDelete(cmd); Long zoneId = profile.getZoneId(); - if (zoneId != null && (this.storeMgr.getImageStore(zoneId) == null)) { + if (zoneId != null && (storeMgr.getImageStore(zoneId) == null)) { throw new InvalidParameterValueException("Failed to find a secondary storage in the specified zone."); } diff --git a/server/src/com/cloud/template/TemplateAdapterBase.java b/server/src/com/cloud/template/TemplateAdapterBase.java index 9485b2a8824..18fbbe71328 100755 --- a/server/src/com/cloud/template/TemplateAdapterBase.java +++ b/server/src/com/cloud/template/TemplateAdapterBase.java @@ -18,6 +18,7 @@ package com.cloud.template; import java.util.List; import java.util.Map; +import java.util.HashMap; import javax.inject.Inject; @@ -36,6 +37,7 @@ import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import com.cloud.api.ApiDBUtils; +import com.cloud.configuration.Config; import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; @@ -45,6 +47,7 @@ import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; import com.cloud.exception.ResourceAllocationException; import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.org.Grouping; import com.cloud.projects.ProjectManager; @@ -207,6 +210,20 @@ public abstract class TemplateAdapterBase extends AdapterBase implements Templat } } + if (hypervisorType.equals(Hypervisor.HypervisorType.XenServer) ) { + if( details == null || 
!details.containsKey("hypervisortoolsversion") + || details.get("hypervisortoolsversion") == null + || ((String)details.get("hypervisortoolsversion")).equalsIgnoreCase("none") ) { + String hpvs = _configDao.getValue(Config.XenPVdriverVersion.key()); + if ( hpvs != null) { + if ( details == null ) { + details = new HashMap(); + } + details.put("hypervisortoolsversion", hpvs); + } + } + } + Long id = _tmpltDao.getNextInSequence(Long.class, "id"); CallContext.current().setEventDetails("Id: " + id + " name: " + name); return new TemplateProfile(id, userId, name, displayText, bits, passwordEnabled, requiresHVM, url, isPublic, featured, isExtractable, imgfmt, guestOSId, zoneId, diff --git a/server/src/com/cloud/template/TemplateManagerImpl.java b/server/src/com/cloud/template/TemplateManagerImpl.java index a1e20b9f0b1..d40a01f3137 100755 --- a/server/src/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/com/cloud/template/TemplateManagerImpl.java @@ -52,7 +52,6 @@ import org.apache.cloudstack.api.command.user.template.UpdateTemplateCmd; import org.apache.cloudstack.api.command.user.template.UpdateTemplatePermissionsCmd; import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; @@ -73,6 +72,7 @@ import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.command.AttachCommand; import 
org.apache.cloudstack.storage.command.CommandResult; import org.apache.cloudstack.storage.command.DettachCommand; @@ -135,6 +135,7 @@ import com.cloud.storage.StoragePoolHostVO; import com.cloud.storage.StoragePoolStatus; import com.cloud.storage.TemplateProfile; import com.cloud.storage.Upload; +import com.cloud.storage.VMTemplateDetailVO; import com.cloud.storage.VMTemplateHostVO; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateStorageResourceAssoc; @@ -176,6 +177,8 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.concurrency.NamedThreadFactory; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.UserVmManager; import com.cloud.vm.UserVmVO; @@ -291,7 +294,6 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, boolean _disableExtraction = false; ExecutorService _preloadExecutor; - @Inject protected List _adapters; @Inject @@ -479,9 +481,12 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new InvalidParameterValueException("The " + desc + " has not been downloaded "); } - DataObject templateObject = _tmplFactory.getTemplate(templateId, tmpltStore); + // Handle NFS to S3 object store migration case, we trigger template sync from NFS to S3 during extract template or copy template + _tmpltSvr.syncTemplateToRegionStore(templateId, tmpltStore); - return tmpltStore.createEntityExtractUrl(tmpltStoreRef.getInstallPath(), template.getFormat(), templateObject); + TemplateInfo templateObject = _tmplFactory.getTemplate(templateId, tmpltStore); + + return tmpltStore.createEntityExtractUrl(templateObject.getInstallPath(), template.getFormat(), templateObject); } public void prepareTemplateInAllStoragePools(final VMTemplateVO template, long zoneId) { @@ 
-489,9 +494,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, for (final StoragePoolVO pool : pools) { if (pool.getDataCenterId() == zoneId) { s_logger.info("Schedule to preload template " + template.getId() + " into primary storage " + pool.getId()); - _preloadExecutor.execute(new Runnable() { + _preloadExecutor.execute(new ManagedContextRunnable() { @Override - public void run() { + protected void runInContext() { try { reallyRun(); } catch (Throwable e) { @@ -698,7 +703,15 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, throw new InvalidParameterValueException("Unable to find template with id"); } + DataStore srcSecStore = getImageStore(sourceZoneId, templateId); + if (srcSecStore == null) { + throw new InvalidParameterValueException("There is no template " + templateId + " in zone " + sourceZoneId); + } + if (template.isCrossZones()){ + //TODO: we may need UI still enable CopyTemplate in case of cross zone template to trigger sync to region store. + // sync template from cache store to region store if it is not there, for cases where we are going to migrate existing NFS to S3. 
+ _tmpltSvr.syncTemplateToRegionStore(templateId, srcSecStore); s_logger.debug("Template " + templateId + " is cross-zone, don't need to copy"); return template; } @@ -710,11 +723,6 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, return template; } - DataStore srcSecStore = getImageStore(sourceZoneId, templateId); - if (srcSecStore == null) { - throw new InvalidParameterValueException("There is no template " + templateId + " in zone " + sourceZoneId); - } - _accountMgr.checkAccess(caller, AccessType.ModifyEntry, true, template); boolean success = copy(userId, template, srcSecStore, dstZone); @@ -770,7 +778,17 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, } @Override + @DB public void evictTemplateFromStoragePool(VMTemplateStoragePoolVO templatePoolVO) { + //Need to hold the lock, otherwise, another thread may create a volume from the template at the same time. + //Assumption here is that, we will hold the same lock during create volume from template + VMTemplateStoragePoolVO templatePoolRef = _tmpltPoolDao.acquireInLockTable(templatePoolVO.getId()); + if (templatePoolRef == null) { + s_logger.debug("can't aquire the lock for template pool ref:" + templatePoolVO.getId()); + return; + } + + try { StoragePool pool = (StoragePool) _dataStoreMgr.getPrimaryDataStore(templatePoolVO.getPoolId()); VMTemplateVO template = _tmpltDao.findByIdIncludingRemoved(templatePoolVO.getTemplateId()); @@ -794,6 +812,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, s_logger.info("Storage is unavailable currently. 
Will retry evicte template: " + template.getName() + " from storage pool: " + pool.getName()); } + } finally { + _tmpltPoolDao.releaseFromLockTable(templatePoolRef.getId()); + } } @@ -1150,11 +1171,9 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, @DB @Override public boolean updateTemplateOrIsoPermissions(BaseUpdateTemplateOrIsoPermissionsCmd cmd) { - Transaction txn = Transaction.currentTxn(); - // Input validation - Long id = cmd.getId(); - Account caller = CallContext.current().getCallingAccount(); + final Long id = cmd.getId(); + final Account caller = CallContext.current().getCallingAccount(); List accountNames = cmd.getAccountNames(); List projectIds = cmd.getProjectIds(); Boolean isFeatured = cmd.isFeatured(); @@ -1271,10 +1290,13 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, //Derive the domain id from the template owner as updateTemplatePermissions is not cross domain operation Account owner = _accountMgr.getAccount(ownerId); - Domain domain = _domainDao.findById(owner.getDomainId()); + final Domain domain = _domainDao.findById(owner.getDomainId()); if ("add".equalsIgnoreCase(operation)) { - txn.start(); - for (String accountName : accountNames) { + final List accountNamesFinal = accountNames; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + for (String accountName : accountNamesFinal) { Account permittedAccount = _accountDao.findActiveAccount(accountName, domain.getId()); if (permittedAccount != null) { if (permittedAccount.getId() == caller.getId()) { @@ -1287,12 +1309,12 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, _launchPermissionDao.persist(launchPermission); } } else { - txn.rollback(); throw new InvalidParameterValueException("Unable to grant a launch permission to account " + accountName + " in domain id=" + domain.getUuid() + ", account not 
found. " + "No permissions updated, please verify the account names and retry."); } } - txn.commit(); + } + }); } else if ("remove".equalsIgnoreCase(operation)) { List accountIds = new ArrayList(); for (String accountName : accountNames) { @@ -1322,11 +1344,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, if (userId == null) { userId = User.UID_SYSTEM; } - long templateId = command.getEntityId(); + final long templateId = command.getEntityId(); Long volumeId = command.getVolumeId(); Long snapshotId = command.getSnapshotId(); VMTemplateVO privateTemplate = null; - Long accountId = null; + final Long accountId = null; SnapshotVO snapshot = null; VolumeVO volume = null; @@ -1376,16 +1398,23 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, //getting the prent volume long parentVolumeId=_snapshotDao.findById(snapshotId).getVolumeId(); VolumeVO parentVolume = _volumeDao.findById(parentVolumeId); - if (parentVolume.getIsoId() != null) { + + if (parentVolume != null && parentVolume.getIsoId() != null && parentVolume.getIsoId() != 0) { privateTemplate.setSourceTemplateId(parentVolume.getIsoId()); _tmpltDao.update(privateTemplate.getId(), privateTemplate); + } else if (parentVolume != null && parentVolume.getTemplateId() != null) { + privateTemplate.setSourceTemplateId(parentVolume.getTemplateId()); + _tmpltDao.update(privateTemplate.getId(), privateTemplate); } } else if (volumeId != null) { VolumeVO parentVolume = _volumeDao.findById(volumeId); - if (parentVolume.getIsoId() != null) { + if (parentVolume.getIsoId() != null && parentVolume.getIsoId() != 0) { privateTemplate.setSourceTemplateId(parentVolume.getIsoId()); _tmpltDao.update(privateTemplate.getId(), privateTemplate); + } else if (parentVolume.getTemplateId() != null) { + privateTemplate.setSourceTemplateId(parentVolume.getTemplateId()); + _tmpltDao.update(privateTemplate.getId(), privateTemplate); } } TemplateDataStoreVO srcTmpltStore = 
_tmplStoreDao.findByStoreTemplate(store.getId(), templateId); @@ -1408,8 +1437,11 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, zoneId, accountId, volumeId); }*/ if (privateTemplate == null) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + final VolumeVO volumeFinal = volume; + final SnapshotVO snapshotFinal = snapshot; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { // template_store_ref entries should have been removed using our // DataObject.processEvent command in case of failure, but clean // it up here to avoid @@ -1424,10 +1456,12 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, // decrement resource count if (accountId != null) { _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.template); - _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.secondary_storage, new Long(volume != null ? volume.getSize() - : snapshot.getSize())); + _resourceLimitMgr.decrementResourceCount(accountId, ResourceType.secondary_storage, new Long(volumeFinal != null ? 
volumeFinal.getSize() + : snapshotFinal.getSize())); } - txn.commit(); + } + }); + } } @@ -1594,8 +1628,13 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, VMTemplateVO template = _tmpltDao.persist(privateTemplate); // Increment the number of templates if (template != null) { - if (cmd.getDetails() != null) { - _templateDetailsDao.persist(template.getId(), cmd.getDetails()); + Map detailsStr = cmd.getDetails(); + if (detailsStr != null) { + List details = new ArrayList(); + for (String key : detailsStr.keySet()) { + details.add(new VMTemplateDetailVO(template.getId(), key, detailsStr.get(key))); + } + _templateDetailsDao.saveDetails(details); } _resourceLimitMgr.incrementResourceCount(templateOwner.getId(), ResourceType.template); @@ -1676,6 +1715,7 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, return stores; } + @Override public VMTemplateVO updateTemplate(UpdateIsoCmd cmd) { return updateTemplateOrIso(cmd); @@ -1798,4 +1838,13 @@ public class TemplateManagerImpl extends ManagerBase implements TemplateManager, public ConfigKey[] getConfigKeys() { return new ConfigKey[] {AllowPublicUserTemplates}; } + + public List getTemplateAdapters() { + return _adapters; + } + + @Inject + public void setTemplateAdapters(List adapters) { + _adapters = adapters; + } } diff --git a/server/src/com/cloud/test/DatabaseConfig.java b/server/src/com/cloud/test/DatabaseConfig.java index 38a1abf7542..9370218be0c 100755 --- a/server/src/com/cloud/test/DatabaseConfig.java +++ b/server/src/com/cloud/test/DatabaseConfig.java @@ -57,6 +57,11 @@ import com.cloud.utils.PropertiesUtil; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import 
com.cloud.utils.db.TransactionLegacy; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.net.NfsUtils; public class DatabaseConfig { @@ -407,34 +412,32 @@ public class DatabaseConfig { @DB protected void doConfig() { - Transaction txn = Transaction.currentTxn(); try { - - File configFile = new File(_configFileName); + final File configFile = new File(_configFileName); SAXParserFactory spfactory = SAXParserFactory.newInstance(); - SAXParser saxParser = spfactory.newSAXParser(); - DbConfigXMLHandler handler = new DbConfigXMLHandler(); + final SAXParser saxParser = spfactory.newSAXParser(); + final DbConfigXMLHandler handler = new DbConfigXMLHandler(); handler.setParent(this); - txn.start(); + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws Exception { + // Save user configured values for all fields + saxParser.parse(configFile, handler); + + // Save default values for configuration fields + saveVMTemplate(); + saveRootDomain(); + saveDefaultConfiguations(); + } + }); - // Save user configured values for all fields - saxParser.parse(configFile, handler); - - // Save default values for configuration fields - saveVMTemplate(); - saveRootDomain(); - saveDefaultConfiguations(); - - txn.commit(); // Check pod CIDRs against each other, and against the guest ip network/netmask pzc.checkAllPodCidrSubnets(); - } catch (Exception ex) { System.out.print("ERROR IS"+ex); s_logger.error("error", ex); - txn.rollback(); } } @@ -486,7 +489,7 @@ public class DatabaseConfig { String insertSql1 = "INSERT INTO `host` (`id`, `name`, `status` , `type` , `private_ip_address`, `private_netmask` ,`private_mac_address` , `storage_ip_address` ,`storage_netmask`, `storage_mac_address`, `data_center_id`, `version`, `dom0_memory`, `last_ping`, `resource`, `guid`, `hypervisor_type`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; String insertSqlHostDetails = "INSERT INTO 
`host_details` (`id`, `host_id`, `name`, `value`) VALUES(?,?,?,?)"; String insertSql2 = "INSERT INTO `op_host` (`id`, `sequence`) VALUES(?, ?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); stmt.setLong(1, 0); @@ -563,7 +566,7 @@ public class DatabaseConfig { String hypervisor = _currentObjectParams.get("hypervisorType"); String insertSql1 = "INSERT INTO `cluster` (`id`, `name`, `data_center_id` , `pod_id`, `hypervisor_type` , `cluster_type`, `allocation_state`) VALUES (?,?,?,?,?,?,?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); stmt.setLong(1, id); @@ -599,7 +602,7 @@ public class DatabaseConfig { String insertSql1 = "INSERT INTO `storage_pool` (`id`, `name`, `uuid` , `pool_type` , `port`, `data_center_id` ,`available_bytes` , `capacity_bytes` ,`host_address`, `path`, `created`, `pod_id`,`status` , `cluster_id`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; // String insertSql2 = "INSERT INTO `netfs_storage_pool` VALUES (?,?,?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); stmt.setLong(1, id); @@ -704,7 +707,7 @@ public class DatabaseConfig { "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," + "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); stmt.setLong(1, id); @@ -742,7 +745,7 @@ public class DatabaseConfig { String insertSql1 = 
"INSERT INTO `virtual_router_providers` (`id`, `nsp_id`, `uuid` , `type` , `enabled`) " + "VALUES (?,?,?,?,?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql1); stmt.setLong(1, id); @@ -1030,7 +1033,7 @@ public class DatabaseConfig { String insertNWRateSql = "UPDATE `cloud`.`service_offering` SET `nw_rate` = ?"; String insertMCRateSql = "UPDATE `cloud`.`service_offering` SET `mc_rate` = ?"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt; @@ -1109,7 +1112,7 @@ public class DatabaseConfig { protected void saveUser() { // insert system account String insertSql = "INSERT INTO `cloud`.`account` (id, account_name, type, domain_id) VALUES (1, 'system', '1', '1')"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -1120,7 +1123,7 @@ public class DatabaseConfig { // insert system user insertSql = "INSERT INTO `cloud`.`user` (id, username, password, account_id, firstname, lastname, created)" + " VALUES (1, 'system', RAND(), 1, 'system', 'cloud', now())"; - txn = Transaction.currentTxn(); + txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -1159,7 +1162,7 @@ public class DatabaseConfig { // create an account for the admin user first insertSql = "INSERT INTO `cloud`.`account` (id, account_name, type, domain_id) VALUES (" + id + ", '" + username + "', '1', '1')"; - txn = Transaction.currentTxn(); + txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -1171,7 +1174,7 @@ public class DatabaseConfig { insertSql = "INSERT INTO `cloud`.`user` (id, username, 
password, account_id, firstname, lastname, email, created) " + "VALUES (" + id + ",'" + username + "','" + sb.toString() + "', 2, '" + firstname + "','" + lastname + "','" + email + "',now())"; - txn = Transaction.currentTxn(); + txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -1227,7 +1230,7 @@ public class DatabaseConfig { String selectSql = "SELECT name FROM cloud.configuration WHERE name = '" + name + "'"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); ResultSet result = stmt.executeQuery(); @@ -1270,7 +1273,7 @@ public class DatabaseConfig { @DB protected void saveRootDomain() { String insertSql = "insert into `cloud`.`domain` (id, name, parent, owner, path, level) values (1, 'ROOT', NULL, 2, '/', 0)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertSql); stmt.executeUpdate(); @@ -1377,7 +1380,7 @@ public class DatabaseConfig { } public static String getDatabaseValueString(String selectSql, String name, String errorMsg) { - Transaction txn = Transaction.open("getDatabaseValueString"); + TransactionLegacy txn = TransactionLegacy.open("getDatabaseValueString"); PreparedStatement stmt = null; try { @@ -1399,7 +1402,7 @@ public class DatabaseConfig { } public static long getDatabaseValueLong(String selectSql, String name, String errorMsg) { - Transaction txn = Transaction.open("getDatabaseValueLong"); + TransactionLegacy txn = TransactionLegacy.open("getDatabaseValueLong"); PreparedStatement stmt = null; try { @@ -1420,7 +1423,7 @@ public class DatabaseConfig { } public static void saveSQL(String sql, String errorMsg) { - Transaction txn = Transaction.open("saveSQL"); + TransactionLegacy txn = TransactionLegacy.open("saveSQL"); 
try { PreparedStatement stmt = txn.prepareAutoCloseStatement(sql); stmt.executeUpdate(); diff --git a/server/src/com/cloud/test/IPRangeConfig.java b/server/src/com/cloud/test/IPRangeConfig.java index 4b884f8c4b2..23ca1bba3ee 100755 --- a/server/src/com/cloud/test/IPRangeConfig.java +++ b/server/src/com/cloud/test/IPRangeConfig.java @@ -29,6 +29,7 @@ import java.util.Vector; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.net.NetUtils; @@ -303,7 +304,7 @@ public class IPRangeConfig { endIPLong = NetUtils.ip2Long(endIP); } - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); Vector problemIPs = null; if (type.equals("public")) { problemIPs = deletePublicIPRange(txn, startIPLong, endIPLong, vlanDbId); @@ -314,7 +315,7 @@ public class IPRangeConfig { return problemIPs; } - private Vector deletePublicIPRange(Transaction txn, long startIP, long endIP, long vlanDbId) { + private Vector deletePublicIPRange(TransactionLegacy txn, long startIP, long endIP, long vlanDbId) { String deleteSql = "DELETE FROM `cloud`.`user_ip_address` WHERE public_ip_address = ? AND vlan_id = ?"; String isPublicIPAllocatedSelectSql = "SELECT * FROM `cloud`.`user_ip_address` WHERE public_ip_address = ? AND vlan_id = ?"; @@ -349,7 +350,7 @@ public class IPRangeConfig { return problemIPs; } - private Vector deletePrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) { + private Vector deletePrivateIPRange(TransactionLegacy txn, long startIP, long endIP, long podId, long zoneId) { String deleteSql = "DELETE FROM `cloud`.`op_dc_ip_address_alloc` WHERE ip_address = ? AND pod_id = ? AND data_center_id = ?"; String isPrivateIPAllocatedSelectSql = "SELECT * FROM `cloud`.`op_dc_ip_address_alloc` WHERE ip_address = ? AND data_center_id = ? 
AND pod_id = ?"; @@ -429,7 +430,7 @@ public class IPRangeConfig { endIPLong = NetUtils.ip2Long(endIP); } - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); List problemIPs = null; if (type.equals("public")) { @@ -447,7 +448,7 @@ public class IPRangeConfig { return problemIPs; } - public Vector savePublicIPRange(Transaction txn, long startIP, long endIP, long zoneId, long vlanDbId, Long sourceNetworkId, long physicalNetworkId) { + public Vector savePublicIPRange(TransactionLegacy txn, long startIP, long endIP, long zoneId, long vlanDbId, Long sourceNetworkId, long physicalNetworkId) { String insertSql = "INSERT INTO `cloud`.`user_ip_address` (public_ip_address, data_center_id, vlan_db_id, mac_address, source_network_id, physical_network_id, uuid) VALUES (?, ?, ?, (select mac_address from `cloud`.`data_center` where id=?), ?, ?, ?)"; String updateSql = "UPDATE `cloud`.`data_center` set mac_address = mac_address+1 where id=?"; Vector problemIPs = new Vector(); @@ -485,7 +486,7 @@ public class IPRangeConfig { return problemIPs; } - public List savePrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) { + public List savePrivateIPRange(TransactionLegacy txn, long startIP, long endIP, long podId, long zoneId) { String insertSql = "INSERT INTO `cloud`.`op_dc_ip_address_alloc` (ip_address, data_center_id, pod_id, mac_address) VALUES (?, ?, ?, (select mac_address from `cloud`.`data_center` where id=?))"; String updateSql = "UPDATE `cloud`.`data_center` set mac_address = mac_address+1 where id=?"; Vector problemIPs = new Vector(); @@ -519,7 +520,7 @@ public class IPRangeConfig { return problemIPs; } - private Vector saveLinkLocalPrivateIPRange(Transaction txn, long startIP, long endIP, long podId, long zoneId) { + private Vector saveLinkLocalPrivateIPRange(TransactionLegacy txn, long startIP, long endIP, long podId, long zoneId) { String insertSql = "INSERT INTO 
`cloud`.`op_dc_link_local_ip_address_alloc` (ip_address, data_center_id, pod_id) VALUES (?, ?, ?)"; Vector problemIPs = new Vector(); diff --git a/server/src/com/cloud/test/PodZoneConfig.java b/server/src/com/cloud/test/PodZoneConfig.java index 59f8b6ce12d..628c74603cc 100644 --- a/server/src/com/cloud/test/PodZoneConfig.java +++ b/server/src/com/cloud/test/PodZoneConfig.java @@ -28,6 +28,7 @@ import com.cloud.network.Networks.TrafficType; import com.cloud.utils.component.ComponentContext; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.net.NetUtils; public class PodZoneConfig { @@ -148,7 +149,7 @@ public class PodZoneConfig { HashMap> currentPodCidrSubnets = new HashMap>(); String selectSql = "SELECT id, cidr_address, cidr_size FROM host_pod_ref WHERE data_center_id=" + dcId; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); ResultSet rs = stmt.executeQuery(); @@ -363,7 +364,7 @@ public class PodZoneConfig { String insertVnet = "INSERT INTO `cloud`.`op_dc_vnet_alloc` (vnet, data_center_id, physical_network_id) VALUES ( ?, ?, ?)"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(insertVnet); for (int i = begin; i <= end; i++) { @@ -483,7 +484,7 @@ public class PodZoneConfig { Vector allZoneIDs = new Vector(); String selectSql = "SELECT id FROM data_center"; - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); try { PreparedStatement stmt = txn.prepareAutoCloseStatement(selectSql); ResultSet rs = stmt.executeQuery(); diff --git a/server/src/com/cloud/usage/UsageServiceImpl.java b/server/src/com/cloud/usage/UsageServiceImpl.java index c96e036259f..f7cfe313bb0 100755 --- 
a/server/src/com/cloud/usage/UsageServiceImpl.java +++ b/server/src/com/cloud/usage/UsageServiceImpl.java @@ -34,11 +34,11 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.usage.UsageService; import org.apache.cloudstack.usage.UsageTypes; - import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.configuration.Config; +import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.PermissionDeniedException; @@ -55,6 +55,7 @@ import com.cloud.utils.component.ManagerBase; import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value = { UsageService.class }) @@ -89,7 +90,7 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag @Override public boolean generateUsageRecords(GenerateUsageRecordsCmd cmd) { - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try { UsageJobVO immediateJob = _usageJobDao.getNextImmediateJob(); if (immediateJob == null) { @@ -107,7 +108,7 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag txn.close(); // switch back to VMOPS_DB - Transaction swap = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); swap.close(); } return true; @@ -153,6 +154,7 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag } boolean isAdmin = false; + boolean isDomainAdmin = false; //If accountId couldn't be found using accountName and domainId, get it from userContext if(accountId == null){ @@ -161,6 +163,8 @@ public class UsageServiceImpl extends ManagerBase implements 
UsageService, Manag //If account_id or account_name is explicitly mentioned, list records for the specified account only even if the caller is of type admin if (_accountService.isRootAdmin(caller.getId())) { isAdmin = true; + } else if(caller.getType() == Account.ACCOUNT_TYPE_DOMAIN_ADMIN){ + isDomainAdmin = true; } s_logger.debug("Account details not available. Using userContext accountId: " + accountId); } @@ -182,10 +186,20 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag SearchCriteria sc = _usageDao.createSearchCriteria(); - if (accountId != -1 && accountId != Account.ACCOUNT_ID_SYSTEM && !isAdmin) { + if (accountId != -1 && accountId != Account.ACCOUNT_ID_SYSTEM && !isAdmin && !isDomainAdmin) { sc.addAnd("accountId", SearchCriteria.Op.EQ, accountId); } + if (isDomainAdmin) { + SearchCriteria sdc = _domainDao.createSearchCriteria(); + sdc.addOr("path", SearchCriteria.Op.LIKE, _domainDao.findById(caller.getDomainId()).getPath() + "%"); + List domains = _domainDao.search(sdc, null); + List domainIds = new ArrayList(); + for(DomainVO domain:domains) + domainIds.add(domain.getId()); + sc.addAnd("domainId", SearchCriteria.Op.IN, domainIds.toArray()); + } + if (domainId != null) { sc.addAnd("domainId", SearchCriteria.Op.EQ, domainId); } @@ -202,14 +216,14 @@ public class UsageServiceImpl extends ManagerBase implements UsageService, Manag } List usageRecords = null; - Transaction txn = Transaction.open(Transaction.USAGE_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.USAGE_DB); try { usageRecords = _usageDao.searchAllRecords(sc, usageFilter); } finally { txn.close(); // switch back to VMOPS_DB - Transaction swap = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy swap = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); swap.close(); } diff --git a/server/src/com/cloud/user/AccountManagerImpl.java b/server/src/com/cloud/user/AccountManagerImpl.java index 03b51f4cbaa..a88b6140c3c 100755 --- 
a/server/src/com/cloud/user/AccountManagerImpl.java +++ b/server/src/com/cloud/user/AccountManagerImpl.java @@ -58,9 +58,9 @@ import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; import org.apache.cloudstack.api.command.admin.user.RegisterCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.context.ServerContexts; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.region.gslb.GlobalLoadBalancerRuleDao; import com.cloud.api.ApiDBUtils; @@ -145,6 +145,9 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.InstanceGroupVO; @@ -513,10 +516,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } @DB - public void updateLoginAttempts(Long id, int attempts, boolean toDisable) { - Transaction txn = Transaction.currentTxn(); - txn.start(); + public void updateLoginAttempts(final Long id, final int attempts, final boolean toDisable) { try { + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { UserAccountVO user = null; user = _userAccountDao.lockRow(id, true); user.setLoginAttempts(attempts); @@ -524,11 +528,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M user.setState(State.disabled.toString()); } _userAccountDao.update(id, user); - 
txn.commit(); + } + }); } catch (Exception e) { s_logger.error("Failed to update login attempts for user with id " + id ); } - txn.close(); } private boolean doSetUserStatus(long userId, State state) { @@ -733,7 +737,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } - if (vpcsDeleted) { + if (networksDeleted && vpcsDeleted) { // release ip addresses belonging to the account List ipsToRelease = _ipAddressDao.listByAccount(accountId); for (IpAddress ip : ipsToRelease) { @@ -893,10 +897,10 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @DB @ActionEvents({ @ActionEvent(eventType = EventTypes.EVENT_ACCOUNT_CREATE, eventDescription = "creating Account"), - @ActionEvent(eventType = EventTypes.EVENT_USER_CREATE, eventDescription = "creating User"), + @ActionEvent(eventType = EventTypes.EVENT_USER_CREATE, eventDescription = "creating User") }) - public UserAccount createUserAccount(String userName, String password, String firstName, String lastName, String email, String timezone, String accountName, short accountType, - Long domainId, String networkDomain, Map details, String accountUUID, String userUUID) { + public UserAccount createUserAccount(final String userName, final String password, final String firstName, final String lastName, final String email, final String timezone, String accountName, final short accountType, + Long domainId, final String networkDomain, final Map details, String accountUUID, final String userUUID) { if (accountName == null) { accountName = userName; @@ -938,38 +942,47 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } - Transaction txn = Transaction.currentTxn(); - txn.start(); + final String accountNameFinal = accountName; + final Long domainIdFinal = domainId; + final String accountUUIDFinal = accountUUID; + Pair pair = Transaction.execute(new TransactionCallback>() { + @Override + public Pair doInTransaction(TransactionStatus 
status) { + // create account + String accountUUID = accountUUIDFinal; + if (accountUUID == null) { + accountUUID = UUID.randomUUID().toString(); + } + AccountVO account = createAccount(accountNameFinal, accountType, domainIdFinal, networkDomain, details, accountUUID); + long accountId = account.getId(); - // create account - if(accountUUID == null){ - accountUUID = UUID.randomUUID().toString(); - } - AccountVO account = createAccount(accountName, accountType, domainId, networkDomain, details, accountUUID); - long accountId = account.getId(); + // create the first user for the account + UserVO user = createUser(accountId, userName, password, firstName, lastName, email, timezone, userUUID); - // create the first user for the account - UserVO user = createUser(accountId, userName, password, firstName, lastName, email, timezone, userUUID); + if (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { + // set registration token + byte[] bytes = (domainIdFinal + accountNameFinal + userName + System.currentTimeMillis()).getBytes(); + String registrationToken = UUID.nameUUIDFromBytes(bytes).toString(); + user.setRegistrationToken(registrationToken); + } - if (accountType == Account.ACCOUNT_TYPE_RESOURCE_DOMAIN_ADMIN) { - // set registration token - byte[] bytes = (domainId + accountName + userName + System.currentTimeMillis()).getBytes(); - String registrationToken = UUID.nameUUIDFromBytes(bytes).toString(); - user.setRegistrationToken(registrationToken); - } + // create correct account and group association based on accountType + if (accountType != Account.ACCOUNT_TYPE_PROJECT) { + AclGroupAccountMapVO grpAcct = new AclGroupAccountMapVO(accountType + 1, accountId); + _aclGroupAccountDao.persist(grpAcct); + } - // create correct account and group association based on accountType - if (accountType != Account.ACCOUNT_TYPE_PROJECT) { - AclGroupAccountMapVO grpAcct = new AclGroupAccountMapVO(accountType + 1, accountId); - _aclGroupAccountDao.persist(grpAcct); - } + 
return new Pair(user.getId(), account); + } + }); - txn.commit(); + long userId = pair.first(); + Account account = pair.second(); CallContext.current().putContextParameter(Account.class, account.getUuid()); //check success - return _userAccountDao.findById(user.getId()); + return _userAccountDao.findById(userId); } @Override @@ -1175,12 +1188,12 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Override @DB @ActionEvent(eventType = EventTypes.EVENT_USER_ENABLE, eventDescription = "enabling User") - public UserAccount enableUser(long userId) { + public UserAccount enableUser(final long userId) { Account caller = CallContext.current().getCallingAccount(); // Check if user exists in the system - User user = _userDao.findById(userId); + final User user = _userDao.findById(userId); if (user == null || user.getRemoved() != null) { throw new InvalidParameterValueException("Unable to find active user by id " + userId); } @@ -1198,15 +1211,18 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M checkAccess(caller, null, true, account); - Transaction txn = Transaction.currentTxn(); - txn.start(); - + boolean success = Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { boolean success = doSetUserStatus(userId, State.enabled); // make sure the account is enabled too success = success && enableAccount(user.getAccountId()); - txn.commit(); + return success; + } + }); + if (success) { // whenever the user is successfully enabled, reset the login attempts to zero @@ -1433,7 +1449,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M String accountName = cmd.getAccountName(); String newAccountName = cmd.getNewName(); String networkDomain = cmd.getNetworkDomain(); - Map details = cmd.getDetails(); + final Map details = cmd.getDetails(); boolean success = false; Account account = null; @@ -1477,7 +1493,7 @@ public 
class AccountManagerImpl extends ManagerBase implements AccountManager, M } } - AccountVO acctForUpdate = _accountDao.findById(account.getId()); + final AccountVO acctForUpdate = _accountDao.findById(account.getId()); acctForUpdate.setAccountName(newAccountName); if (networkDomain != null) { @@ -1488,16 +1504,19 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } } - Transaction txn = Transaction.currentTxn(); - txn.start(); - - success = _accountDao.update(account.getId(), acctForUpdate); + final Account accountFinal = account; + success = Transaction.execute(new TransactionCallback() { + @Override + public Boolean doInTransaction(TransactionStatus status) { + boolean success = _accountDao.update(accountFinal.getId(), acctForUpdate); if (details != null && success) { - _accountDetailsDao.update(account.getId(), details); + _accountDetailsDao.update(accountFinal.getId(), details); } - txn.commit(); + return success; + } + }); if (success) { CallContext.current().putContextParameter(Account.class, account.getUuid()); @@ -1535,18 +1554,10 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M return _userDao.remove(id); } - public class ResourceCountCalculateTask implements Runnable { + protected class AccountCleanupTask extends ManagedContextRunnable { @Override - public void run() { - - } - } - - protected class AccountCleanupTask implements Runnable { - @Override - public void run() { + protected void runInContext() { try { - ServerContexts.registerSystemContext(); GlobalLock lock = GlobalLock.getInternLock("AccountCleanup"); if (lock == null) { s_logger.debug("Couldn't get the global lock"); @@ -1628,7 +1639,6 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M s_logger.error("Exception ", e); } finally { lock.unlock(); - ServerContexts.unregisterSystemContext(); } } catch (Exception e) { s_logger.error("Exception ", e); @@ -1758,7 +1768,7 @@ public class 
AccountManagerImpl extends ManagerBase implements AccountManager, M @Override @DB - public AccountVO createAccount(String accountName, short accountType, Long domainId, String networkDomain, Map details, String uuid) { + public AccountVO createAccount(final String accountName, final short accountType, final Long domainId, final String networkDomain, final Map details, final String uuid) { // Validate domain Domain domain = _domainMgr.getDomain(domainId); if (domain == null) { @@ -1799,9 +1809,9 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } // Create the account - Transaction txn = Transaction.currentTxn(); - txn.start(); - + return Transaction.execute(new TransactionCallback() { + @Override + public AccountVO doInTransaction(TransactionStatus status) { AccountVO account = _accountDao.persist(new AccountVO(accountName, domainId, networkDomain, accountType, uuid)); if (account == null) { @@ -1819,10 +1829,11 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M // Create default security group _networkGroupMgr.createDefaultSecurityGroup(accountId); - txn.commit(); return account; } + }); + } protected UserVO createUser(long accountId, String userName, String password, String firstName, String lastName, String email, String timezone, String userUUID) { if (s_logger.isDebugEnabled()) { @@ -2058,7 +2069,7 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M @Override @DB @ActionEvent(eventType = EventTypes.EVENT_REGISTER_FOR_SECRET_API_KEY, eventDescription = "register for the developer API keys") public String[] createApiKeyAndSecretKey(RegisterCmd cmd) { - Long userId = cmd.getId(); + final Long userId = cmd.getId(); User user = getUserIncludingRemoved(userId); if (user == null) { @@ -2071,12 +2082,14 @@ public class AccountManagerImpl extends ManagerBase implements AccountManager, M } // generate both an api key and a secret key, update the user table with the keys, 
return the keys to the user - String[] keys = new String[2]; - Transaction txn = Transaction.currentTxn(); - txn.start(); + final String[] keys = new String[2]; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { keys[0] = createUserApiKey(userId); keys[1] = createUserSecretKey(userId); - txn.commit(); + } + }); return keys; } diff --git a/server/src/com/cloud/user/DomainManagerImpl.java b/server/src/com/cloud/user/DomainManagerImpl.java index 58e8017aebf..fa53b031e0b 100644 --- a/server/src/com/cloud/user/DomainManagerImpl.java +++ b/server/src/com/cloud/user/DomainManagerImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.api.command.admin.domain.ListDomainChildrenCmd; import org.apache.cloudstack.api.command.admin.domain.ListDomainsCmd; import org.apache.cloudstack.api.command.admin.domain.UpdateDomainCmd; @@ -65,6 +64,9 @@ import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.vm.ReservationContext; @@ -112,6 +114,15 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom return _domainDao.findByUuid(domainUuid); } + @Override + public Domain getDomainByName(String name, long parentId) { + SearchCriteria sc = _domainDao.createSearchCriteria(); + sc.addAnd("name", SearchCriteria.Op.EQ, name); + sc.addAnd("parent", SearchCriteria.Op.EQ, parentId); + Domain domain = _domainDao.findOneBy(sc); + return domain; + } + @Override public Set getDomainChildrenIds(String parentDomainPath) { 
Set childDomains = new HashSet(); @@ -158,7 +169,7 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom @Override @DB - public Domain createDomain(String name, Long parentId, Long ownerId, String networkDomain, String domainUUID) { + public Domain createDomain(final String name, final Long parentId, final Long ownerId, final String networkDomain, String domainUUID) { // Verify network domain if (networkDomain != null) { if (!NetUtils.verifyDomainName(networkDomain)) { @@ -181,11 +192,16 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom domainUUID = UUID.randomUUID().toString(); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - DomainVO domain = _domainDao.create(new DomainVO(name, ownerId, parentId, networkDomain, domainUUID)); + final String domainUUIDFinal = domainUUID; + DomainVO domain = Transaction.execute(new TransactionCallback() { + @Override + public DomainVO doInTransaction(TransactionStatus status) { + DomainVO domain = _domainDao.create(new DomainVO(name, ownerId, parentId, networkDomain, domainUUIDFinal)); _resourceCountDao.createResourceCounts(domain.getId(), ResourceLimit.ResourceOwnerType.Domain); - txn.commit(); + return domain; + } + }); + CallContext.current().putContextParameter(Domain.class, domain.getUuid()); return domain; } @@ -544,12 +560,12 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom @ActionEvent(eventType = EventTypes.EVENT_DOMAIN_UPDATE, eventDescription = "updating Domain") @DB public DomainVO updateDomain(UpdateDomainCmd cmd) { - Long domainId = cmd.getId(); - String domainName = cmd.getDomainName(); - String networkDomain = cmd.getNetworkDomain(); + final Long domainId = cmd.getId(); + final String domainName = cmd.getDomainName(); + final String networkDomain = cmd.getNetworkDomain(); // check if domain exists in the system - DomainVO domain = _domainDao.findById(domainId); + final DomainVO domain = 
_domainDao.findById(domainId); if (domain == null) { InvalidParameterValueException ex = new InvalidParameterValueException("Unable to find domain with specified domain id"); ex.addProxyObject(domainId.toString(), "domainId"); @@ -587,10 +603,9 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom } } - Transaction txn = Transaction.currentTxn(); - - txn.start(); - + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { if (domainName != null) { String updatedDomainPath = getUpdatedDomainPath(domain.getPath(), domainName); updateDomainChildren(domain, updatedDomainPath); @@ -607,7 +622,9 @@ public class DomainManagerImpl extends ManagerBase implements DomainManager, Dom } _domainDao.update(domainId, domain); CallContext.current().putContextParameter(Domain.class, domain.getUuid()); - txn.commit(); + } + }); + return _domainDao.findById(domainId); diff --git a/server/src/com/cloud/uuididentity/dao/IdentityDao.java b/server/src/com/cloud/uuididentity/dao/IdentityDao.java index e9149a0fe63..be412f3c8a9 100644 --- a/server/src/com/cloud/uuididentity/dao/IdentityDao.java +++ b/server/src/com/cloud/uuididentity/dao/IdentityDao.java @@ -17,7 +17,7 @@ package com.cloud.uuididentity.dao; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.utils.Pair; import com.cloud.utils.db.GenericDao; @@ -31,5 +31,5 @@ public interface IdentityDao extends GenericDao { * @param resourceType TODO * @return */ - Pair getAccountDomainInfo(String tableName, Long identityId, TaggedResourceType resourceType); + Pair getAccountDomainInfo(String tableName, Long identityId, ResourceObjectType resourceType); } diff --git a/server/src/com/cloud/uuididentity/dao/IdentityDaoImpl.java b/server/src/com/cloud/uuididentity/dao/IdentityDaoImpl.java index c6fd8c17c9c..9c54fb77048 100644 --- 
a/server/src/com/cloud/uuididentity/dao/IdentityDaoImpl.java +++ b/server/src/com/cloud/uuididentity/dao/IdentityDaoImpl.java @@ -28,11 +28,12 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; @Component @Local(value={IdentityDao.class}) @@ -48,7 +49,7 @@ public class IdentityDaoImpl extends GenericDaoBase implements assert(identityString != null); PreparedStatement pstmt = null; - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { try { try { @@ -96,11 +97,11 @@ public class IdentityDaoImpl extends GenericDaoBase implements @DB @Override - public Pair getAccountDomainInfo(String tableName, Long identityId, TaggedResourceType resourceType) { + public Pair getAccountDomainInfo(String tableName, Long identityId, ResourceObjectType resourceType) { assert(tableName != null); PreparedStatement pstmt = null; - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { Long domainId = null; Long accountId = null; @@ -110,7 +111,9 @@ public class IdentityDaoImpl extends GenericDaoBase implements pstmt.setLong(1, identityId); ResultSet rs = pstmt.executeQuery(); if (rs.next()) { - domainId = rs.getLong(1); + if (rs.getLong(1) != 0) { + domainId = rs.getLong(1); + } } } catch (SQLException e) { } @@ -118,14 +121,16 @@ public class IdentityDaoImpl extends GenericDaoBase implements //get accountId try { String account = "account_id"; - if (resourceType == TaggedResourceType.Project) { + if (resourceType == 
ResourceObjectType.Project) { account = "project_account_id"; } pstmt = txn.prepareAutoCloseStatement(String.format("SELECT " + account + " FROM `%s` WHERE id=?", tableName)); pstmt.setLong(1, identityId); ResultSet rs = pstmt.executeQuery(); if (rs.next()) { - accountId = rs.getLong(1); + if (rs.getLong(1) != 0) { + accountId = rs.getLong(1); + } } } catch (SQLException e) { } @@ -142,7 +147,7 @@ public class IdentityDaoImpl extends GenericDaoBase implements assert(identityString != null); PreparedStatement pstmt = null; - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { try { pstmt = txn.prepareAutoCloseStatement( @@ -183,7 +188,7 @@ public class IdentityDaoImpl extends GenericDaoBase implements assert(tableName != null); List l = getNullUuidRecords(tableName); - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { try { txn.start(); @@ -205,7 +210,7 @@ public class IdentityDaoImpl extends GenericDaoBase implements List l = new ArrayList(); PreparedStatement pstmt = null; - Transaction txn = Transaction.open(Transaction.CLOUD_DB); + TransactionLegacy txn = TransactionLegacy.open(TransactionLegacy.CLOUD_DB); try { try { pstmt = txn.prepareAutoCloseStatement( @@ -227,7 +232,7 @@ public class IdentityDaoImpl extends GenericDaoBase implements @DB void setInitialUuid(String tableName, long id) throws SQLException { - Transaction txn = Transaction.currentTxn(); + TransactionLegacy txn = TransactionLegacy.currentTxn(); PreparedStatement pstmtUpdate = null; pstmtUpdate = txn.prepareAutoCloseStatement( diff --git a/server/src/com/cloud/vm/SystemVmLoadScanner.java b/server/src/com/cloud/vm/SystemVmLoadScanner.java index 3932c3b9641..6e5521632c2 100644 --- a/server/src/com/cloud/vm/SystemVmLoadScanner.java +++ b/server/src/com/cloud/vm/SystemVmLoadScanner.java @@ -21,8 +21,7 @@ import 
java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import org.apache.log4j.Logger; - -import org.apache.cloudstack.context.ServerContexts; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import com.cloud.utils.Pair; import com.cloud.utils.concurrency.NamedThreadFactory; @@ -64,17 +63,14 @@ public class SystemVmLoadScanner { } private Runnable getCapacityScanTask() { - return new Runnable() { + return new ManagedContextRunnable() { @Override - public void run() { - ServerContexts.registerSystemContext(); + protected void runInContext() { try { reallyRun(); } catch (Throwable e) { s_logger.warn("Unexpected exception " + e.getMessage(), e); - } finally { - ServerContexts.unregisterSystemContext(); } } diff --git a/server/src/com/cloud/vm/UserVmManager.java b/server/src/com/cloud/vm/UserVmManager.java index ce882cb6016..0251754ed3c 100755 --- a/server/src/com/cloud/vm/UserVmManager.java +++ b/server/src/com/cloud/vm/UserVmManager.java @@ -20,6 +20,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.framework.config.ConfigKey; import com.cloud.agent.api.VmDiskStatsEntry; @@ -92,4 +93,7 @@ public interface UserVmManager extends UserVmService { boolean setupVmForPvlan(boolean add, Long hostId, NicProfile nic); void collectVmDiskStatistics (UserVmVO userVm); + + UserVm updateVirtualMachine(long id, String displayName, String group, Boolean ha, Boolean isDisplayVmEnabled, Long osTypeId, String userData, + Boolean isDynamicallyScalable, HTTPMethod httpMethod)throws ResourceUnavailableException, InsufficientCapacityException; } diff --git a/server/src/com/cloud/vm/UserVmManagerImpl.java b/server/src/com/cloud/vm/UserVmManagerImpl.java index fb00f913555..2f1c6c09d93 100755 --- a/server/src/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/com/cloud/vm/UserVmManagerImpl.java @@ -42,8 +42,10 @@ import 
org.apache.cloudstack.affinity.AffinityGroupService; import org.apache.cloudstack.affinity.AffinityGroupVO; import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; +import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd.HTTPMethod; import org.apache.cloudstack.api.command.admin.vm.AssignVMCmd; +import org.apache.cloudstack.api.command.admin.vm.ExpungeVMCmd; import org.apache.cloudstack.api.command.admin.vm.RecoverVMCmd; import org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd; import org.apache.cloudstack.api.command.user.vm.DeployVMCmd; @@ -61,17 +63,21 @@ import org.apache.cloudstack.api.command.user.vm.UpgradeVMCmd; import org.apache.cloudstack.api.command.user.vmgroup.CreateVMGroupCmd; import org.apache.cloudstack.api.command.user.vmgroup.DeleteVMGroupCmd; import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.context.ServerContexts; import org.apache.cloudstack.engine.cloud.entity.api.VirtualMachineEntity; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.service.api.OrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeService.VolumeApiResult; +import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import 
org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.managed.context.ManagedContextRunnable; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.to.TemplateObjectTO; @@ -210,7 +216,6 @@ import com.cloud.template.VirtualMachineTemplate.BootloaderType; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.AccountService; -import com.cloud.user.AccountVO; import com.cloud.user.ResourceLimitService; import com.cloud.user.SSHKeyPair; import com.cloud.user.SSHKeyPairVO; @@ -235,6 +240,10 @@ import com.cloud.utils.db.EntityManager; import com.cloud.utils.db.GlobalLock; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionCallbackWithExceptionNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.exception.ExecutionException; import com.cloud.utils.fsm.NoTransitionException; @@ -423,6 +432,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir PlannerHostReservationDao _plannerHostReservationDao; @Inject private ServiceOfferingDetailsDao serviceOfferingDetailsDao; + @Inject + VolumeService _volService; + @Inject + VolumeDataFactory volFactory; protected ScheduledExecutorService _executor = null; protected int _expungeInterval; @@ -709,7 +722,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir User user = _userDao.findById(userId); try { VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid()); - status = vmEntity.stop(new Long(userId).toString()); + status = vmEntity.stop(Long.toString(userId)); } catch (ResourceUnavailableException e) { 
s_logger.debug("Unable to stop due to ", e); status = false; @@ -1369,11 +1382,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir public UserVm recoverVirtualMachine(RecoverVMCmd cmd) throws ResourceAllocationException, CloudRuntimeException { - Long vmId = cmd.getId(); + final Long vmId = cmd.getId(); Account caller = CallContext.current().getCallingAccount(); // Verify input parameters - UserVmVO vm = _vmDao.findById(vmId.longValue()); + final UserVmVO vm = _vmDao.findById(vmId.longValue()); if (vm == null) { throw new InvalidParameterValueException( @@ -1403,11 +1416,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir s_logger.debug("Recovering vm " + vmId); } - Transaction txn = Transaction.currentTxn(); - AccountVO account = null; - txn.start(); + Transaction.execute(new TransactionCallbackWithExceptionNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) throws ResourceAllocationException { - account = _accountDao.lockRow(vm.getAccountId(), true); + Account account = _accountDao.lockRow(vm.getAccountId(), true); // if the account is deleted, throw error if (account.getRemoved() != null) { @@ -1464,7 +1477,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir //Update Resource Count for the given account resourceCountIncrement(account.getId(), new Long(serviceOffering.getCpu()), new Long(serviceOffering.getRamSize())); - txn.commit(); + } + }); + return _vmDao.findById(vmId); } @@ -1702,13 +1717,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - protected class ExpungeTask implements Runnable { + protected class ExpungeTask extends ManagedContextRunnable { public ExpungeTask() { } @Override - public void run() { - ServerContexts.registerSystemContext(); + protected void runInContext() { GlobalLock scanLock = GlobalLock.getInternLock("UserVMExpunge"); try { if 
(scanLock.lock(ACQUIRE_GLOBAL_LOCK_TIMEOUT_FOR_COOPERATION)) { @@ -1742,7 +1756,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } finally { scanLock.releaseRef(); - ServerContexts.unregisterSystemContext(); } } @@ -1762,43 +1775,29 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir Boolean isDynamicallyScalable = cmd.isDynamicallyScalable(); Account caller = CallContext.current().getCallingAccount(); - // Input validation - UserVmVO vmInstance = null; - - // Verify input parameters - vmInstance = _vmDao.findById(id.longValue()); - + // Input validation and permission checks + UserVmVO vmInstance = _vmDao.findById(id.longValue()); if (vmInstance == null) { throw new InvalidParameterValueException( "unable to find virtual machine with id " + id); } - ServiceOffering offering = _serviceOfferingDao.findById(vmInstance - .getServiceOfferingId()); - if (!offering.getOfferHA() && ha != null && ha) { - throw new InvalidParameterValueException( - "Can't enable ha for the vm as it's created from the Service offering having HA disabled"); - } - _accountMgr.checkAccess(CallContext.current().getCallingAccount(), null, true, vmInstance); - if (displayName == null) { - displayName = vmInstance.getDisplayName(); - } - - if (ha == null) { - ha = vmInstance.isHaEnabled(); - } - - if (isDisplayVmEnabled == null) { - isDisplayVmEnabled = vmInstance.isDisplayVm(); - } else{ - if(!_accountMgr.isRootAdmin(caller.getId())){ + if (isDisplayVmEnabled != null) { + if(!_accountMgr.isRootAdmin(caller.getType())){ throw new PermissionDeniedException( "Cannot update parameter displayvm, only admin permitted "); } } + return updateVirtualMachine(id, displayName, group, ha, isDisplayVmEnabled, osTypeId, userData, isDynamicallyScalable, cmd.getHttpMethod()); + } + + @Override + public UserVm updateVirtualMachine(long id, String displayName, String group, Boolean ha, + Boolean isDisplayVmEnabled, Long osTypeId, String userData, 
Boolean isDynamicallyScalable, HTTPMethod httpMethod) + throws ResourceUnavailableException, InsufficientCapacityException { UserVmVO vm = _vmDao.findById(id); if (vm == null) { throw new CloudRuntimeException( @@ -1811,44 +1810,49 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir + " is not in the right state"); } + if (displayName == null) { + displayName = vm.getDisplayName(); + } + + if (ha == null) { + ha = vm.isHaEnabled(); + } + + ServiceOffering offering = _serviceOfferingDao.findById(vm.getServiceOfferingId()); + if (!offering.getOfferHA() && ha) { + throw new InvalidParameterValueException( + "Can't enable ha for the vm as it's created from the Service offering having HA disabled"); + } + + if (isDisplayVmEnabled == null) { + isDisplayVmEnabled = vm.isDisplayVm(); + } + boolean updateUserdata = false; if (userData != null) { // check and replace newlines userData = userData.replace("\\n", ""); - validateUserData(userData, cmd.getHttpMethod()); + validateUserData(userData, httpMethod); // update userData on domain router. updateUserdata = true; } else { - userData = vmInstance.getUserData(); - } - - String description = ""; - - if (displayName != null && !displayName.equals(vmInstance.getDisplayName())) { - description += "New display name: " + displayName + ". "; - } - - if (ha != vmInstance.isHaEnabled()) { - if (ha) { - description += "Enabled HA. "; - } else { - description += "Disabled HA. "; - } - } - if (osTypeId == null) { - osTypeId = vmInstance.getGuestOSId(); - } else { - description += "Changed Guest OS Type to " + osTypeId + ". 
"; - } - - if (group != null) { - if (addInstanceToGroup(id, group)) { - description += "Added to group: " + group + "."; - } + userData = vm.getUserData(); } if (isDynamicallyScalable == null) { - isDynamicallyScalable = vmInstance.isDynamicallyScalable(); + isDynamicallyScalable = vm.isDynamicallyScalable(); + } + + if (osTypeId == null) { + osTypeId = vm.getGuestOSId(); + } + + if (group != null) { + addInstanceToGroup(id, group); + } + + if (isDynamicallyScalable == null) { + isDynamicallyScalable = vm.isDynamicallyScalable(); } _vmDao.updateVM(id, displayName, ha, osTypeId, userData, isDisplayVmEnabled, isDynamicallyScalable); @@ -1939,7 +1943,30 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @ActionEvent(eventType = EventTypes.EVENT_VM_DESTROY, eventDescription = "destroying Vm", async = true) public UserVm destroyVm(DestroyVMCmd cmd) throws ResourceUnavailableException, ConcurrentOperationException { - return destroyVm(cmd.getId()); + CallContext ctx = CallContext.current(); + long vmId = cmd.getId(); + boolean expunge = cmd.getExpunge(); + + if (!_accountMgr.isAdmin(ctx.getCallingAccount().getType()) && expunge) { + throw new PermissionDeniedException("Parameter " + ApiConstants.EXPUNGE + " can be passed by Admin only"); + } + + UserVm destroyedVm = destroyVm(vmId); + if (expunge) { + UserVmVO vm = _vmDao.findById(vmId); + if (!expunge(vm, ctx.getCallingUserId(), ctx.getCallingAccount())) { + throw new CloudRuntimeException("Failed to expunge vm " + destroyedVm); + } + } + + return destroyedVm; + } + + @Override + @ActionEvent(eventType = EventTypes.EVENT_VM_EXPUNGE, eventDescription = "expunging Vm", async = true) + public UserVm expungeVm(ExpungeVMCmd cmd) + throws ResourceUnavailableException, ConcurrentOperationException { + return expungeVm(cmd.getId()); } @Override @@ -1970,8 +1997,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @DB protected InstanceGroupVO 
createVmGroup(String groupName, long accountId) { Account account = null; - final Transaction txn = Transaction.currentTxn(); - txn.start(); try { account = _accountDao.acquireInLockTable(accountId); // to ensure // duplicate @@ -1994,7 +2019,6 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir if (account != null) { _accountDao.releaseFromLockTable(accountId); } - txn.commit(); } } @@ -2037,7 +2061,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override @DB - public boolean addInstanceToGroup(long userVmId, String groupName) { + public boolean addInstanceToGroup(final long userVmId, String groupName) { UserVmVO vm = _vmDao.findById(userVmId); InstanceGroupVO group = _vmGroupDao.findByAccountAndName( @@ -2048,23 +2072,25 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } if (group != null) { - final Transaction txn = Transaction.currentTxn(); - txn.start(); UserVm userVm = _vmDao.acquireInLockTable(userVmId); if (userVm == null) { s_logger.warn("Failed to acquire lock on user vm id=" + userVmId); } try { + final InstanceGroupVO groupFinal = group; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { // don't let the group be deleted when we are assigning vm to // it. 
- InstanceGroupVO ngrpLock = _vmGroupDao.lockRow(group.getId(), + InstanceGroupVO ngrpLock = _vmGroupDao.lockRow(groupFinal.getId(), false); if (ngrpLock == null) { s_logger.warn("Failed to acquire lock on vm group id=" - + group.getId() + " name=" + group.getName()); - txn.rollback(); - return false; + + groupFinal.getId() + " name=" + groupFinal.getName()); + throw new CloudRuntimeException("Failed to acquire lock on vm group id=" + + groupFinal.getId() + " name=" + groupFinal.getName()); } // Currently don't allow to assign a vm to more than one group @@ -2081,10 +2107,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } InstanceGroupVMMapVO groupVmMapVO = new InstanceGroupVMMapVO( - group.getId(), userVmId); + groupFinal.getId(), userVmId); _groupVMMapDao.persist(groupVmMapVO); - txn.commit(); + } + }); + return true; } finally { if (userVm != null) { @@ -2447,7 +2475,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } @DB - protected UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate template, String hostName, String displayName, Account owner, Long diskOfferingId, + protected UserVm createVirtualMachine(DataCenter zone, ServiceOffering serviceOffering, VirtualMachineTemplate tmplt, String hostName, String displayName, Account owner, Long diskOfferingId, Long diskSize, List networkList, List securityGroupIdList, String group, HTTPMethod httpmethod, String userData, String sshKeyPair, HypervisorType hypervisor, Account caller, Map requestedIps, IpAddresses defaultIps, Boolean isDisplayVmEnabled, String keyboard, List affinityGroupIdList) @@ -2459,6 +2487,10 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw new PermissionDeniedException( "The owner of vm to deploy is disabled: " + owner); } + VMTemplateVO template = _templateDao.findById(tmplt.getId()); + if (template != null) { + 
_templateDao.loadDetails(template); + } long accountId = owner.getId(); @@ -2749,8 +2781,42 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir hypervisorType = template.getHypervisorType(); } - Transaction txn = Transaction.currentTxn(); - txn.start(); + UserVmVO vm = commitUserVm(zone, template, hostName, displayName, owner, diskOfferingId, diskSize, userData, + hypervisor, caller, isDisplayVmEnabled, keyboard, accountId, offering, isIso, sshPublicKey, + networkNicMap, id, instanceName, uuidName, hypervisorType); + + // Assign instance to the group + try { + if (group != null) { + boolean addToGroup = addInstanceToGroup(Long.valueOf(id), group); + if (!addToGroup) { + throw new CloudRuntimeException( + "Unable to assign Vm to the group " + group); + } + } + } catch (Exception ex) { + throw new CloudRuntimeException("Unable to assign Vm to the group " + + group); + } + + _securityGroupMgr.addInstanceToGroups(vm.getId(), securityGroupIdList); + + if (affinityGroupIdList != null && !affinityGroupIdList.isEmpty()) { + _affinityGroupVMMapDao.updateMap(vm.getId(), affinityGroupIdList); + } + + return vm; + } + + private UserVmVO commitUserVm(final DataCenter zone, final VirtualMachineTemplate template, final String hostName, + final String displayName, final Account owner, final Long diskOfferingId, final Long diskSize, final String userData, + final HypervisorType hypervisor, final Account caller, final Boolean isDisplayVmEnabled, final String keyboard, final long accountId, + final ServiceOfferingVO offering, final boolean isIso, final String sshPublicKey, + final LinkedHashMap networkNicMap, final long id, final String instanceName, final String uuidName, + final HypervisorType hypervisorType) throws InsufficientCapacityException { + return Transaction.execute(new TransactionCallbackWithException() { + @Override + public UserVmVO doInTransaction(TransactionStatus status) throws InsufficientCapacityException { UserVmVO vm = new 
UserVmVO(id, instanceName, displayName, template.getId(), hypervisorType, template.getGuestOSId(), offering.getOfferHA(), offering.getLimitCpuUse(), @@ -2807,6 +2873,11 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } + Map details = template.getDetails(); + if ( details != null && !details.isEmpty() ) { + vm.details.putAll(details); + } + _vmDao.persist(vm); _vmDao.saveDetails(vm); @@ -2820,9 +2891,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir rootDiskTags.add(offering.getTags()); if(isIso){ - VirtualMachineEntity vmEntity = _orchSrvc.createVirtualMachineFromScratch(vm.getUuid(), new Long(owner.getAccountId()).toString(), vm.getIsoId().toString(), hostName, displayName, hypervisor.name(), guestOSCategory.getName(), offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan); + VirtualMachineEntity vmEntity = _orchSrvc.createVirtualMachineFromScratch(vm.getUuid(), Long.toString(owner.getAccountId()), vm.getIsoId().toString(), hostName, displayName, hypervisor.name(), guestOSCategory.getName(), offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan); }else { - VirtualMachineEntity vmEntity = _orchSrvc.createVirtualMachine(vm.getUuid(), new Long(owner.getAccountId()).toString(), new Long(template.getId()).toString(), hostName, displayName, hypervisor.name(), offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan); + VirtualMachineEntity vmEntity = _orchSrvc.createVirtualMachine(vm.getUuid(), Long.toString(owner.getAccountId()), Long.toString(template.getId()), hostName, displayName, hypervisor.name(), offering.getCpu(), offering.getSpeed(), offering.getRamSize(), diskSize, computeTags, rootDiskTags, networkNicMap, plan); } @@ -2839,31 +2910,10 @@ public class UserVmManagerImpl extends ManagerBase 
implements UserVmManager, Vir //Update Resource Count for the given account resourceCountIncrement(accountId, new Long(offering.getCpu()), new Long(offering.getRamSize())); - - txn.commit(); - - // Assign instance to the group - try { - if (group != null) { - boolean addToGroup = addInstanceToGroup(Long.valueOf(id), group); - if (!addToGroup) { - throw new CloudRuntimeException( - "Unable to assign Vm to the group " + group); - } - } - } catch (Exception ex) { - throw new CloudRuntimeException("Unable to assign Vm to the group " - + group); - } - - _securityGroupMgr.addInstanceToGroups(vm.getId(), securityGroupIdList); - - if (affinityGroupIdList != null && !affinityGroupIdList.isEmpty()) { - _affinityGroupVMMapDao.updateMap(vm.getId(), affinityGroupIdList); - } - return vm; } + }); + } private void validateUserData(String userData, HTTPMethod httpmethod) { byte[] decodedUserData = null; @@ -2943,7 +2993,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @Override public boolean finalizeVirtualMachineProfile(VirtualMachineProfile profile, DeployDestination dest, ReservationContext context) { UserVmVO vm = _vmDao.findById(profile.getId()); - Map details = _vmDetailsDao.findDetails(vm.getId()); + Map details = _vmDetailsDao.listDetailsKeyPairs(vm.getId()); vm.setDetails(details); if (vm.getIsoId() != null) { @@ -3163,7 +3213,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir boolean status = false; try { VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid()); - status = vmEntity.stop(new Long(userId).toString()); + status = vmEntity.stop(Long.toString(userId)); if (status) { return _vmDao.findById(vmId); } else { @@ -3339,8 +3389,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } } - String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), new Long(callerUser.getId()).toString()); - vmEntity.deploy(reservationId, new 
Long(callerUser.getId()).toString(), params); + String reservationId = vmEntity.reserve(plannerName, plan, new ExcludeList(), Long.toString(callerUser.getId())); + vmEntity.deploy(reservationId, Long.toString(callerUser.getId()), params); Pair> vmParamPair = new Pair(vm, params); if (vm != null && vm.isUpdateParameters()) { @@ -3384,7 +3434,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir try { VirtualMachineEntity vmEntity = _orchSrvc.getVirtualMachine(vm.getUuid()); - status = vmEntity.destroy(new Long(userId).toString()); + status = vmEntity.destroy(Long.toString(userId)); } catch (CloudException e) { CloudRuntimeException ex = new CloudRuntimeException( "Unable to destroy with specified vmId", e); @@ -3422,7 +3472,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } @Override - public void collectVmDiskStatistics (UserVmVO userVm) { + public void collectVmDiskStatistics (final UserVmVO userVm) { // support KVM only util 2013.06.25 if (!userVm.getHypervisorType().equals(HypervisorType.KVM)) return; @@ -3430,7 +3480,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir long hostId = userVm.getHostId(); List vmNames = new ArrayList(); vmNames.add(userVm.getInstanceName()); - HostVO host = _hostDao.findById(hostId); + final HostVO host = _hostDao.findById(hostId); GetVmDiskStatsAnswer diskStatsAnswer = null; try { @@ -3444,10 +3494,12 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir s_logger.warn("Error while collecting disk stats vm: " + userVm.getHostName() + " from host: " + host.getName() + "; details: " + diskStatsAnswer.getDetails()); return; } - Transaction txn = Transaction.open(Transaction.CLOUD_DB); try { - txn.start(); - HashMap> vmDiskStatsByName = diskStatsAnswer.getVmDiskStatsMap(); + final GetVmDiskStatsAnswer diskStatsAnswerFinal = diskStatsAnswer; + Transaction.execute(new TransactionCallbackNoReturn() { + 
@Override + public void doInTransactionWithoutResult(TransactionStatus status) { + HashMap> vmDiskStatsByName = diskStatsAnswerFinal.getVmDiskStatsMap(); if (vmDiskStatsByName == null) return; List vmDiskStats = vmDiskStatsByName.get(userVm.getInstanceName()); @@ -3530,17 +3582,56 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir _vmDiskStatsDao.update(vmDiskStat_lock.getId(), vmDiskStat_lock); } - txn.commit(); + } + }); } catch (Exception e) { - txn.rollback(); s_logger.warn("Unable to update vm disk statistics for vm: " + userVm.getId() + " from host: " + hostId, e); - } finally { - txn.close(); } } } + @Override + public UserVm expungeVm(long vmId) throws ResourceUnavailableException, + ConcurrentOperationException { + Account caller = CallContext.current().getCallingAccount(); + Long userId = caller.getId(); + // Verify input parameters + UserVmVO vm = _vmDao.findById(vmId); + if (vm == null) { + InvalidParameterValueException ex = new InvalidParameterValueException( + "Unable to find a virtual machine with specified vmId"); + ex.addProxyObject(String.valueOf(vmId), "vmId"); + throw ex; + } + + if (vm.getRemoved() != null) { + s_logger.trace("Vm id=" + vmId + " is already expunged"); + return vm; + } + + if ((vm.getState() != State.Destroyed) && (vm.getState() != State.Expunging)) { + CloudRuntimeException ex = new CloudRuntimeException( + "Please destroy vm with specified vmId before expunge"); + ex.addProxyObject(String.valueOf(vmId), "vmId"); + throw ex; + } + + _accountMgr.checkAccess(caller, null, true, vm); + + boolean status; + + status = expunge(vm, userId, caller); + if (status) { + return _vmDao.findByIdIncludingRemoved(vmId); + } else { + CloudRuntimeException ex = new CloudRuntimeException( + "Failed to expunge vm with specified vmId"); + ex.addProxyObject(String.valueOf(vmId), "vmId"); + throw ex; + } + + } @Override @@ -3923,7 +4014,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, 
Vir } private boolean isServiceOfferingUsingPlannerInPreferredMode(long serviceOfferingId) { boolean preferred = false; - Map details = serviceOfferingDetailsDao.findDetails(serviceOfferingId); + Map details = serviceOfferingDetailsDao.listDetailsKeyPairs(serviceOfferingId); if (details != null && !details.isEmpty()) { String preferredAttribute = details.get("ImplicitDedicationMode"); if (preferredAttribute != null && preferredAttribute.equals("Preferred")) { @@ -4097,7 +4188,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir @DB @Override @ActionEvent(eventType = EventTypes.EVENT_VM_MOVE, eventDescription = "move VM to another user", async = false) - public UserVm moveVMToUser(AssignVMCmd cmd) + public UserVm moveVMToUser(final AssignVMCmd cmd) throws ResourceAllocationException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException { // VERIFICATIONS and VALIDATIONS @@ -4117,7 +4208,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir } // get and check the valid VM - UserVmVO vm = _vmDao.findById(cmd.getVmId()); + final UserVmVO vm = _vmDao.findById(cmd.getVmId()); if (vm == null) { throw new InvalidParameterValueException( "There is no vm by that id " + cmd.getVmId()); @@ -4132,7 +4223,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir throw ex; } - Account oldAccount = _accountService.getActiveAccountById(vm + final Account oldAccount = _accountService.getActiveAccountById(vm .getAccountId()); if (oldAccount == null) { throw new InvalidParameterValueException("Invalid account for VM " @@ -4145,7 +4236,7 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir ex.addProxyObject(vm.getUuid(), "vmId"); throw ex; } - Account newAccount = _accountService.getActiveAccountByName( + final Account newAccount = _accountService.getActiveAccountByName( cmd.getAccountName(), cmd.getDomainId()); if (newAccount 
== null || newAccount.getType() == Account.ACCOUNT_TYPE_PROJECT) { @@ -4202,8 +4293,8 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir DataCenterVO zone = _dcDao.findById(vm.getDataCenterId()); // Get serviceOffering and Volumes for Virtual Machine - ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); - List volumes = _volsDao.findByInstance(cmd.getVmId()); + final ServiceOfferingVO offering = _serviceOfferingDao.findByIdIncludingRemoved(vm.getServiceOfferingId()); + final List volumes = _volsDao.findByInstance(cmd.getVmId()); //Remove vm from instance group removeInstanceFromInstanceGroup(cmd.getVmId()); @@ -4233,8 +4324,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir DomainVO domain = _domainDao.findById(cmd.getDomainId()); _accountMgr.checkAccess(newAccount, domain); - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { //generate destroy vm event for usage UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_DESTROY, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), vm.getHostName(), vm.getServiceOfferingId(), vm.getTemplateId(), vm.getHypervisorType().toString(), @@ -4280,8 +4372,9 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir UsageEventUtils.publishUsageEvent(EventTypes.EVENT_VM_CREATE, vm.getAccountId(), vm.getDataCenterId(), vm.getId(), vm.getHostName(), vm.getServiceOfferingId(), vm.getTemplateId(), vm.getHypervisorType().toString(), VirtualMachine.class.getName(), vm.getUuid()); + } + }); - txn.commit(); VirtualMachine vmoi = _itMgr.findById(vm.getId()); VirtualMachineProfileImpl vmOldProfile = new VirtualMachineProfileImpl(vmoi); @@ -4641,6 +4734,17 @@ public class UserVmManagerImpl extends ManagerBase implements UserVmManager, Vir 
_volsDao.detachVolume(root.getId()); volumeMgr.destroyVolume(root); + // For VMware hypervisor since the old root volume is replaced by the new root volume in storage, force expunge old root volume + if (vm.getHypervisorType() == HypervisorType.VMware) { + s_logger.info("Expunging volume " + root.getId() + " from primary data store"); + AsyncCallFuture future = _volService.expungeVolumeAsync(volFactory.getVolume(root.getId())); + try { + future.get(); + } catch (Exception e) { + s_logger.debug("Failed to expunge volume:" + root.getId(), e); + } + } + if (template.getEnablePassword()) { String password = generateRandomPassword(); boolean result = resetVMPasswordInternal(vmId, password); diff --git a/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java b/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java index aa772fefa9d..ee81c82fe3f 100644 --- a/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java +++ b/server/src/com/cloud/vm/snapshot/VMSnapshotManagerImpl.java @@ -27,72 +27,42 @@ import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VMSnapshotStrategy; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import org.apache.cloudstack.api.command.user.vmsnapshot.ListVMSnapshotCmd; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; - -import 
com.cloud.agent.AgentManager; -import com.cloud.agent.api.Answer; -import com.cloud.agent.api.Command; -import com.cloud.agent.api.CreateVMSnapshotAnswer; -import com.cloud.agent.api.CreateVMSnapshotCommand; -import com.cloud.agent.api.DeleteVMSnapshotAnswer; -import com.cloud.agent.api.DeleteVMSnapshotCommand; -import com.cloud.agent.api.RevertToVMSnapshotAnswer; -import com.cloud.agent.api.RevertToVMSnapshotCommand; -import com.cloud.agent.api.VMSnapshotTO; -import com.cloud.agent.api.to.VolumeTO; import com.cloud.event.ActionEvent; import com.cloud.event.EventTypes; -import com.cloud.event.UsageEventUtils; -import com.cloud.exception.AgentUnavailableException; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.exception.OperationTimedoutException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.host.Host; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.hypervisor.HypervisorGuruManager; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.service.dao.ServiceOfferingDao; -import com.cloud.storage.DiskOfferingVO; -import com.cloud.storage.GuestOSVO; import com.cloud.storage.Snapshot; import com.cloud.storage.SnapshotVO; -import com.cloud.storage.StoragePool; import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.DiskOfferingDao; import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.SnapshotDao; import com.cloud.storage.dao.VolumeDao; import com.cloud.user.Account; import com.cloud.user.AccountManager; import com.cloud.user.dao.AccountDao; -import com.cloud.user.dao.UserDao; import com.cloud.uservm.UserVm; import com.cloud.utils.DateUtil; 
import com.cloud.utils.NumbersUtil; import com.cloud.utils.Ternary; import com.cloud.utils.component.ManagerBase; -import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; -import com.cloud.utils.db.Transaction; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.utils.fsm.NoTransitionException; -import com.cloud.utils.fsm.StateMachine2; import com.cloud.vm.UserVmVO; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.VirtualMachine; @@ -100,7 +70,6 @@ import com.cloud.vm.VirtualMachine.State; import com.cloud.vm.VirtualMachineManager; import com.cloud.vm.VirtualMachineProfile; import com.cloud.vm.dao.UserVmDao; -import com.cloud.vm.dao.VMInstanceDao; import com.cloud.vm.snapshot.dao.VMSnapshotDao; @Component @@ -111,25 +80,19 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana @Inject VMSnapshotDao _vmSnapshotDao; @Inject VolumeDao _volumeDao; @Inject AccountDao _accountDao; - @Inject VMInstanceDao _vmInstanceDao; @Inject UserVmDao _userVMDao; - @Inject HostDao _hostDao; - @Inject UserDao _userDao; - @Inject AgentManager _agentMgr; - @Inject HypervisorGuruManager _hvGuruMgr; @Inject AccountManager _accountMgr; @Inject GuestOSDao _guestOSDao; - @Inject PrimaryDataStoreDao _storagePoolDao; @Inject SnapshotDao _snapshotDao; @Inject VirtualMachineManager _itMgr; - @Inject DataStoreManager dataStoreMgr; @Inject ConfigurationDao _configDao; @Inject HypervisorCapabilitiesDao _hypervisorCapabilitiesDao; - @Inject DiskOfferingDao _diskOfferingDao; - @Inject ServiceOfferingDao _serviceOfferingDao; + @Inject + StorageStrategyFactory storageStrategyFactory; + int _vmSnapshotMax; int _wait; - StateMachine2 _vmSnapshottateMachine ; + @Override public boolean configure(String name, Map params) throws ConfigurationException { @@ -144,7 +107,6 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana String 
value = _configDao.getValue("vmsnapshot.create.wait"); _wait = NumbersUtil.parseInt(value, 1800); - _vmSnapshottateMachine = VMSnapshot.State.getStateMachine(); return true; } @@ -336,6 +298,16 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana return _name; } + private VMSnapshotStrategy findVMSnapshotStrategy(VMSnapshot vmSnapshot) { + VMSnapshotStrategy snapshotStrategy = storageStrategyFactory.getVmSnapshotStrategy(vmSnapshot); + + if (snapshotStrategy == null) { + throw new CloudRuntimeException("can't find vm snapshot strategy for vmsnapshot: " + vmSnapshot.getId()); + } + + return snapshotStrategy; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_SNAPSHOT_CREATE, eventDescription = "creating VM snapshot", async = true) public VMSnapshot creatVMSnapshot(Long vmId, Long vmSnapshotId) { @@ -347,241 +319,20 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana if(vmSnapshot == null){ throw new CloudRuntimeException("VM snapshot id: " + vmSnapshotId + " can not be found"); } - Long hostId = pickRunningHost(vmId); try { - vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.CreateRequested); - } catch (NoTransitionException e) { - throw new CloudRuntimeException(e.getMessage()); - } - return createVmSnapshotInternal(userVm, vmSnapshot, hostId); - } - - protected VMSnapshot createVmSnapshotInternal(UserVmVO userVm, VMSnapshotVO vmSnapshot, Long hostId) { - CreateVMSnapshotAnswer answer = null; - try { - GuestOSVO guestOS = _guestOSDao.findById(userVm.getGuestOSId()); - - // prepare snapshotVolumeTos - List volumeTOs = getVolumeTOList(userVm.getId()); - - // prepare target snapshotTO and its parent snapshot (current snapshot) - VMSnapshotTO current = null; - VMSnapshotVO currentSnapshot = _vmSnapshotDao.findCurrentSnapshotByVmId(userVm.getId()); - if (currentSnapshot != null) - current = getSnapshotWithParents(currentSnapshot); - VMSnapshotTO target = new VMSnapshotTO(vmSnapshot.getId(), 
vmSnapshot.getName(), vmSnapshot.getType(), null, vmSnapshot.getDescription(), false, - current); - if (current == null) - vmSnapshot.setParent(null); - else - vmSnapshot.setParent(current.getId()); - - CreateVMSnapshotCommand ccmd = new CreateVMSnapshotCommand(userVm.getInstanceName(),target ,volumeTOs, guestOS.getDisplayName(),userVm.getState()); - ccmd.setWait(_wait); - - answer = (CreateVMSnapshotAnswer) sendToPool(hostId, ccmd); - if (answer != null && answer.getResult()) { - processAnswer(vmSnapshot, userVm, answer, hostId); - s_logger.debug("Create vm snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); - }else{ - - String errMsg = "Creating VM snapshot: " + vmSnapshot.getName() + " failed"; - if(answer != null && answer.getDetails() != null) - errMsg = errMsg + " due to " + answer.getDetails(); - s_logger.error(errMsg); - vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); - throw new CloudRuntimeException(errMsg); - } - return vmSnapshot; + VMSnapshotStrategy strategy = findVMSnapshotStrategy(vmSnapshot); + VMSnapshot snapshot = strategy.takeVMSnapshot(vmSnapshot); + return snapshot; } catch (Exception e) { - if(e instanceof AgentUnavailableException){ - try { - vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationFailed); - } catch (NoTransitionException e1) { - s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); - } - } - String msg = e.getMessage(); - s_logger.error("Create vm snapshot " + vmSnapshot.getName() + " failed for vm: " + userVm.getInstanceName() + " due to " + msg); - throw new CloudRuntimeException(msg); - } finally{ - if(vmSnapshot.getState() == VMSnapshot.State.Allocated){ - s_logger.warn("Create vm snapshot " + vmSnapshot.getName() + " failed for vm: " + userVm.getInstanceName()); - _vmSnapshotDao.remove(vmSnapshot.getId()); - } - if(vmSnapshot.getState() == VMSnapshot.State.Ready && answer != null){ - for (VolumeTO volumeTo : answer.getVolumeTOs()){ - 
publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_CREATE,vmSnapshot,userVm,volumeTo); - } - } + s_logger.debug("Failed to create vm snapshot: " + vmSnapshotId ,e); + return null; } } - private void publishUsageEvent(String type, VMSnapshot vmSnapshot, UserVm userVm, VolumeTO volumeTo){ - VolumeVO volume = _volumeDao.findById(volumeTo.getId()); - Long diskOfferingId = volume.getDiskOfferingId(); - Long offeringId = null; - if (diskOfferingId != null) { - DiskOfferingVO offering = _diskOfferingDao.findById(diskOfferingId); - if (offering != null - && (offering.getType() == DiskOfferingVO.Type.Disk)) { - offeringId = offering.getId(); - } - } - UsageEventUtils.publishUsageEvent( - type, - vmSnapshot.getAccountId(), - userVm.getDataCenterId(), - userVm.getId(), - vmSnapshot.getName(), - offeringId, - volume.getId(), // save volume's id into templateId field - volumeTo.getChainSize(), - VMSnapshot.class.getName(), vmSnapshot.getUuid()); - } - - protected List getVolumeTOList(Long vmId) { - List volumeTOs = new ArrayList(); - List volumeVos = _volumeDao.findByInstance(vmId); - - for (VolumeVO volume : volumeVos) { - StoragePool pool = (StoragePool)dataStoreMgr.getPrimaryDataStore(volume.getPoolId()); - VolumeTO volumeTO = new VolumeTO(volume, pool); - volumeTOs.add(volumeTO); - } - return volumeTOs; - } - - // get snapshot and its parents recursively - private VMSnapshotTO getSnapshotWithParents(VMSnapshotVO snapshot) { - Map snapshotMap = new HashMap(); - List allSnapshots = _vmSnapshotDao.findByVm(snapshot.getVmId()); - for (VMSnapshotVO vmSnapshotVO : allSnapshots) { - snapshotMap.put(vmSnapshotVO.getId(), vmSnapshotVO); - } - - VMSnapshotTO currentTO = convert2VMSnapshotTO(snapshot); - VMSnapshotTO result = currentTO; - VMSnapshotVO current = snapshot; - while (current.getParent() != null) { - VMSnapshotVO parent = snapshotMap.get(current.getParent()); - currentTO.setParent(convert2VMSnapshotTO(parent)); - current = snapshotMap.get(current.getParent()); - currentTO = 
currentTO.getParent(); - } - return result; - } - - private VMSnapshotTO convert2VMSnapshotTO(VMSnapshotVO vo) { - return new VMSnapshotTO(vo.getId(), vo.getName(), vo.getType(), vo.getCreated().getTime(), vo.getDescription(), - vo.getCurrent(), null); - } - - protected boolean vmSnapshotStateTransitTo(VMSnapshotVO vsnp, VMSnapshot.Event event) throws NoTransitionException { - return _vmSnapshottateMachine.transitTo(vsnp, event, null, _vmSnapshotDao); - } - - @DB - protected void processAnswer(VMSnapshotVO vmSnapshot, UserVmVO userVm, Answer as, Long hostId) { - final Transaction txn = Transaction.currentTxn(); - try { - txn.start(); - if (as instanceof CreateVMSnapshotAnswer) { - CreateVMSnapshotAnswer answer = (CreateVMSnapshotAnswer) as; - finalizeCreate(vmSnapshot, answer.getVolumeTOs()); - vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); - } else if (as instanceof RevertToVMSnapshotAnswer) { - RevertToVMSnapshotAnswer answer = (RevertToVMSnapshotAnswer) as; - finalizeRevert(vmSnapshot, answer.getVolumeTOs()); - vmSnapshotStateTransitTo(vmSnapshot, VMSnapshot.Event.OperationSucceeded); - } else if (as instanceof DeleteVMSnapshotAnswer) { - DeleteVMSnapshotAnswer answer = (DeleteVMSnapshotAnswer) as; - finalizeDelete(vmSnapshot, answer.getVolumeTOs()); - _vmSnapshotDao.remove(vmSnapshot.getId()); - } - txn.commit(); - } catch (Exception e) { - String errMsg = "Error while process answer: " + as.getClass() + " due to " + e.getMessage(); - s_logger.error(errMsg, e); - txn.rollback(); - throw new CloudRuntimeException(errMsg); - } finally { - txn.close(); - } - } - - protected void finalizeDelete(VMSnapshotVO vmSnapshot, List VolumeTOs) { - // update volumes path - updateVolumePath(VolumeTOs); - - // update children's parent snapshots - List children= _vmSnapshotDao.listByParent(vmSnapshot.getId()); - for (VMSnapshotVO child : children) { - child.setParent(vmSnapshot.getParent()); - _vmSnapshotDao.persist(child); - } - - // update current 
snapshot - VMSnapshotVO current = _vmSnapshotDao.findCurrentSnapshotByVmId(vmSnapshot.getVmId()); - if(current != null && current.getId() == vmSnapshot.getId() && vmSnapshot.getParent() != null){ - VMSnapshotVO parent = _vmSnapshotDao.findById(vmSnapshot.getParent()); - parent.setCurrent(true); - _vmSnapshotDao.persist(parent); - } - vmSnapshot.setCurrent(false); - _vmSnapshotDao.persist(vmSnapshot); - } - - protected void finalizeCreate(VMSnapshotVO vmSnapshot, List VolumeTOs) { - // update volumes path - updateVolumePath(VolumeTOs); - - vmSnapshot.setCurrent(true); - - // change current snapshot - if (vmSnapshot.getParent() != null) { - VMSnapshotVO previousCurrent = _vmSnapshotDao.findById(vmSnapshot.getParent()); - previousCurrent.setCurrent(false); - _vmSnapshotDao.persist(previousCurrent); - } - _vmSnapshotDao.persist(vmSnapshot); - } - - protected void finalizeRevert(VMSnapshotVO vmSnapshot, List volumeToList) { - // update volumes path - updateVolumePath(volumeToList); - - // update current snapshot, current snapshot is the one reverted to - VMSnapshotVO previousCurrent = _vmSnapshotDao.findCurrentSnapshotByVmId(vmSnapshot.getVmId()); - if(previousCurrent != null){ - previousCurrent.setCurrent(false); - _vmSnapshotDao.persist(previousCurrent); - } - vmSnapshot.setCurrent(true); - _vmSnapshotDao.persist(vmSnapshot); - } - - private void updateVolumePath(List volumeTOs) { - for (VolumeTO volume : volumeTOs) { - if (volume.getPath() != null) { - VolumeVO volumeVO = _volumeDao.findById(volume.getId()); - volumeVO.setPath(volume.getPath()); - volumeVO.setVmSnapshotChainSize(volume.getChainSize()); - _volumeDao.persist(volumeVO); - } - } - } - public VMSnapshotManagerImpl() { } - - protected Answer sendToPool(Long hostId, Command cmd) throws AgentUnavailableException, OperationTimedoutException { - long targetHostId = _hvGuruMgr.getGuruProcessedCommandTargetHost(hostId, cmd); - Answer answer = _agentMgr.send(targetHostId, cmd); - return answer; - } - + @Override 
public boolean hasActiveVMSnapshotTasks(Long vmId){ List activeVMSnapshots = _vmSnapshotDao.listByInstanceId(vmId, @@ -617,50 +368,14 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana if(vmSnapshot.getState() == VMSnapshot.State.Allocated){ return _vmSnapshotDao.remove(vmSnapshot.getId()); - }else{ - return deleteSnapshotInternal(vmSnapshot); - } - } - - @DB - protected boolean deleteSnapshotInternal(VMSnapshotVO vmSnapshot) { - UserVmVO userVm = _userVMDao.findById(vmSnapshot.getVmId()); - DeleteVMSnapshotAnswer answer = null; - try { - vmSnapshotStateTransitTo(vmSnapshot,VMSnapshot.Event.ExpungeRequested); - Long hostId = pickRunningHost(vmSnapshot.getVmId()); - - // prepare snapshotVolumeTos - List volumeTOs = getVolumeTOList(vmSnapshot.getVmId()); - - // prepare DeleteVMSnapshotCommand - String vmInstanceName = userVm.getInstanceName(); - VMSnapshotTO parent = getSnapshotWithParents(vmSnapshot).getParent(); - VMSnapshotTO vmSnapshotTO = new VMSnapshotTO(vmSnapshot.getId(), vmSnapshot.getName(), vmSnapshot.getType(), - vmSnapshot.getCreated().getTime(), vmSnapshot.getDescription(), vmSnapshot.getCurrent(), parent); - GuestOSVO guestOS = _guestOSDao.findById(userVm.getGuestOSId()); - DeleteVMSnapshotCommand deleteSnapshotCommand = new DeleteVMSnapshotCommand(vmInstanceName, vmSnapshotTO, volumeTOs,guestOS.getDisplayName()); - - answer = (DeleteVMSnapshotAnswer) sendToPool(hostId, deleteSnapshotCommand); - - if (answer != null && answer.getResult()) { - processAnswer(vmSnapshot, userVm, answer, hostId); - s_logger.debug("Delete VM snapshot " + vmSnapshot.getName() + " succeeded for vm: " + userVm.getInstanceName()); - return true; - } else { - s_logger.error("Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + answer.getDetails()); + } else{ + try { + VMSnapshotStrategy strategy = findVMSnapshotStrategy(vmSnapshot); + return strategy.deleteVMSnapshot(vmSnapshot); + } catch 
(Exception e) { + s_logger.debug("Failed to delete vm snapshot: " + vmSnapshotId, e); return false; } - } catch (Exception e) { - String msg = "Delete vm snapshot " + vmSnapshot.getName() + " of vm " + userVm.getInstanceName() + " failed due to " + e.getMessage(); - s_logger.error(msg , e); - throw new CloudRuntimeException(e.getMessage()); - } finally{ - if(answer != null && answer.getResult()){ - for (VolumeTO volumeTo : answer.getVolumeTOs()){ - publishUsageEvent(EventTypes.EVENT_VM_SNAPSHOT_DELETE,vmSnapshot,userVm,volumeTo); - } - } } } @@ -726,108 +441,29 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana throw new CloudRuntimeException(e.getMessage()); } } - hostId = pickRunningHost(userVm.getId()); } - - if(hostId == null) - throw new CloudRuntimeException("Can not find any host to revert snapshot " + vmSnapshotVo.getName()); - + // check if there are other active VM snapshot tasks if (hasActiveVMSnapshotTasks(userVm.getId())) { throw new InvalidParameterValueException("There is other active vm snapshot tasks on the instance, please try again later"); } - - userVm = _userVMDao.findById(userVm.getId()); - try { - vmSnapshotStateTransitTo(vmSnapshotVo, VMSnapshot.Event.RevertRequested); - } catch (NoTransitionException e) { - throw new CloudRuntimeException(e.getMessage()); - } - return revertInternal(userVm, vmSnapshotVo, hostId); - } - private UserVm revertInternal(UserVmVO userVm, VMSnapshotVO vmSnapshotVo, Long hostId) { try { - VMSnapshotVO snapshot = _vmSnapshotDao.findById(vmSnapshotVo.getId()); - // prepare RevertToSnapshotCommand - List volumeTOs = getVolumeTOList(userVm.getId()); - String vmInstanceName = userVm.getInstanceName(); - VMSnapshotTO parent = getSnapshotWithParents(snapshot).getParent(); - VMSnapshotTO vmSnapshotTO = new VMSnapshotTO(snapshot.getId(), snapshot.getName(), snapshot.getType(), - snapshot.getCreated().getTime(), snapshot.getDescription(), snapshot.getCurrent(), parent); - - GuestOSVO guestOS 
= _guestOSDao.findById(userVm.getGuestOSId()); - RevertToVMSnapshotCommand revertToSnapshotCommand = new RevertToVMSnapshotCommand(vmInstanceName, vmSnapshotTO, volumeTOs, guestOS.getDisplayName()); - - RevertToVMSnapshotAnswer answer = (RevertToVMSnapshotAnswer) sendToPool(hostId, revertToSnapshotCommand); - if (answer != null && answer.getResult()) { - processAnswer(vmSnapshotVo, userVm, answer, hostId); - s_logger.debug("RevertTo " + vmSnapshotVo.getName() + " succeeded for vm: " + userVm.getInstanceName()); - } else { - String errMsg = "Revert VM: " + userVm.getInstanceName() + " to snapshot: "+ vmSnapshotVo.getName() + " failed"; - if(answer != null && answer.getDetails() != null) - errMsg = errMsg + " due to " + answer.getDetails(); - s_logger.error(errMsg); - // agent report revert operation fails - vmSnapshotStateTransitTo(vmSnapshotVo, VMSnapshot.Event.OperationFailed); - throw new CloudRuntimeException(errMsg); - } + VMSnapshotStrategy strategy = findVMSnapshotStrategy(vmSnapshotVo); + strategy.revertVMSnapshot(vmSnapshotVo); + return userVm; } catch (Exception e) { - if(e instanceof AgentUnavailableException){ - try { - vmSnapshotStateTransitTo(vmSnapshotVo, VMSnapshot.Event.OperationFailed); - } catch (NoTransitionException e1) { - s_logger.error("Cannot set vm snapshot state due to: " + e1.getMessage()); - } - } - // for other exceptions, do not change VM snapshot state, leave it for snapshotSync - String errMsg = "revert vm: " + userVm.getInstanceName() + " to snapshot " + vmSnapshotVo.getName() + " failed due to " + e.getMessage(); - s_logger.error(errMsg); - throw new CloudRuntimeException(e.getMessage()); + s_logger.debug("Failed to revert vmsnapshot: " + vmSnapshotId, e); + return null; } - return userVm; } - @Override public VMSnapshot getVMSnapshotById(Long id) { VMSnapshotVO vmSnapshot = _vmSnapshotDao.findById(id); return vmSnapshot; } - protected Long pickRunningHost(Long vmId) { - UserVmVO vm = _userVMDao.findById(vmId); - // use VM's host 
if VM is running - if(vm.getState() == State.Running) - return vm.getHostId(); - - // check if lastHostId is available - if(vm.getLastHostId() != null){ - HostVO lastHost = _hostDao.findById(vm.getLastHostId()); - if(lastHost.getStatus() == com.cloud.host.Status.Up && !lastHost.isInMaintenanceStates()) - return lastHost.getId(); - } - - List listVolumes = _volumeDao.findByInstance(vmId); - if (listVolumes == null || listVolumes.size() == 0) { - throw new InvalidParameterValueException("vmInstance has no volumes"); - } - VolumeVO volume = listVolumes.get(0); - Long poolId = volume.getPoolId(); - if (poolId == null) { - throw new InvalidParameterValueException("pool id is not found"); - } - StoragePoolVO storagePool = _storagePoolDao.findById(poolId); - if (storagePool == null) { - throw new InvalidParameterValueException("storage pool is not found"); - } - List listHost = _hostDao.listAllUpAndEnabledNonHAHosts(Host.Type.Routing, storagePool.getClusterId(), storagePool.getPodId(), - storagePool.getDataCenterId(), null); - if (listHost == null || listHost.size() == 0) { - throw new InvalidParameterValueException("no host in up state is found"); - } - return listHost.get(0).getId(); - } @Override public VirtualMachine getVMBySnapshotId(Long id) { @@ -851,7 +487,8 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana VMSnapshotVO target = _vmSnapshotDao.findById(snapshot.getId()); if(type != null && target.getType() != type) continue; - if (!deleteSnapshotInternal(target)) { + VMSnapshotStrategy strategy = findVMSnapshotStrategy(target); + if (!strategy.deleteVMSnapshot(target)) { result = false; break; } @@ -869,12 +506,13 @@ public class VMSnapshotManagerImpl extends ManagerBase implements VMSnapshotMana List vmSnapshotsInExpungingStates = _vmSnapshotDao.listByInstanceId(vm.getId(), VMSnapshot.State.Expunging, VMSnapshot.State.Reverting, VMSnapshot.State.Creating); for (VMSnapshotVO vmSnapshotVO : vmSnapshotsInExpungingStates) { + 
VMSnapshotStrategy strategy = findVMSnapshotStrategy(vmSnapshotVO); if(vmSnapshotVO.getState() == VMSnapshot.State.Expunging){ - return deleteSnapshotInternal(vmSnapshotVO); + return strategy.deleteVMSnapshot(vmSnapshotVO); }else if(vmSnapshotVO.getState() == VMSnapshot.State.Creating){ - return createVmSnapshotInternal(userVm, vmSnapshotVO, hostId) != null; + return strategy.takeVMSnapshot(vmSnapshotVO) != null; }else if(vmSnapshotVO.getState() == VMSnapshot.State.Reverting){ - return revertInternal(userVm, vmSnapshotVO, hostId) != null; + return strategy.revertVMSnapshot(vmSnapshotVO); } } }catch (Exception e) { diff --git a/server/src/org/apache/cloudstack/acl/AclServiceImpl.java b/server/src/org/apache/cloudstack/acl/AclServiceImpl.java index ccd3bf0c27a..c7badd0cb18 100644 --- a/server/src/org/apache/cloudstack/acl/AclServiceImpl.java +++ b/server/src/org/apache/cloudstack/acl/AclServiceImpl.java @@ -61,6 +61,9 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.SearchCriteria.Op; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; @Local(value = {AclService.class}) public class AclServiceImpl extends ManagerBase implements AclService, Manager { @@ -111,7 +114,7 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_ROLE_CREATE, eventDescription = "Creating Acl Role", create = true) - public AclRole createAclRole(Long domainId, String aclRoleName, String description, Long parentRoleId) { + public AclRole createAclRole(Long domainId, final String aclRoleName, final String description, final Long parentRoleId) { Account caller = CallContext.current().getCallingAccount(); if (domainId == null) { domainId = caller.getDomainId(); @@ -130,22 +133,27 @@ public class AclServiceImpl 
extends ManagerBase implements AclService, Manager { + " already exisits for domain " + domainId); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - AclRoleVO rvo = new AclRoleVO(aclRoleName, description); - rvo.setDomainId(domainId); - AclRole role = _aclRoleDao.persist(rvo); - if (parentRoleId != null) { - // copy parent role permissions - List perms = _rolePermissionDao.listByRole(parentRoleId); - if (perms != null) { - for (AclRolePermissionVO perm : perms) { - perm.setAclRoleId(role.getId()); - _rolePermissionDao.persist(perm); + final long domain_id = domainId; + AclRole role = Transaction.execute(new TransactionCallback() { + @Override + public AclRole doInTransaction(TransactionStatus status) { + AclRoleVO rvo = new AclRoleVO(aclRoleName, description); + rvo.setDomainId(domain_id); + AclRole role = _aclRoleDao.persist(rvo); + if (parentRoleId != null) { + // copy parent role permissions + List perms = _rolePermissionDao.listByRole(parentRoleId); + if (perms != null) { + for (AclRolePermissionVO perm : perms) { + perm.setAclRoleId(role.getId()); + _rolePermissionDao.persist(perm); + } + } } + return role; } - } - txn.commit(); + }); + return role; } @@ -153,10 +161,10 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_ROLE_DELETE, eventDescription = "Deleting Acl Role") - public boolean deleteAclRole(long aclRoleId) { + public boolean deleteAclRole(final long aclRoleId) { Account caller = CallContext.current().getCallingAccount(); // get the Acl Role entity - AclRole role = _aclRoleDao.findById(aclRoleId); + final AclRole role = _aclRoleDao.findById(aclRoleId); if (role == null) { throw new InvalidParameterValueException("Unable to find acl role: " + aclRoleId + "; failed to delete acl role."); @@ -164,27 +172,29 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { // check permissions _accountMgr.checkAccess(caller, 
null, true, role); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // remove this role related entry in acl_group_role_map - List groupRoleMap = _aclGroupRoleMapDao.listByRoleId(role.getId()); - if (groupRoleMap != null) { - for (AclGroupRoleMapVO gr : groupRoleMap) { - _aclGroupRoleMapDao.remove(gr.getId()); - } - } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // remove this role related entry in acl_group_role_map + List groupRoleMap = _aclGroupRoleMapDao.listByRoleId(role.getId()); + if (groupRoleMap != null) { + for (AclGroupRoleMapVO gr : groupRoleMap) { + _aclGroupRoleMapDao.remove(gr.getId()); + } + } - // remove this role related entry in acl_api_permission table - List roleApiMap = _apiPermissionDao.listByRoleId(role.getId()); - if (roleApiMap != null) { - for (AclApiPermissionVO roleApi : roleApiMap) { - _apiPermissionDao.remove(roleApi.getId()); - } - } + // remove this role related entry in acl_api_permission table + List roleApiMap = _apiPermissionDao.listByRoleId(role.getId()); + if (roleApiMap != null) { + for (AclApiPermissionVO roleApi : roleApiMap) { + _apiPermissionDao.remove(roleApi.getId()); + } + } - // remove this role from acl_role table - _aclRoleDao.remove(aclRoleId); - txn.commit(); + // remove this role from acl_role table + _aclRoleDao.remove(aclRoleId); + } + }); return true; } @@ -193,7 +203,7 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_ROLE_GRANT, eventDescription = "Granting permission to Acl Role") - public AclRole grantApiPermissionToAclRole(long aclRoleId, List apiNames) { + public AclRole grantApiPermissionToAclRole(final long aclRoleId, final List apiNames) { Account caller = CallContext.current().getCallingAccount(); // get the Acl Role entity AclRole role = _aclRoleDao.findById(aclRoleId); @@ -204,18 +214,21 @@ 
public class AclServiceImpl extends ManagerBase implements AclService, Manager { // check permissions _accountMgr.checkAccess(caller, null, true, role); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // add entries in acl_api_permission table - for (String api : apiNames) { - AclApiPermissionVO perm = _apiPermissionDao.findByRoleAndApi(aclRoleId, api); - if (perm == null) { - // not there already - perm = new AclApiPermissionVO(aclRoleId, api); - _apiPermissionDao.persist(perm); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // add entries in acl_api_permission table + for (String api : apiNames) { + AclApiPermissionVO perm = _apiPermissionDao.findByRoleAndApi(aclRoleId, api); + if (perm == null) { + // not there already + perm = new AclApiPermissionVO(aclRoleId, api); + _apiPermissionDao.persist(perm); + } + } } - } - txn.commit(); + }); + return role; } @@ -223,7 +236,7 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_ROLE_REVOKE, eventDescription = "Revoking permission from Acl Role") - public AclRole revokeApiPermissionFromAclRole(long aclRoleId, List apiNames) { + public AclRole revokeApiPermissionFromAclRole(final long aclRoleId, final List apiNames) { Account caller = CallContext.current().getCallingAccount(); // get the Acl Role entity AclRole role = _aclRoleDao.findById(aclRoleId); @@ -234,17 +247,19 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { // check permissions _accountMgr.checkAccess(caller, null, true, role); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // remove entries from acl_api_permission table - for (String api : apiNames) { - AclApiPermissionVO perm = _apiPermissionDao.findByRoleAndApi(aclRoleId, api); - if (perm != null) { - // not removed yet - 
_apiPermissionDao.remove(perm.getId()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // remove entries from acl_api_permission table + for (String api : apiNames) { + AclApiPermissionVO perm = _apiPermissionDao.findByRoleAndApi(aclRoleId, api); + if (perm != null) { + // not removed yet + _apiPermissionDao.remove(perm.getId()); + } + } } - } - txn.commit(); + }); return role; } @@ -325,8 +340,8 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_GROUP_UPDATE, eventDescription = "Adding roles to acl group") - public AclGroup addAclRolesToGroup(List roleIds, Long groupId) { - Account caller = CallContext.current().getCallingAccount(); + public AclGroup addAclRolesToGroup(final List roleIds, final Long groupId) { + final Account caller = CallContext.current().getCallingAccount(); // get the Acl Group entity AclGroup group = _aclGroupDao.findById(groupId); if (group == null) { @@ -336,34 +351,37 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { // check group permissions _accountMgr.checkAccess(caller, null, true, group); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // add entries in acl_group_role_map table - for (Long roleId : roleIds) { - // check role permissions - AclRole role = _aclRoleDao.findById(roleId); - if ( role == null ){ - throw new InvalidParameterValueException("Unable to find acl role: " + roleId - + "; failed to add roles to acl group."); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // add entries in acl_group_role_map table + for (Long roleId : roleIds) { + // check role permissions + AclRole role = _aclRoleDao.findById(roleId); + if (role == null) { + throw new InvalidParameterValueException("Unable to find acl 
role: " + roleId + + "; failed to add roles to acl group."); + } + _accountMgr.checkAccess(caller, null, true, role); + + AclGroupRoleMapVO grMap = _aclGroupRoleMapDao.findByGroupAndRole(groupId, roleId); + if (grMap == null) { + // not there already + grMap = new AclGroupRoleMapVO(groupId, roleId); + _aclGroupRoleMapDao.persist(grMap); + } + } } - _accountMgr.checkAccess(caller,null, true, role); - - AclGroupRoleMapVO grMap = _aclGroupRoleMapDao.findByGroupAndRole(groupId, roleId); - if (grMap == null) { - // not there already - grMap = new AclGroupRoleMapVO(groupId, roleId); - _aclGroupRoleMapDao.persist(grMap); - } - } - txn.commit(); + }); + return group; } @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_GROUP_UPDATE, eventDescription = "Removing roles from acl group") - public AclGroup removeAclRolesFromGroup(List roleIds, Long groupId) { - Account caller = CallContext.current().getCallingAccount(); + public AclGroup removeAclRolesFromGroup(final List roleIds, final Long groupId) { + final Account caller = CallContext.current().getCallingAccount(); // get the Acl Group entity AclGroup group = _aclGroupDao.findById(groupId); if (group == null) { @@ -373,33 +391,35 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { // check group permissions _accountMgr.checkAccess(caller, null, true, group); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // add entries in acl_group_role_map table - for (Long roleId : roleIds) { - // check role permissions - AclRole role = _aclRoleDao.findById(roleId); - if (role == null) { - throw new InvalidParameterValueException("Unable to find acl role: " + roleId - + "; failed to add roles to acl group."); - } - _accountMgr.checkAccess(caller, null, true, role); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // add entries in acl_group_role_map table + for (Long roleId : roleIds) { + 
// check role permissions + AclRole role = _aclRoleDao.findById(roleId); + if (role == null) { + throw new InvalidParameterValueException("Unable to find acl role: " + roleId + + "; failed to add roles to acl group."); + } + _accountMgr.checkAccess(caller, null, true, role); - AclGroupRoleMapVO grMap = _aclGroupRoleMapDao.findByGroupAndRole(groupId, roleId); - if (grMap != null) { - // not removed yet - _aclGroupRoleMapDao.remove(grMap.getId()); + AclGroupRoleMapVO grMap = _aclGroupRoleMapDao.findByGroupAndRole(groupId, roleId); + if (grMap != null) { + // not removed yet + _aclGroupRoleMapDao.remove(grMap.getId()); + } + } } - } - txn.commit(); + }); return group; } @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_GROUP_UPDATE, eventDescription = "Adding accounts to acl group") - public AclGroup addAccountsToGroup(List acctIds, Long groupId) { - Account caller = CallContext.current().getCallingAccount(); + public AclGroup addAccountsToGroup(final List acctIds, final Long groupId) { + final Account caller = CallContext.current().getCallingAccount(); // get the Acl Group entity AclGroup group = _aclGroupDao.findById(groupId); if (group == null) { @@ -409,34 +429,36 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { // check group permissions _accountMgr.checkAccess(caller, null, true, group); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // add entries in acl_group_account_map table - for (Long acctId : acctIds) { - // check account permissions - Account account = _accountDao.findById(acctId); - if (account == null) { - throw new InvalidParameterValueException("Unable to find account: " + acctId - + "; failed to add account to acl group."); - } - _accountMgr.checkAccess(caller, null, true, account); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // add entries in acl_group_account_map table + for (Long acctId : 
acctIds) { + // check account permissions + Account account = _accountDao.findById(acctId); + if (account == null) { + throw new InvalidParameterValueException("Unable to find account: " + acctId + + "; failed to add account to acl group."); + } + _accountMgr.checkAccess(caller, null, true, account); - AclGroupAccountMapVO grMap = _aclGroupAccountMapDao.findByGroupAndAccount(groupId, acctId); - if (grMap == null) { - // not there already - grMap = new AclGroupAccountMapVO(groupId, acctId); - _aclGroupAccountMapDao.persist(grMap); + AclGroupAccountMapVO grMap = _aclGroupAccountMapDao.findByGroupAndAccount(groupId, acctId); + if (grMap == null) { + // not there already + grMap = new AclGroupAccountMapVO(groupId, acctId); + _aclGroupAccountMapDao.persist(grMap); + } + } } - } - txn.commit(); + }); return group; } @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_GROUP_UPDATE, eventDescription = "Removing accounts from acl group") - public AclGroup removeAccountsFromGroup(List acctIds, Long groupId) { - Account caller = CallContext.current().getCallingAccount(); + public AclGroup removeAccountsFromGroup(final List acctIds, final Long groupId) { + final Account caller = CallContext.current().getCallingAccount(); // get the Acl Group entity AclGroup group = _aclGroupDao.findById(groupId); if (group == null) { @@ -446,25 +468,27 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { // check group permissions _accountMgr.checkAccess(caller, null, true, group); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // remove entries from acl_group_account_map table - for (Long acctId : acctIds) { - // check account permissions - Account account = _accountDao.findById(acctId); - if (account == null) { - throw new InvalidParameterValueException("Unable to find account: " + acctId - + "; failed to add account to acl group."); - } - _accountMgr.checkAccess(caller, null, true, account); + Transaction.execute(new 
TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // remove entries from acl_group_account_map table + for (Long acctId : acctIds) { + // check account permissions + Account account = _accountDao.findById(acctId); + if (account == null) { + throw new InvalidParameterValueException("Unable to find account: " + acctId + + "; failed to add account to acl group."); + } + _accountMgr.checkAccess(caller, null, true, account); - AclGroupAccountMapVO grMap = _aclGroupAccountMapDao.findByGroupAndAccount(groupId, acctId); - if (grMap != null) { - // not removed yet - _aclGroupAccountMapDao.remove(grMap.getId()); + AclGroupAccountMapVO grMap = _aclGroupAccountMapDao.findByGroupAndAccount(groupId, acctId); + if (grMap != null) { + // not removed yet + _aclGroupAccountMapDao.remove(grMap.getId()); + } + } } - } - txn.commit(); + }); return group; } @@ -498,10 +522,10 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { @DB @Override @ActionEvent(eventType = EventTypes.EVENT_ACL_GROUP_DELETE, eventDescription = "Deleting Acl Group") - public boolean deleteAclGroup(Long aclGroupId) { + public boolean deleteAclGroup(final Long aclGroupId) { Account caller = CallContext.current().getCallingAccount(); // get the Acl Role entity - AclGroup grp = _aclGroupDao.findById(aclGroupId); + final AclGroup grp = _aclGroupDao.findById(aclGroupId); if (grp == null) { throw new InvalidParameterValueException("Unable to find acl group: " + aclGroupId + "; failed to delete acl group."); @@ -509,27 +533,29 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { // check permissions _accountMgr.checkAccess(caller, null, true, grp); - Transaction txn = Transaction.currentTxn(); - txn.start(); - // remove this group related entry in acl_group_role_map - List groupRoleMap = _aclGroupRoleMapDao.listByGroupId(grp.getId()); - if (groupRoleMap != null) { - for (AclGroupRoleMapVO gr : 
groupRoleMap) { - _aclGroupRoleMapDao.remove(gr.getId()); - } - } + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // remove this group related entry in acl_group_role_map + List groupRoleMap = _aclGroupRoleMapDao.listByGroupId(grp.getId()); + if (groupRoleMap != null) { + for (AclGroupRoleMapVO gr : groupRoleMap) { + _aclGroupRoleMapDao.remove(gr.getId()); + } + } - // remove this group related entry in acl_group_account table - List groupAcctMap = _aclGroupAccountMapDao.listByGroupId(grp.getId()); - if (groupAcctMap != null) { - for (AclGroupAccountMapVO grpAcct : groupAcctMap) { - _aclGroupAccountMapDao.remove(grpAcct.getId()); - } - } + // remove this group related entry in acl_group_account table + List groupAcctMap = _aclGroupAccountMapDao.listByGroupId(grp.getId()); + if (groupAcctMap != null) { + for (AclGroupAccountMapVO grpAcct : groupAcctMap) { + _aclGroupAccountMapDao.remove(grpAcct.getId()); + } + } - // remove this group from acl_group table - _aclGroupDao.remove(aclGroupId); - txn.commit(); + // remove this group from acl_group table + _aclGroupDao.remove(aclGroupId); + } + }); return true; } @@ -542,7 +568,7 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { groupSB.and("account", groupSB.entity().getAccountId(), Op.EQ); GenericSearchBuilder roleSB = _aclGroupRoleMapDao.createSearchBuilder(Long.class); - roleSB.selectField(roleSB.entity().getAclRoleId()); + roleSB.selectFields(roleSB.entity().getAclRoleId()); roleSB.join("accountgroupjoin", groupSB, groupSB.entity().getAclGroupId(), roleSB.entity().getAclGroupId(), JoinType.INNER); roleSB.done(); @@ -583,7 +609,7 @@ public class AclServiceImpl extends ManagerBase implements AclService, Manager { public List getAclGroups(long accountId) { GenericSearchBuilder groupSB = _aclGroupAccountMapDao.createSearchBuilder(Long.class); - 
groupSB.selectField(groupSB.entity().getAclGroupId()); + groupSB.selectFields(groupSB.entity().getAclGroupId()); groupSB.and("account", groupSB.entity().getAccountId(), Op.EQ); SearchCriteria groupSc = groupSB.create(); diff --git a/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java b/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java index 9343b22808e..ebcdc6068ea 100644 --- a/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java +++ b/server/src/org/apache/cloudstack/affinity/AffinityGroupServiceImpl.java @@ -34,12 +34,16 @@ import org.apache.cloudstack.affinity.dao.AffinityGroupDao; import org.apache.cloudstack.affinity.dao.AffinityGroupDomainMapDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.context.CallContext; - import org.apache.log4j.Logger; import org.springframework.context.annotation.Primary; + + + + + import com.cloud.deploy.DeploymentPlanner; import com.cloud.domain.DomainVO; import com.cloud.domain.dao.DomainDao; @@ -65,6 +69,9 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.fsm.StateListener; import com.cloud.vm.UserVmVO; @@ -142,8 +149,8 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro @DB @Override - public AffinityGroup createAffinityGroupInternal(String account, Long domainId, String affinityGroupName, - String affinityGroupType, String description) { + public AffinityGroup createAffinityGroupInternal(String account, final Long domainId, final String affinityGroupName, + final String affinityGroupType, final String description) { Account caller = 
CallContext.current().getCallingAccount(); @@ -159,7 +166,7 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro "Unable to create affinity group, no Affinity Group Types configured"); } - AffinityGroupProcessor processor = typeProcessorMap.get(affinityGroupType); + final AffinityGroupProcessor processor = typeProcessorMap.get(affinityGroupType); if (processor.isAdminControlledGroup() && !_accountMgr.isRootAdmin(caller.getId())) { throw new PermissionDeniedException("Cannot create the affinity group"); @@ -210,21 +217,26 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro + affinityGroupName + " already exisits under the domain."); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - - AffinityGroupVO group = new AffinityGroupVO(affinityGroupName, affinityGroupType, description, owner.getDomainId(), - owner.getId(), aclType); + final Account ownerFinal = owner; + final ControlledEntity.ACLType aclTypeFinal = aclType; + AffinityGroupVO group = Transaction.execute(new TransactionCallback() { + @Override + public AffinityGroupVO doInTransaction(TransactionStatus status) { + AffinityGroupVO group = new AffinityGroupVO(affinityGroupName, affinityGroupType, description, ownerFinal.getDomainId(), + ownerFinal.getId(), aclTypeFinal); _affinityGroupDao.persist(group); - if (domainId != null && aclType == ACLType.Domain) { + if (domainId != null && aclTypeFinal == ACLType.Domain) { boolean subDomainAccess = false; subDomainAccess = processor.subDomainAccess(); AffinityGroupDomainMapVO domainMap = new AffinityGroupDomainMapVO(group.getId(), domainId, subDomainAccess); _affinityGroupDomainMapDao.persist(domainMap); } - txn.commit(); + return group; + } + }); + if (s_logger.isDebugEnabled()) { s_logger.debug("Created affinity group =" + affinityGroupName); @@ -265,22 +277,24 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro // check permissions 
_accountMgr.checkAccess(caller, AccessType.ModifyEntry, true, group); - final Transaction txn = Transaction.currentTxn(); - txn.start(); + final Long affinityGroupIdFinal = affinityGroupId; + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { - group = _affinityGroupDao.lockRow(affinityGroupId, true); + AffinityGroupVO group = _affinityGroupDao.lockRow(affinityGroupIdFinal, true); if (group == null) { - throw new InvalidParameterValueException("Unable to find affinity group by id " + affinityGroupId); + throw new InvalidParameterValueException("Unable to find affinity group by id " + affinityGroupIdFinal); } - List affinityGroupVmMap = _affinityGroupVMMapDao.listByAffinityGroup(affinityGroupId); + List affinityGroupVmMap = _affinityGroupVMMapDao.listByAffinityGroup(affinityGroupIdFinal); if (!affinityGroupVmMap.isEmpty()) { SearchBuilder listByAffinityGroup = _affinityGroupVMMapDao.createSearchBuilder(); listByAffinityGroup.and("affinityGroupId", listByAffinityGroup.entity().getAffinityGroupId(), SearchCriteria.Op.EQ); listByAffinityGroup.done(); SearchCriteria sc = listByAffinityGroup.create(); - sc.setParameters("affinityGroupId", affinityGroupId); + sc.setParameters("affinityGroupId", affinityGroupIdFinal); _affinityGroupVMMapDao.lockRows(sc, null, true); _affinityGroupVMMapDao.remove(sc); @@ -292,8 +306,9 @@ public class AffinityGroupServiceImpl extends ManagerBase implements AffinityGro processor.handleDeleteGroup(group); } - _affinityGroupDao.expunge(affinityGroupId); - txn.commit(); + _affinityGroupDao.expunge(affinityGroupIdFinal); + } + }); if (s_logger.isDebugEnabled()) { s_logger.debug("Deleted affinity group id=" + affinityGroupId); diff --git a/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java b/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java index 2385edcd1c5..b7cd231831c 100644 --- 
a/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java +++ b/server/src/org/apache/cloudstack/network/lb/ApplicationLoadBalancerManagerImpl.java @@ -26,7 +26,6 @@ import javax.inject.Inject; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; - import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.command.user.loadbalancer.ListApplicationLoadBalancersCmd; import org.apache.cloudstack.context.CallContext; @@ -58,7 +57,7 @@ import com.cloud.network.lb.LoadBalancingRulesService; import com.cloud.network.rules.FirewallRule.State; import com.cloud.network.rules.LoadBalancerContainer.Scheme; import com.cloud.projects.Project.ListProjectResourcesCriteria; -import com.cloud.server.ResourceTag.TaggedResourceType; +import com.cloud.server.ResourceTag.ResourceObjectType; import com.cloud.tags.ResourceTagVO; import com.cloud.tags.dao.ResourceTagDao; import com.cloud.user.Account; @@ -72,6 +71,8 @@ import com.cloud.utils.db.JoinBuilder; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallbackWithException; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.Ip; import com.cloud.utils.net.NetUtils; @@ -156,37 +157,40 @@ public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements A @DB - protected ApplicationLoadBalancerRule persistLbRule(ApplicationLoadBalancerRuleVO newRule) throws NetworkRuleConflictException { - - Transaction txn = Transaction.currentTxn(); - txn.start(); - - //1) Persist the rule - newRule = _lbDao.persist(newRule); + protected ApplicationLoadBalancerRule persistLbRule(final ApplicationLoadBalancerRuleVO newRuleFinal) throws NetworkRuleConflictException { boolean success = true; - + ApplicationLoadBalancerRuleVO newRule = null; try { - //2) Detect 
conflicts - detectLbRulesConflicts(newRule); - if (!_firewallDao.setStateToAdd(newRule)) { - throw new CloudRuntimeException("Unable to update the state to add for " + newRule); - } - s_logger.debug("Load balancer " + newRule.getId() + " for Ip address " + newRule.getSourceIp().addr() + ", source port " - + newRule.getSourcePortStart() + ", instance port " + newRule.getDefaultPortStart() + " is added successfully."); - CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); - Network ntwk = _networkModel.getNetwork(newRule.getNetworkId()); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, newRule.getAccountId(), - ntwk.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), - newRule.getUuid()); - txn.commit(); + newRule = Transaction.execute(new TransactionCallbackWithException() { + @Override + public ApplicationLoadBalancerRuleVO doInTransaction(TransactionStatus status) throws NetworkRuleConflictException { + //1) Persist the rule + ApplicationLoadBalancerRuleVO newRule = _lbDao.persist(newRuleFinal); + //2) Detect conflicts + detectLbRulesConflicts(newRule); + if (!_firewallDao.setStateToAdd(newRule)) { + throw new CloudRuntimeException("Unable to update the state to add for " + newRule); + } + s_logger.debug("Load balancer " + newRule.getId() + " for Ip address " + newRule.getSourceIp().addr() + ", source port " + + newRule.getSourcePortStart() + ", instance port " + newRule.getDefaultPortStart() + " is added successfully."); + CallContext.current().setEventDetails("Load balancer Id: " + newRule.getId()); + Network ntwk = _networkModel.getNetwork(newRule.getNetworkId()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_LOAD_BALANCER_CREATE, newRule.getAccountId(), + ntwk.getDataCenterId(), newRule.getId(), null, LoadBalancingRule.class.getName(), + newRule.getUuid()); + + return newRule; + } + }); + return newRule; } catch (Exception e) { success = false; if (e instanceof 
NetworkRuleConflictException) { throw (NetworkRuleConflictException) e; } - throw new CloudRuntimeException("Unable to add lb rule for ip address " + newRule.getSourceIpAddressId(), e); + throw new CloudRuntimeException("Unable to add lb rule for ip address " + newRuleFinal.getSourceIpAddressId(), e); } finally { if (!success && newRule != null) { _lbMgr.removeLBRule(newRule); @@ -455,7 +459,7 @@ public class ApplicationLoadBalancerManagerImpl extends ManagerBase implements A if (tags != null && !tags.isEmpty()) { int count = 0; - sc.setJoinParameters("tagSearch", "resourceType", TaggedResourceType.LoadBalancer.toString()); + sc.setJoinParameters("tagSearch", "resourceType", ResourceObjectType.LoadBalancer.toString()); for (String key : tags.keySet()) { sc.setJoinParameters("tagSearch", "key" + String.valueOf(count), key); sc.setJoinParameters("tagSearch", "value" + String.valueOf(count), tags.get(key)); diff --git a/server/src/org/apache/cloudstack/region/RegionManagerImpl.java b/server/src/org/apache/cloudstack/region/RegionManagerImpl.java index 9bcf4be69c8..40ac46c176c 100755 --- a/server/src/org/apache/cloudstack/region/RegionManagerImpl.java +++ b/server/src/org/apache/cloudstack/region/RegionManagerImpl.java @@ -26,22 +26,30 @@ import com.cloud.user.DomainManager; import com.cloud.user.UserAccount; import com.cloud.user.dao.AccountDao; import com.cloud.user.dao.UserAccountDao; +import com.cloud.utils.PropertiesUtil; import com.cloud.utils.component.Manager; import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.crypt.EncryptionSecretKeyChecker; import org.apache.cloudstack.api.command.admin.account.UpdateAccountCmd; import org.apache.cloudstack.api.command.admin.domain.UpdateDomainCmd; import org.apache.cloudstack.api.command.admin.user.DeleteUserCmd; import org.apache.cloudstack.api.command.admin.user.UpdateUserCmd; import org.apache.cloudstack.region.dao.RegionDao; import org.apache.log4j.Logger; +import 
org.jasypt.encryption.pbe.StandardPBEStringEncryptor; +import org.jasypt.properties.EncryptableProperties; import org.springframework.stereotype.Component; import javax.ejb.Local; import javax.inject.Inject; import javax.naming.ConfigurationException; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Properties; @Component @Local(value = { RegionManager.class }) @@ -63,7 +71,28 @@ public class RegionManagerImpl extends ManagerBase implements RegionManager, Man @Override public boolean configure(final String name, final Map params) throws ConfigurationException { _name = name; - _id = _regionDao.getRegionId(); + File dbPropsFile = PropertiesUtil.findConfigFile("db.properties"); + final Properties dbProps; + if (EncryptionSecretKeyChecker.useEncryption()) { + StandardPBEStringEncryptor encryptor = EncryptionSecretKeyChecker.getEncryptor(); + dbProps = new EncryptableProperties(encryptor); + } else { + dbProps = new Properties(); + } + try { + PropertiesUtil.loadFromFile(dbProps, dbPropsFile); + } catch (IOException e) { + s_logger.fatal("Unable to load db properties file, pl. 
check the classpath and file path configuration", e); + return false; + } catch (NullPointerException e) { + s_logger.fatal("Unable to locate db properties file within classpath or absolute path: db.properties"); + return false; + } + String regionId = dbProps.getProperty("region.id"); + _id = 1; + if(regionId != null){ + _id = Integer.parseInt(regionId); + } return true; } diff --git a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java index 25a40ee9051..62b9748de3a 100644 --- a/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java +++ b/server/src/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImpl.java @@ -37,6 +37,9 @@ import com.cloud.user.AccountManager; import com.cloud.utils.Pair; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionCallbackNoReturn; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; @@ -46,7 +49,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.region.Region; import org.apache.cloudstack.region.dao.RegionDao; - import org.apache.log4j.Logger; import javax.ejb.Local; @@ -94,15 +96,15 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR "balancer rule", create = true) public GlobalLoadBalancerRule createGlobalLoadBalancerRule(CreateGlobalLoadBalancerRuleCmd newRule) { - Integer regionId = newRule.getRegionId(); - String algorithm = newRule.getAlgorithm(); - String stickyMethod = newRule.getStickyMethod(); - String name = newRule.getName(); - String description = newRule.getDescription(); - String domainName = newRule.getServiceDomainName(); - String 
serviceType = newRule.getServiceType(); + final Integer regionId = newRule.getRegionId(); + final String algorithm = newRule.getAlgorithm(); + final String stickyMethod = newRule.getStickyMethod(); + final String name = newRule.getName(); + final String description = newRule.getDescription(); + final String domainName = newRule.getServiceDomainName(); + final String serviceType = newRule.getServiceType(); - Account gslbOwner = _accountMgr.getAccount(newRule.getEntityOwnerId()); + final Account gslbOwner = _accountMgr.getAccount(newRule.getEntityOwnerId()); if (!GlobalLoadBalancerRule.Algorithm.isValidAlgorithm(algorithm)) { throw new InvalidParameterValueException("Invalid Algorithm: " + algorithm); @@ -135,18 +137,21 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR throw new CloudRuntimeException("GSLB service is not enabled in region : " + region.getName()); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - GlobalLoadBalancerRuleVO newGslbRule = new GlobalLoadBalancerRuleVO(name, description, domainName, algorithm, - stickyMethod, serviceType, regionId, gslbOwner.getId(), gslbOwner.getDomainId(), - GlobalLoadBalancerRule.State.Staged); - _gslbRuleDao.persist(newGslbRule); + GlobalLoadBalancerRuleVO newGslbRule = Transaction.execute(new TransactionCallback() { + @Override + public GlobalLoadBalancerRuleVO doInTransaction(TransactionStatus status) { + GlobalLoadBalancerRuleVO newGslbRule = new GlobalLoadBalancerRuleVO(name, description, domainName, algorithm, + stickyMethod, serviceType, regionId, gslbOwner.getId(), gslbOwner.getDomainId(), + GlobalLoadBalancerRule.State.Staged); + _gslbRuleDao.persist(newGslbRule); - UsageEventUtils.publishUsageEvent(EventTypes.EVENT_GLOBAL_LOAD_BALANCER_CREATE, newGslbRule.getAccountId(), - 0, newGslbRule.getId(), name, GlobalLoadBalancerRule.class.getName(), - newGslbRule.getUuid()); + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_GLOBAL_LOAD_BALANCER_CREATE, 
newGslbRule.getAccountId(), + 0, newGslbRule.getId(), name, GlobalLoadBalancerRule.class.getName(), + newGslbRule.getUuid()); - txn.commit(); + return newGslbRule; + } + }); s_logger.debug("successfully created new global load balancer rule for the account " + gslbOwner.getId()); @@ -162,8 +167,8 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR CallContext ctx = CallContext.current(); Account caller = ctx.getCallingAccount(); - long gslbRuleId = assignToGslbCmd.getGlobalLoadBalancerRuleId(); - GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); + final long gslbRuleId = assignToGslbCmd.getGlobalLoadBalancerRuleId(); + final GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); if (gslbRule == null) { throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId); } @@ -175,7 +180,7 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR + " is in revoked state"); } - List newLbRuleIds = assignToGslbCmd.getLoadBalancerRulesIds(); + final List newLbRuleIds = assignToGslbCmd.getLoadBalancerRulesIds(); if (newLbRuleIds == null || newLbRuleIds.isEmpty()) { throw new InvalidParameterValueException("empty list of load balancer rule Ids specified to be assigned" + " global load balancer rule"); @@ -244,30 +249,31 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR } } - Map lbRuleWeightMap = assignToGslbCmd.getLoadBalancerRuleWeightMap(); + final Map lbRuleWeightMap = assignToGslbCmd.getLoadBalancerRuleWeightMap(); - Transaction txn = Transaction.currentTxn(); - txn.start(); - - // persist the mapping for the new Lb rule that needs to assigned to a gslb rule - for (Long lbRuleId : newLbRuleIds) { - GlobalLoadBalancerLbRuleMapVO newGslbLbMap = new GlobalLoadBalancerLbRuleMapVO(); - newGslbLbMap.setGslbLoadBalancerId(gslbRuleId); - newGslbLbMap.setLoadBalancerId(lbRuleId); - if (lbRuleWeightMap != null && 
lbRuleWeightMap.get(lbRuleId) != null) { - newGslbLbMap.setWeight(lbRuleWeightMap.get(lbRuleId)); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // persist the mapping for the new Lb rule that needs to assigned to a gslb rule + for (Long lbRuleId : newLbRuleIds) { + GlobalLoadBalancerLbRuleMapVO newGslbLbMap = new GlobalLoadBalancerLbRuleMapVO(); + newGslbLbMap.setGslbLoadBalancerId(gslbRuleId); + newGslbLbMap.setLoadBalancerId(lbRuleId); + if (lbRuleWeightMap != null && lbRuleWeightMap.get(lbRuleId) != null) { + newGslbLbMap.setWeight(lbRuleWeightMap.get(lbRuleId)); + } + _gslbLbMapDao.persist(newGslbLbMap); + } + + // mark the gslb rule state as add + if (gslbRule.getState() == GlobalLoadBalancerRule.State.Staged || gslbRule.getState() == + GlobalLoadBalancerRule.State.Active ) { + gslbRule.setState(GlobalLoadBalancerRule.State.Add); + _gslbRuleDao.update(gslbRule.getId(), gslbRule); + } } - _gslbLbMapDao.persist(newGslbLbMap); - } + }); - // mark the gslb rule state as add - if (gslbRule.getState() == GlobalLoadBalancerRule.State.Staged || gslbRule.getState() == - GlobalLoadBalancerRule.State.Active ) { - gslbRule.setState(GlobalLoadBalancerRule.State.Add); - _gslbRuleDao.update(gslbRule.getId(), gslbRule); - } - - txn.commit(); boolean success = false; try { @@ -304,8 +310,8 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR CallContext ctx = CallContext.current(); Account caller = ctx.getCallingAccount(); - long gslbRuleId = removeFromGslbCmd.getGlobalLoadBalancerRuleId(); - GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); + final long gslbRuleId = removeFromGslbCmd.getGlobalLoadBalancerRuleId(); + final GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); if (gslbRule == null) { throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId); } @@ -316,7 +322,7 
@@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR throw new InvalidParameterValueException("global load balancer rule id: " + gslbRuleId + " is already in revoked state"); } - List lbRuleIdsToremove = removeFromGslbCmd.getLoadBalancerRulesIds(); + final List lbRuleIdsToremove = removeFromGslbCmd.getLoadBalancerRulesIds(); if (lbRuleIdsToremove == null || lbRuleIdsToremove.isEmpty()) { throw new InvalidParameterValueException("empty list of load balancer rule Ids specified to be un-assigned" + " to global load balancer rule"); @@ -356,23 +362,24 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR } } - Transaction txn = Transaction.currentTxn(); - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // update the mapping of gslb rule to Lb rule, to revoke state + for (Long lbRuleId : lbRuleIdsToremove) { + GlobalLoadBalancerLbRuleMapVO removeGslbLbMap = _gslbLbMapDao.findByGslbRuleIdAndLbRuleId(gslbRuleId, lbRuleId); + removeGslbLbMap.setRevoke(true); + _gslbLbMapDao.update(removeGslbLbMap.getId(), removeGslbLbMap); + } - // update the mapping of gslb rule to Lb rule, to revoke state - for (Long lbRuleId : lbRuleIdsToremove) { - GlobalLoadBalancerLbRuleMapVO removeGslbLbMap = _gslbLbMapDao.findByGslbRuleIdAndLbRuleId(gslbRuleId, lbRuleId); - removeGslbLbMap.setRevoke(true); - _gslbLbMapDao.update(removeGslbLbMap.getId(), removeGslbLbMap); - } + // mark the gslb rule state as add + if (gslbRule.getState() == GlobalLoadBalancerRule.State.Staged) { + gslbRule.setState(GlobalLoadBalancerRule.State.Add); + _gslbRuleDao.update(gslbRule.getId(), gslbRule); + } - // mark the gslb rule state as add - if (gslbRule.getState() == GlobalLoadBalancerRule.State.Staged) { - gslbRule.setState(GlobalLoadBalancerRule.State.Add); - _gslbRuleDao.update(gslbRule.getId(), gslbRule); - } - - txn.commit(); + } + }); boolean 
success = false; try { @@ -387,19 +394,21 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR throw ex; } - txn.start(); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + // remove the mappings of gslb rule to Lb rule that are in revoked state + for (Long lbRuleId : lbRuleIdsToremove) { + GlobalLoadBalancerLbRuleMapVO removeGslbLbMap = _gslbLbMapDao.findByGslbRuleIdAndLbRuleId(gslbRuleId, lbRuleId); + _gslbLbMapDao.remove(removeGslbLbMap.getId()); + } - // remove the mappings of gslb rule to Lb rule that are in revoked state - for (Long lbRuleId : lbRuleIdsToremove) { - GlobalLoadBalancerLbRuleMapVO removeGslbLbMap = _gslbLbMapDao.findByGslbRuleIdAndLbRuleId(gslbRuleId, lbRuleId); - _gslbLbMapDao.remove(removeGslbLbMap.getId()); - } + // on success set state back to Active + gslbRule.setState(GlobalLoadBalancerRule.State.Active); + _gslbRuleDao.update(gslbRule.getId(), gslbRule); - // on success set state back to Active - gslbRule.setState(GlobalLoadBalancerRule.State.Active); - _gslbRuleDao.update(gslbRule.getId(), gslbRule); - - txn.commit(); + } + }); success = true; } catch (ResourceUnavailableException e) { @@ -429,9 +438,9 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR } @DB - private void revokeGslbRule(long gslbRuleId, Account caller) { + private void revokeGslbRule(final long gslbRuleId, Account caller) { - GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); + final GlobalLoadBalancerRuleVO gslbRule = _gslbRuleDao.findById(gslbRuleId); if (gslbRule == null) { throw new InvalidParameterValueException("Invalid global load balancer rule id: " + gslbRuleId); @@ -454,19 +463,21 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR _gslbRuleDao.update(gslbRuleId, gslbRule); } - Transaction txn = Transaction.currentTxn(); - txn.start(); - - List 
gslbLbMapVos = _gslbLbMapDao.listByGslbRuleId(gslbRuleId); - if (gslbLbMapVos != null) { - //mark all the GSLB-LB mapping to be in revoke state - for (GlobalLoadBalancerLbRuleMapVO gslbLbMap : gslbLbMapVos) { - gslbLbMap.setRevoke(true); - _gslbLbMapDao.update(gslbLbMap.getId(), gslbLbMap); + final List gslbLbMapVos = Transaction.execute(new TransactionCallback>() { + @Override + public List doInTransaction(TransactionStatus status) { + List gslbLbMapVos = _gslbLbMapDao.listByGslbRuleId(gslbRuleId); + if (gslbLbMapVos != null) { + //mark all the GSLB-LB mapping to be in revoke state + for (GlobalLoadBalancerLbRuleMapVO gslbLbMap : gslbLbMapVos) { + gslbLbMap.setRevoke(true); + _gslbLbMapDao.update(gslbLbMap.getId(), gslbLbMap); + } + } + + return gslbLbMapVos; } - } - - txn.commit(); + }); boolean success = false; try { @@ -478,22 +489,25 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR throw new CloudRuntimeException("Failed to update the gloabal load balancer"); } - txn.start(); - //remove all mappings between GSLB rule and load balancer rules - if (gslbLbMapVos != null) { - for (GlobalLoadBalancerLbRuleMapVO gslbLbMap : gslbLbMapVos) { - _gslbLbMapDao.remove(gslbLbMap.getId()); + Transaction.execute(new TransactionCallbackNoReturn() { + @Override + public void doInTransactionWithoutResult(TransactionStatus status) { + //remove all mappings between GSLB rule and load balancer rules + if (gslbLbMapVos != null) { + for (GlobalLoadBalancerLbRuleMapVO gslbLbMap : gslbLbMapVos) { + _gslbLbMapDao.remove(gslbLbMap.getId()); + } + } + + //remove the GSLB rule itself + _gslbRuleDao.remove(gslbRuleId); + + UsageEventUtils.publishUsageEvent(EventTypes.EVENT_GLOBAL_LOAD_BALANCER_DELETE, gslbRule.getAccountId(), + 0, gslbRule.getId(), gslbRule.getName(), GlobalLoadBalancerRule.class.getName(), + gslbRule.getUuid()); } - } + }); - //remove the GSLB rule itself - _gslbRuleDao.remove(gslbRuleId); - - 
UsageEventUtils.publishUsageEvent(EventTypes.EVENT_GLOBAL_LOAD_BALANCER_DELETE, gslbRule.getAccountId(), - 0, gslbRule.getId(), gslbRule.getName(), GlobalLoadBalancerRule.class.getName(), - gslbRule.getUuid()); - - txn.commit(); } @Override @@ -523,8 +537,6 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR throw new InvalidParameterValueException("Invalid persistence: " + stickyMethod); } - Transaction txn = Transaction.currentTxn(); - txn.start(); if (algorithm != null) { gslbRule.setAlgorithm(algorithm); } @@ -536,7 +548,6 @@ public class GlobalLoadBalancingRulesServiceImpl implements GlobalLoadBalancingR } gslbRule.setState(GlobalLoadBalancerRule.State.Add); _gslbRuleDao.update(gslbRule.getId(), gslbRule); - txn.commit(); try { s_logger.debug("Updating global load balancer with id " + gslbRule.getUuid()); diff --git a/server/test/com/cloud/api/ApiDispatcherTest.java b/server/test/com/cloud/api/ApiDispatcherTest.java new file mode 100644 index 00000000000..4fca289a774 --- /dev/null +++ b/server/test/com/cloud/api/ApiDispatcherTest.java @@ -0,0 +1,108 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.api; + +import java.util.HashMap; +import java.util.Map; + +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.context.CallContext; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.user.Account; +import com.cloud.user.AccountManager; +import com.cloud.user.User; + +@RunWith(MockitoJUnitRunner.class) +public class ApiDispatcherTest { + + @Mock + AccountManager accountManager; + + public static class TestCmd extends BaseCmd { + + @Parameter(name = "strparam1") + String strparam1; + + @Parameter(name="intparam1", type=CommandType.INTEGER) + int intparam1; + + @Parameter(name="boolparam1", type=CommandType.BOOLEAN) + boolean boolparam1; + + @Override + public void execute() throws ResourceUnavailableException, + InsufficientCapacityException, ServerApiException, + ConcurrentOperationException, ResourceAllocationException, + NetworkRuleConflictException { + // well documented nothing + } + + @Override + public String getCommandName() { + return "test"; + } + + @Override + public long getEntityOwnerId() { + return 0; + } + + } + + @Before + public void setup() { + CallContext.register(Mockito.mock(User.class), Mockito.mock(Account.class)); + new ApiDispatcher().init(); + ApiDispatcher.getInstance()._accountMgr = accountManager; + } + + @After + public void cleanup() { + CallContext.unregister(); + } + + @Test + public void 
processParameters() { + HashMap params = new HashMap(); + params.put("strparam1", "foo"); + params.put("intparam1", "100"); + params.put("boolparam1", "true"); + TestCmd cmd = new TestCmd(); + //how lucky that field is not protected, this test would be impossible + ApiDispatcher.processParameters(cmd, params); + Assert.assertEquals("foo", cmd.strparam1); + Assert.assertEquals(100, cmd.intparam1); + } + +} diff --git a/server/test/com/cloud/configuration/ConfigurationManagerTest.java b/server/test/com/cloud/configuration/ConfigurationManagerTest.java index e49d93adb47..2d2c03dcbaf 100755 --- a/server/test/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/test/com/cloud/configuration/ConfigurationManagerTest.java @@ -27,7 +27,9 @@ import static org.mockito.Mockito.when; import java.lang.reflect.Field; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.UUID; import junit.framework.Assert; @@ -38,7 +40,6 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - import org.apache.cloudstack.api.command.admin.vlan.DedicatePublicIpRangeCmd; import org.apache.cloudstack.api.command.admin.vlan.ReleasePublicIpRangeCmd; import org.apache.cloudstack.context.CallContext; @@ -53,7 +54,9 @@ import com.cloud.dc.VlanVO; import com.cloud.dc.dao.AccountVlanMapDao; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.VlanDao; +import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.IpAddressManager; +import com.cloud.network.Network.Capability; import com.cloud.network.dao.FirewallRulesDao; import com.cloud.network.dao.IPAddressDao; import com.cloud.network.dao.IPAddressVO; @@ -65,6 +68,7 @@ import com.cloud.user.ResourceLimitService; import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import 
com.cloud.utils.net.Ip; public class ConfigurationManagerTest { @@ -209,7 +213,7 @@ public class ConfigurationManagerTest { } void runDedicatePublicIpRangePostiveTest() throws Exception { - Transaction txn = Transaction.open("runDedicatePublicIpRangePostiveTest"); + TransactionLegacy txn = TransactionLegacy.open("runDedicatePublicIpRangePostiveTest"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(vlan); @@ -235,7 +239,7 @@ public class ConfigurationManagerTest { } void runDedicatePublicIpRangeInvalidRange() throws Exception { - Transaction txn = Transaction.open("runDedicatePublicIpRangeInvalidRange"); + TransactionLegacy txn = TransactionLegacy.open("runDedicatePublicIpRangeInvalidRange"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(null); try { @@ -248,7 +252,7 @@ public class ConfigurationManagerTest { } void runDedicatePublicIpRangeDedicatedRange() throws Exception { - Transaction txn = Transaction.open("runDedicatePublicIpRangeDedicatedRange"); + TransactionLegacy txn = TransactionLegacy.open("runDedicatePublicIpRangeDedicatedRange"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(vlan); @@ -277,7 +281,7 @@ public class ConfigurationManagerTest { } void runDedicatePublicIpRangeInvalidZone() throws Exception { - Transaction txn = Transaction.open("runDedicatePublicIpRangeInvalidZone"); + TransactionLegacy txn = TransactionLegacy.open("runDedicatePublicIpRangeInvalidZone"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(vlan); @@ -303,7 +307,7 @@ public class ConfigurationManagerTest { } void runDedicatePublicIpRangeIPAdressAllocated() throws Exception { - Transaction txn = Transaction.open("runDedicatePublicIpRangeIPAdressAllocated"); + TransactionLegacy txn = TransactionLegacy.open("runDedicatePublicIpRangeIPAdressAllocated"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(vlan); @@ -330,7 +334,7 @@ public class ConfigurationManagerTest { } void 
runReleasePublicIpRangePostiveTest1() throws Exception { - Transaction txn = Transaction.open("runReleasePublicIpRangePostiveTest1"); + TransactionLegacy txn = TransactionLegacy.open("runReleasePublicIpRangePostiveTest1"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(vlan); @@ -354,7 +358,7 @@ public class ConfigurationManagerTest { } void runReleasePublicIpRangePostiveTest2() throws Exception { - Transaction txn = Transaction.open("runReleasePublicIpRangePostiveTest2"); + TransactionLegacy txn = TransactionLegacy.open("runReleasePublicIpRangePostiveTest2"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(vlan); @@ -388,7 +392,7 @@ public class ConfigurationManagerTest { } void runReleasePublicIpRangeInvalidIpRange() throws Exception { - Transaction txn = Transaction.open("runReleasePublicIpRangeInvalidIpRange"); + TransactionLegacy txn = TransactionLegacy.open("runReleasePublicIpRangeInvalidIpRange"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(null); try { @@ -401,7 +405,7 @@ public class ConfigurationManagerTest { } void runReleaseNonDedicatedPublicIpRange() throws Exception { - Transaction txn = Transaction.open("runReleaseNonDedicatedPublicIpRange"); + TransactionLegacy txn = TransactionLegacy.open("runReleaseNonDedicatedPublicIpRange"); when(configurationMgr._vlanDao.findById(anyLong())).thenReturn(vlan); @@ -415,6 +419,70 @@ public class ConfigurationManagerTest { } } + @Test + public void validateEmptyStaticNatServiceCapablitiesTest() { + Map staticNatServiceCapabilityMap = new HashMap(); + + configurationMgr.validateStaticNatServiceCapablities(staticNatServiceCapabilityMap); + } + + @Test + public void validateInvalidStaticNatServiceCapablitiesTest() { + Map staticNatServiceCapabilityMap = new HashMap(); + staticNatServiceCapabilityMap.put(Capability.AssociatePublicIP, "Frue and Talse"); + + boolean caught = false; + try { + 
configurationMgr.validateStaticNatServiceCapablities(staticNatServiceCapabilityMap); + } + catch (InvalidParameterValueException e) { + Assert.assertTrue(e.getMessage(),e.getMessage().contains("(frue and talse)")); + caught = true; + } + Assert.assertTrue("should not be accepted",caught); + } + + @Test + public void validateTTStaticNatServiceCapablitiesTest() { + Map staticNatServiceCapabilityMap = new HashMap(); + staticNatServiceCapabilityMap.put(Capability.AssociatePublicIP, "true and Talse"); + staticNatServiceCapabilityMap.put(Capability.ElasticIp, "True"); + + configurationMgr.validateStaticNatServiceCapablities(staticNatServiceCapabilityMap); + } + @Test + public void validateFTStaticNatServiceCapablitiesTest() { + Map staticNatServiceCapabilityMap = new HashMap(); + staticNatServiceCapabilityMap.put(Capability.AssociatePublicIP, "false"); + staticNatServiceCapabilityMap.put(Capability.ElasticIp, "True"); + + configurationMgr.validateStaticNatServiceCapablities(staticNatServiceCapabilityMap); + } + @Test + public void validateTFStaticNatServiceCapablitiesTest() { + Map staticNatServiceCapabilityMap = new HashMap(); + staticNatServiceCapabilityMap.put(Capability.AssociatePublicIP, "true and Talse"); + staticNatServiceCapabilityMap.put(Capability.ElasticIp, "false"); + + boolean caught = false; + try { + configurationMgr.validateStaticNatServiceCapablities(staticNatServiceCapabilityMap); + } + catch (InvalidParameterValueException e) { + Assert.assertTrue(e.getMessage(),e.getMessage().contains("Capability " + Capability.AssociatePublicIP.getName() + + " can only be set when capability " + Capability.ElasticIp.getName() + " is true")); + caught = true; + } + Assert.assertTrue("should not be accepted",caught); + } + @Test + public void validateFFStaticNatServiceCapablitiesTest() { + Map staticNatServiceCapabilityMap = new HashMap(); + staticNatServiceCapabilityMap.put(Capability.AssociatePublicIP, "false"); + 
staticNatServiceCapabilityMap.put(Capability.ElasticIp, "False"); + + configurationMgr.validateStaticNatServiceCapablities(staticNatServiceCapabilityMap); + } public class DedicatePublicIpRangeCmdExtn extends DedicatePublicIpRangeCmd { @Override diff --git a/server/test/com/cloud/ha/KVMFencerTest.java b/server/test/com/cloud/ha/KVMFencerTest.java new file mode 100644 index 00000000000..d34ef018bef --- /dev/null +++ b/server/test/com/cloud/ha/KVMFencerTest.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package com.cloud.ha; + +import java.util.Arrays; +import java.util.Collections; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.runners.MockitoJUnitRunner; + +import com.cloud.agent.AgentManager; +import com.cloud.agent.api.FenceAnswer; +import com.cloud.agent.api.FenceCommand; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor.HypervisorType; +import com.cloud.resource.ResourceManager; +import com.cloud.vm.VirtualMachine; + +@RunWith(MockitoJUnitRunner.class) +public class KVMFencerTest { + + @Mock + HostDao hostDao; + @Mock + AgentManager agentManager; + @Mock + ResourceManager resourceManager; + + KVMFencer fencer; + + @Before + public void setup() { + fencer = new KVMFencer(); + fencer._agentMgr = agentManager; + fencer._hostDao = hostDao; + fencer._resourceMgr = resourceManager; + } + + @Test + public void testWithSingleHost() { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(1l); + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Collections.singletonList(host)); + Assert.assertFalse(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithSingleHostDown() { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Down); + 
Mockito.when(host.getId()).thenReturn(1l); + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Collections.singletonList(host)); + Assert.assertFalse(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithHosts() throws AgentUnavailableException, + OperationTimedoutException { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(1l); + + HostVO secondHost = Mockito.mock(HostVO.class); + Mockito.when(secondHost.getClusterId()).thenReturn(1l); + Mockito.when(secondHost.getHypervisorType()).thenReturn( + HypervisorType.KVM); + Mockito.when(secondHost.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(2l); + + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Arrays.asList(host, secondHost)); + + FenceAnswer answer = new FenceAnswer(null, true, "ok"); + Mockito.when( + agentManager.send(Mockito.anyLong(), + Mockito.any(FenceCommand.class))).thenReturn(answer); + + Assert.assertTrue(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithFailingFence() throws AgentUnavailableException, + OperationTimedoutException { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(1l); + + HostVO secondHost = Mockito.mock(HostVO.class); + Mockito.when(secondHost.getClusterId()).thenReturn(1l); + Mockito.when(secondHost.getHypervisorType()).thenReturn( + HypervisorType.KVM); + 
Mockito.when(secondHost.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(2l); + + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Arrays.asList(host, secondHost)); + + Mockito.when( + agentManager.send(Mockito.anyLong(), + Mockito.any(FenceCommand.class))).thenThrow( + new AgentUnavailableException(2l)); + + Assert.assertFalse(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithTimeoutingFence() throws AgentUnavailableException, + OperationTimedoutException { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.KVM); + Mockito.when(host.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(1l); + + HostVO secondHost = Mockito.mock(HostVO.class); + Mockito.when(secondHost.getClusterId()).thenReturn(1l); + Mockito.when(secondHost.getHypervisorType()).thenReturn( + HypervisorType.KVM); + Mockito.when(secondHost.getStatus()).thenReturn(Status.Up); + Mockito.when(host.getId()).thenReturn(2l); + + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Arrays.asList(host, secondHost)); + + Mockito.when( + agentManager.send(Mockito.anyLong(), + Mockito.any(FenceCommand.class))).thenThrow( + new OperationTimedoutException(null, 2l, 0l, 0, false)); + + Assert.assertFalse(fencer.fenceOff(virtualMachine, host)); + } + + @Test + public void testWithSingleNotKVM() { + HostVO host = Mockito.mock(HostVO.class); + Mockito.when(host.getClusterId()).thenReturn(1l); + Mockito.when(host.getHypervisorType()).thenReturn(HypervisorType.Any); + Mockito.when(host.getStatus()).thenReturn(Status.Down); + Mockito.when(host.getId()).thenReturn(1l); + VirtualMachine virtualMachine = Mockito.mock(VirtualMachine.class); + + 
Mockito.when(resourceManager.listAllHostsInCluster(1l)).thenReturn( + Collections.singletonList(host)); + Assert.assertNull(fencer.fenceOff(virtualMachine, host)); + } + +} diff --git a/server/test/com/cloud/metadata/ResourceMetaDataManagerTest.java b/server/test/com/cloud/metadata/ResourceMetaDataManagerTest.java index 2ab9216b766..84000fc2b70 100644 --- a/server/test/com/cloud/metadata/ResourceMetaDataManagerTest.java +++ b/server/test/com/cloud/metadata/ResourceMetaDataManagerTest.java @@ -17,42 +17,27 @@ package com.cloud.metadata; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyFloat; -import static org.mockito.Matchers.anyInt; import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.anyString; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.when; -import java.lang.reflect.Field; -import java.util.List; import java.util.Map; -import com.cloud.server.TaggedResourceService; -import com.cloud.utils.db.DB; -import com.cloud.vm.dao.NicDetailDao; -import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.command.user.vm.RestoreVMCmd; -import org.apache.cloudstack.api.command.user.vm.ScaleVMCmd; +import javax.naming.ConfigurationException; + import org.apache.commons.collections.map.HashedMap; import org.junit.Before; -import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; import org.mockito.Spy; import com.cloud.exception.ResourceAllocationException; -import com.cloud.metadata.ResourceMetaDataManager; -import com.cloud.metadata.ResourceMetaDataManagerImpl; import com.cloud.server.ResourceTag; -import com.cloud.storage.Volume; +import com.cloud.server.TaggedResourceService; import com.cloud.storage.dao.VolumeDetailsDao; -import com.cloud.user.dao.UserDao; - -import javax.naming.ConfigurationException; +import 
com.cloud.vm.dao.NicDetailsDao; public class ResourceMetaDataManagerTest { @@ -62,7 +47,7 @@ public class ResourceMetaDataManagerTest { @Spy ResourceMetaDataManagerImpl _resourceMetaDataMgr = new ResourceMetaDataManagerImpl(); @Mock VolumeDetailsDao _volumeDetailDao; @Mock - NicDetailDao _nicDetailDao; + NicDetailsDao _nicDetailDao; @Mock TaggedResourceService _taggedResourceMgr; @Before @@ -83,17 +68,17 @@ public class ResourceMetaDataManagerTest { // Test removing details - @Test + //@Test public void testResourceDetails() throws ResourceAllocationException { //when(_resourceMetaDataMgr.getResourceId(anyString(), eq(ResourceTag.TaggedResourceType.Volume))).thenReturn(1L); - doReturn(1L).when(_taggedResourceMgr).getResourceId(anyString(), eq(ResourceTag.TaggedResourceType.Volume)); + doReturn(1L).when(_taggedResourceMgr).getResourceId(anyString(), eq(ResourceTag.ResourceObjectType.Volume)); // _volumeDetailDao.removeDetails(id, key); - doNothing().when(_volumeDetailDao).removeDetails(anyLong(), anyString()); - doNothing().when(_nicDetailDao).removeDetails(anyLong(), anyString()); - _resourceMetaDataMgr.deleteResourceMetaData(anyString(), eq(ResourceTag.TaggedResourceType.Volume), anyString()); + doNothing().when(_volumeDetailDao).removeDetail(anyLong(), anyString()); + doNothing().when(_nicDetailDao).removeDetail(anyLong(), anyString()); + _resourceMetaDataMgr.deleteResourceMetaData(anyString(), eq(ResourceTag.ResourceObjectType.Volume), anyString()); } @@ -103,14 +88,14 @@ public class ResourceMetaDataManagerTest { - doReturn(1L).when(_taggedResourceMgr).getResourceId("1", ResourceTag.TaggedResourceType.Volume); + doReturn(1L).when(_taggedResourceMgr).getResourceId("1", ResourceTag.ResourceObjectType.Volume); // _volumeDetailDao.removeDetails(id, key); - doNothing().when(_volumeDetailDao).removeDetails(anyLong(), anyString()); - doNothing().when(_nicDetailDao).removeDetails(anyLong(), anyString()); + doNothing().when(_volumeDetailDao).removeDetail(anyLong(), 
anyString()); + doNothing().when(_nicDetailDao).removeDetail(anyLong(), anyString()); Map map = new HashedMap(); map.put("key","value"); - _resourceMetaDataMgr.addResourceMetaData("1", ResourceTag.TaggedResourceType.Volume, map); + _resourceMetaDataMgr.addResourceMetaData("1", ResourceTag.ResourceObjectType.Volume, map); } diff --git a/server/test/com/cloud/network/CreatePrivateNetworkTest.java b/server/test/com/cloud/network/CreatePrivateNetworkTest.java index 0e57b027217..9276309037c 100644 --- a/server/test/com/cloud/network/CreatePrivateNetworkTest.java +++ b/server/test/com/cloud/network/CreatePrivateNetworkTest.java @@ -36,7 +36,6 @@ import org.junit.Ignore; import org.junit.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; - import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; @@ -63,6 +62,7 @@ import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; import com.cloud.utils.db.DB; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; //@Ignore("Requires database to be set up") @@ -152,8 +152,8 @@ public class CreatePrivateNetworkTest { @Test @DB public void createInvalidlyHostedPrivateNetwork() { - Transaction __txn; - __txn = Transaction.open("createInvalidlyHostedPrivateNetworkTest"); + TransactionLegacy __txn; + __txn = TransactionLegacy.open("createInvalidlyHostedPrivateNetworkTest"); /* Network nw; */ try { /* nw = */ diff --git a/server/test/com/cloud/network/DedicateGuestVlanRangesTest.java b/server/test/com/cloud/network/DedicateGuestVlanRangesTest.java index d6ee630569e..ea7167b471f 100644 --- a/server/test/com/cloud/network/DedicateGuestVlanRangesTest.java +++ b/server/test/com/cloud/network/DedicateGuestVlanRangesTest.java @@ -37,7 +37,6 @@ import org.junit.Before; import org.junit.Test; import org.mockito.Mock; import 
org.mockito.MockitoAnnotations; - import org.apache.cloudstack.api.command.admin.network.DedicateGuestVlanRangeCmd; import org.apache.cloudstack.api.command.admin.network.ListDedicatedGuestVlanRangesCmd; import org.apache.cloudstack.api.command.admin.network.ReleaseDedicatedGuestVlanRangeCmd; @@ -56,6 +55,7 @@ import com.cloud.user.AccountVO; import com.cloud.user.UserVO; import com.cloud.user.dao.AccountDao; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; public class DedicateGuestVlanRangesTest { @@ -177,7 +177,7 @@ public class DedicateGuestVlanRangesTest { } void runDedicateGuestVlanRangePostiveTest() throws Exception { - Transaction txn = Transaction.open("runDedicateGuestVlanRangePostiveTest"); + TransactionLegacy txn = TransactionLegacy.open("runDedicateGuestVlanRangePostiveTest"); Field dedicateVlanField = _dedicateGuestVlanRangeClass.getDeclaredField("vlan"); dedicateVlanField.setAccessible(true); @@ -213,7 +213,7 @@ public class DedicateGuestVlanRangesTest { } void runDedicateGuestVlanRangeInvalidFormat() throws Exception { - Transaction txn = Transaction.open("runDedicateGuestVlanRangeInvalidFormat"); + TransactionLegacy txn = TransactionLegacy.open("runDedicateGuestVlanRangeInvalidFormat"); Field dedicateVlanField = _dedicateGuestVlanRangeClass.getDeclaredField("vlan"); dedicateVlanField.setAccessible(true); @@ -234,7 +234,7 @@ public class DedicateGuestVlanRangesTest { } void runDedicateGuestVlanRangeInvalidRangeValue() throws Exception { - Transaction txn = Transaction.open("runDedicateGuestVlanRangeInvalidRangeValue"); + TransactionLegacy txn = TransactionLegacy.open("runDedicateGuestVlanRangeInvalidRangeValue"); Field dedicateVlanField = _dedicateGuestVlanRangeClass.getDeclaredField("vlan"); dedicateVlanField.setAccessible(true); @@ -255,7 +255,7 @@ public class DedicateGuestVlanRangesTest { } void runDedicateGuestVlanRangeAllocatedVlans() throws Exception { - Transaction txn = 
Transaction.open("runDedicateGuestVlanRangeAllocatedVlans"); + TransactionLegacy txn = TransactionLegacy.open("runDedicateGuestVlanRangeAllocatedVlans"); Field dedicateVlanField = _dedicateGuestVlanRangeClass.getDeclaredField("vlan"); dedicateVlanField.setAccessible(true); @@ -281,7 +281,7 @@ public class DedicateGuestVlanRangesTest { } void runDedicateGuestVlanRangeDedicatedRange() throws Exception { - Transaction txn = Transaction.open("runDedicateGuestVlanRangeDedicatedRange"); + TransactionLegacy txn = TransactionLegacy.open("runDedicateGuestVlanRangeDedicatedRange"); Field dedicateVlanField = _dedicateGuestVlanRangeClass.getDeclaredField("vlan"); dedicateVlanField.setAccessible(true); @@ -310,7 +310,7 @@ public class DedicateGuestVlanRangesTest { } void runDedicateGuestVlanRangePartiallyDedicated() throws Exception { - Transaction txn = Transaction.open("runDedicateGuestVlanRangePartiallyDedicated"); + TransactionLegacy txn = TransactionLegacy.open("runDedicateGuestVlanRangePartiallyDedicated"); Field dedicateVlanField = _dedicateGuestVlanRangeClass.getDeclaredField("vlan"); dedicateVlanField.setAccessible(true); @@ -339,7 +339,7 @@ public class DedicateGuestVlanRangesTest { } void runReleaseDedicatedGuestVlanRangePostiveTest() throws Exception { - Transaction txn = Transaction.open("runReleaseDedicatedGuestVlanRangePostiveTest"); + TransactionLegacy txn = TransactionLegacy.open("runReleaseDedicatedGuestVlanRangePostiveTest"); AccountGuestVlanMapVO accountGuestVlanMap = new AccountGuestVlanMapVO(1L, 1L); when(networkService._accountGuestVlanMapDao.findById(anyLong())).thenReturn(accountGuestVlanMap); @@ -357,7 +357,7 @@ public class DedicateGuestVlanRangesTest { } void runReleaseDedicatedGuestVlanRangeInvalidRange() throws Exception { - Transaction txn = Transaction.open("runReleaseDedicatedGuestVlanRangeInvalidRange"); + TransactionLegacy txn = TransactionLegacy.open("runReleaseDedicatedGuestVlanRangeInvalidRange"); 
when(networkService._accountGuestVlanMapDao.findById(anyLong())).thenReturn(null); diff --git a/server/test/com/cloud/network/UpdatePhysicalNetworkTest.java b/server/test/com/cloud/network/UpdatePhysicalNetworkTest.java index 1c0eff6453a..264f91ccdff 100644 --- a/server/test/com/cloud/network/UpdatePhysicalNetworkTest.java +++ b/server/test/com/cloud/network/UpdatePhysicalNetworkTest.java @@ -24,7 +24,8 @@ import com.cloud.network.dao.PhysicalNetworkDao; import com.cloud.network.dao.PhysicalNetworkVO; import com.cloud.utils.Pair; import com.cloud.utils.db.Transaction; -import org.junit.Test; +import com.cloud.utils.db.TransactionLegacy; + import org.junit.*; import org.mockito.ArgumentCaptor; import org.mockito.MockitoAnnotations.*; @@ -58,7 +59,7 @@ public class UpdatePhysicalNetworkTest { @Test public void updatePhysicalNetworkTest(){ - Transaction txn = Transaction.open("updatePhysicalNetworkTest"); + TransactionLegacy txn = TransactionLegacy.open("updatePhysicalNetworkTest"); NetworkServiceImpl networkService = setUp(); existingRange.add("524"); when(_physicalNetworkDao.findById(anyLong())).thenReturn(physicalNetworkVO); diff --git a/server/test/com/cloud/network/security/SecurityGroupManagerTestConfiguration.java b/server/test/com/cloud/network/security/SecurityGroupManagerTestConfiguration.java index 4d3a453a89d..ed290a54308 100644 --- a/server/test/com/cloud/network/security/SecurityGroupManagerTestConfiguration.java +++ b/server/test/com/cloud/network/security/SecurityGroupManagerTestConfiguration.java @@ -41,7 +41,7 @@ import com.cloud.dc.dao.DataCenterDaoImpl; import com.cloud.dc.dao.DataCenterIpAddressDaoImpl; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDaoImpl; import com.cloud.dc.dao.DataCenterVnetDaoImpl; -import com.cloud.dc.dao.DcDetailsDaoImpl; +import com.cloud.dc.dao.DataCenterDetailsDaoImpl; import com.cloud.dc.dao.HostPodDaoImpl; import com.cloud.dc.dao.PodVlanDaoImpl; import com.cloud.domain.dao.DomainDaoImpl; @@ -97,7 +97,7 @@ import 
com.cloud.vm.dao.VMInstanceDaoImpl; DataCenterLinkLocalIpAddressDaoImpl.class, DataCenterVnetDaoImpl.class, PodVlanDaoImpl.class, - DcDetailsDaoImpl.class, + DataCenterDetailsDaoImpl.class, SecurityGroupRuleDaoImpl.class, NicDaoImpl.class, SecurityGroupJoinDaoImpl.class}, diff --git a/server/test/com/cloud/network/vpn/MockRemoteAccessVPNServiceProvider.java b/server/test/com/cloud/network/vpn/MockRemoteAccessVPNServiceProvider.java index 1dde4a84ebb..6e8cd69a802 100644 --- a/server/test/com/cloud/network/vpn/MockRemoteAccessVPNServiceProvider.java +++ b/server/test/com/cloud/network/vpn/MockRemoteAccessVPNServiceProvider.java @@ -23,7 +23,6 @@ import javax.ejb.Local; import javax.naming.ConfigurationException; import com.cloud.exception.ResourceUnavailableException; -import com.cloud.network.Network; import com.cloud.network.RemoteAccessVpn; import com.cloud.network.VpnUser; import com.cloud.network.element.RemoteAccessVPNServiceProvider; @@ -63,14 +62,14 @@ public class MockRemoteAccessVPNServiceProvider extends ManagerBase implements } @Override - public boolean startVpn(Network network, RemoteAccessVpn vpn) + public boolean startVpn(RemoteAccessVpn vpn) throws ResourceUnavailableException { // TODO Auto-generated method stub return false; } @Override - public boolean stopVpn(Network network, RemoteAccessVpn vpn) + public boolean stopVpn(RemoteAccessVpn vpn) throws ResourceUnavailableException { // TODO Auto-generated method stub return false; diff --git a/server/test/com/cloud/user/MockDomainManagerImpl.java b/server/test/com/cloud/user/MockDomainManagerImpl.java index 616e12de0d3..aab8001a02c 100644 --- a/server/test/com/cloud/user/MockDomainManagerImpl.java +++ b/server/test/com/cloud/user/MockDomainManagerImpl.java @@ -51,6 +51,12 @@ public class MockDomainManagerImpl extends ManagerBase implements DomainManager, return null; } + @Override + public Domain getDomainByName(String name, long parentId) { + // TODO Auto-generated method stub + return null; + } + 
@Override public boolean isChildDomain(Long parentId, Long childId) { // TODO Auto-generated method stub diff --git a/server/test/com/cloud/vm/snapshot/VMSnapshotManagerTest.java b/server/test/com/cloud/vm/snapshot/VMSnapshotManagerTest.java index 055b2b08984..da6e7af8e43 100644 --- a/server/test/com/cloud/vm/snapshot/VMSnapshotManagerTest.java +++ b/server/test/com/cloud/vm/snapshot/VMSnapshotManagerTest.java @@ -192,13 +192,6 @@ public class VMSnapshotManagerTest { when(vmMock.getState()).thenReturn(State.Running); _vmSnapshotMgr.allocVMSnapshot(TEST_VM_ID,"","",true); - when(_vmSnapshotDao.findCurrentSnapshotByVmId(anyLong())).thenReturn(null); - doReturn(new ArrayList()).when(_vmSnapshotMgr).getVolumeTOList(anyLong()); - doReturn(new CreateVMSnapshotAnswer(null,true,"")).when(_vmSnapshotMgr).sendToPool(anyLong(), any(CreateVMSnapshotCommand.class)); - doNothing().when(_vmSnapshotMgr).processAnswer(any(VMSnapshotVO.class), - any(UserVmVO.class), any(Answer.class), anyLong()); - doReturn(true).when(_vmSnapshotMgr).vmSnapshotStateTransitTo(any(VMSnapshotVO.class),any(VMSnapshot.Event.class)); - _vmSnapshotMgr.createVmSnapshotInternal(vmMock, mock(VMSnapshotVO.class), 5L); } } diff --git a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java index c9a0480809b..3147f1f9752 100755 --- a/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java +++ b/server/test/com/cloud/vpc/MockConfigurationManagerImpl.java @@ -432,7 +432,7 @@ public class MockConfigurationManagerImpl extends ManagerBase implements Configu @Override public NetworkOfferingVO createNetworkOffering(String name, String displayText, TrafficType trafficType, String tags, boolean specifyVlan, Availability availability, Integer networkRate, Map> serviceProviderMap, boolean isDefault, GuestType type, boolean systemOnly, Long serviceOfferingId, boolean conserveMode, - Map> serviceCapabilityMap, boolean specifyIpRanges, boolean 
isPersistent, Map details, boolean egressDefaultPolicy, Integer maxconn) { + Map> serviceCapabilityMap, boolean specifyIpRanges, boolean isPersistent, Map details, boolean egressDefaultPolicy, Integer maxconn, boolean enableKeepAlive) { // TODO Auto-generated method stub return null; } diff --git a/server/test/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java b/server/test/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java index 7f1048fa616..acdd9dc8750 100644 --- a/server/test/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java +++ b/server/test/com/cloud/vpc/MockVpcVirtualNetworkApplianceManager.java @@ -426,4 +426,26 @@ VpcVirtualNetworkApplianceService { // TODO Auto-generated method stub return false; } + + @Override + public boolean startRemoteAccessVpn(RemoteAccessVpn vpn, + VirtualRouter router) throws ResourceUnavailableException { + // TODO Auto-generated method stub + return false; + } + + @Override + public boolean stopRemoteAccessVpn(RemoteAccessVpn vpn, VirtualRouter router) + throws ResourceUnavailableException { + // TODO Auto-generated method stub + return false; + } + + @Override + public String[] applyVpnUsers(RemoteAccessVpn vpn, + List users, VirtualRouter router) + throws ResourceUnavailableException { + // TODO Auto-generated method stub + return null; + } } diff --git a/server/test/com/cloud/vpc/VpcTestConfiguration.java b/server/test/com/cloud/vpc/VpcTestConfiguration.java index 9a22587e311..d34c46771fa 100644 --- a/server/test/com/cloud/vpc/VpcTestConfiguration.java +++ b/server/test/com/cloud/vpc/VpcTestConfiguration.java @@ -43,7 +43,7 @@ import com.cloud.dc.dao.DataCenterDaoImpl; import com.cloud.dc.dao.DataCenterIpAddressDaoImpl; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDaoImpl; import com.cloud.dc.dao.DataCenterVnetDaoImpl; -import com.cloud.dc.dao.DcDetailsDaoImpl; +import com.cloud.dc.dao.DataCenterDetailsDaoImpl; import com.cloud.dc.dao.HostPodDaoImpl; import com.cloud.dc.dao.PodVlanDaoImpl; import 
com.cloud.dc.dao.PodVlanMapDaoImpl; @@ -153,7 +153,7 @@ import com.cloud.vpc.dao.MockVpcOfferingServiceMapDaoImpl; VMTemplateHostDaoImpl.class, MockVpcDaoImpl.class, VMTemplateDaoImpl.class,VMTemplateZoneDaoImpl.class,VMTemplateDetailsDaoImpl.class,DataCenterDaoImpl.class,DataCenterIpAddressDaoImpl.class,DataCenterLinkLocalIpAddressDaoImpl.class,DataCenterVnetDaoImpl.class,PodVlanDaoImpl.class, - DcDetailsDaoImpl.class,MockNetworkManagerImpl.class,MockVpcVirtualNetworkApplianceManager.class, + DataCenterDetailsDaoImpl.class,MockNetworkManagerImpl.class,MockVpcVirtualNetworkApplianceManager.class, EntityManagerImpl.class,LoadBalancerDaoImpl.class,FirewallRulesCidrsDaoImpl.class,VirtualRouterProviderDaoImpl.class, ProjectDaoImpl.class,ProjectAccountDaoImpl.class,MockVpcOfferingDaoImpl.class, MockConfigurationManagerImpl.class, MockNetworkOfferingServiceMapDaoImpl.class, diff --git a/server/test/com/cloud/vpc/dao/MockConfigurationDaoImpl.java b/server/test/com/cloud/vpc/dao/MockConfigurationDaoImpl.java index a0110b56dae..0802b7a324d 100644 --- a/server/test/com/cloud/vpc/dao/MockConfigurationDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockConfigurationDaoImpl.java @@ -72,6 +72,15 @@ public class MockConfigurationDaoImpl extends GenericDaoBase implements NetworkDao{ /* (non-Javadoc) diff --git a/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java index a8208dd7d9c..f6a3b13fe90 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkOfferingDaoImpl.java @@ -33,10 +33,9 @@ import com.cloud.offerings.NetworkOfferingVO; import com.cloud.offerings.dao.NetworkOfferingDao; import com.cloud.offerings.dao.NetworkOfferingDaoImpl; import com.cloud.utils.db.DB; -import com.cloud.utils.db.GenericDaoBase; @Local(value = NetworkOfferingDao.class) -@DB(txn = false) +@DB() public class MockNetworkOfferingDaoImpl extends NetworkOfferingDaoImpl 
implements NetworkOfferingDao{ private static final Logger s_logger = Logger.getLogger(MockNetworkOfferingDaoImpl.class); diff --git a/server/test/com/cloud/vpc/dao/MockNetworkOfferingServiceMapDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkOfferingServiceMapDaoImpl.java index d1e835471c8..a7f77bc0e48 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkOfferingServiceMapDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkOfferingServiceMapDaoImpl.java @@ -16,19 +16,15 @@ // under the License. package com.cloud.vpc.dao; -import java.util.ArrayList; -import java.util.List; - import javax.ejb.Local; import com.cloud.network.Network.Service; import com.cloud.offerings.dao.NetworkOfferingServiceMapDao; import com.cloud.offerings.dao.NetworkOfferingServiceMapDaoImpl; import com.cloud.utils.db.DB; -import com.cloud.utils.db.SearchCriteria; @Local(value = NetworkOfferingServiceMapDao.class) -@DB(txn = false) +@DB() public class MockNetworkOfferingServiceMapDaoImpl extends NetworkOfferingServiceMapDaoImpl{ @Override diff --git a/server/test/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java b/server/test/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java index 103f04ea8b9..c5c0a063013 100644 --- a/server/test/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockNetworkServiceMapDaoImpl.java @@ -28,7 +28,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = NetworkServiceMapDao.class) -@DB(txn = false) +@DB() public class MockNetworkServiceMapDaoImpl extends GenericDaoBase implements NetworkServiceMapDao{ /* (non-Javadoc) diff --git a/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java b/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java index 562d67dc207..e7674e09c3d 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockVpcDaoImpl.java @@ -32,7 +32,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; 
@Local(value = VpcDao.class) -@DB(txn = false) +@DB() public class MockVpcDaoImpl extends GenericDaoBase implements VpcDao{ private static final Logger s_logger = Logger.getLogger(MockNetworkOfferingDaoImpl.class); @@ -86,12 +86,12 @@ public class MockVpcDaoImpl extends GenericDaoBase implements VpcDa } @Override - public VpcVO persist(VpcVO vpc, Map serviceProviderMap) { + public VpcVO persist(VpcVO vpc, Map> serviceProviderMap) { return null; } @Override - public void persistVpcServiceProviders(long vpcId, Map serviceProviderMap) { + public void persistVpcServiceProviders(long vpcId, Map> serviceProviderMap) { return; } diff --git a/server/test/com/cloud/vpc/dao/MockVpcOfferingDaoImpl.java b/server/test/com/cloud/vpc/dao/MockVpcOfferingDaoImpl.java index 329931e1dd4..48df3d466ef 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcOfferingDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockVpcOfferingDaoImpl.java @@ -24,7 +24,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = VpcOfferingDao.class) -@DB(txn = false) +@DB() public class MockVpcOfferingDaoImpl extends GenericDaoBase implements VpcOfferingDao{ /* (non-Javadoc) diff --git a/server/test/com/cloud/vpc/dao/MockVpcOfferingServiceMapDaoImpl.java b/server/test/com/cloud/vpc/dao/MockVpcOfferingServiceMapDaoImpl.java index 3357686af87..9618536aa75 100644 --- a/server/test/com/cloud/vpc/dao/MockVpcOfferingServiceMapDaoImpl.java +++ b/server/test/com/cloud/vpc/dao/MockVpcOfferingServiceMapDaoImpl.java @@ -27,7 +27,7 @@ import com.cloud.utils.db.DB; import com.cloud.utils.db.GenericDaoBase; @Local(value = VpcOfferingServiceMapDao.class) -@DB(txn = false) +@DB() public class MockVpcOfferingServiceMapDaoImpl extends GenericDaoBase implements VpcOfferingServiceMapDao{ /* (non-Javadoc) diff --git a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java index 
c32940bc757..d7ac3f7ce80 100644 --- a/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java +++ b/server/test/org/apache/cloudstack/networkoffering/ChildTestConfiguration.java @@ -61,7 +61,7 @@ import com.cloud.dc.dao.DataCenterDaoImpl; import com.cloud.dc.dao.DataCenterIpAddressDaoImpl; import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDao; import com.cloud.dc.dao.DataCenterVnetDaoImpl; -import com.cloud.dc.dao.DcDetailsDaoImpl; +import com.cloud.dc.dao.DataCenterDetailsDaoImpl; import com.cloud.dc.dao.DedicatedResourceDao; import com.cloud.dc.dao.HostPodDaoImpl; import com.cloud.dc.dao.PodVlanDaoImpl; @@ -154,7 +154,7 @@ import com.cloud.vm.dao.VMInstanceDaoImpl; DataCenterIpAddressDaoImpl.class, DataCenterVnetDaoImpl.class, PodVlanDaoImpl.class, - DcDetailsDaoImpl.class, + DataCenterDetailsDaoImpl.class, NicSecondaryIpDaoImpl.class, UserIpv6AddressDaoImpl.class, UserDaoImpl.class, diff --git a/server/test/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java b/server/test/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java index 1f1fb7521b0..f2ebf67acf9 100644 --- a/server/test/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java +++ b/server/test/org/apache/cloudstack/networkoffering/CreateNetworkOfferingTest.java @@ -106,7 +106,7 @@ public class CreateNetworkOfferingTest extends TestCase{ public void createSharedNtwkOffWithVlan() { NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, Availability.Optional, 200, null, false, Network.GuestType.Shared, false, - null, false, null, true, false, null, false, null); + null, false, null, true, false, null, false, null, true); assertNotNull("Shared network offering with specifyVlan=true failed to create ", off); } @@ -115,7 +115,7 @@ public class CreateNetworkOfferingTest extends TestCase{ try { NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, 
null, false, Availability.Optional, 200, null, false, Network.GuestType.Shared, false, - null, false, null, true, false, null, false, null); + null, false, null, true, false, null, false, null, true); assertNull("Shared network offering with specifyVlan=false was created", off); } catch (InvalidParameterValueException ex) { } @@ -125,7 +125,7 @@ public class CreateNetworkOfferingTest extends TestCase{ public void createSharedNtwkOffWithSpecifyIpRanges() { NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, Availability.Optional, 200, null, false, Network.GuestType.Shared, false, - null, false, null, true, false, null, false, null); + null, false, null, true, false, null, false, null, true); assertNotNull("Shared network offering with specifyIpRanges=true failed to create ", off); } @@ -135,7 +135,7 @@ public class CreateNetworkOfferingTest extends TestCase{ try { NetworkOfferingVO off = configMgr.createNetworkOffering("shared", "shared", TrafficType.Guest, null, true, Availability.Optional, 200, null, false, Network.GuestType.Shared, false, - null, false, null, false, false, null, false, null); + null, false, null, false, false, null, false, null, true); assertNull("Shared network offering with specifyIpRanges=false was created", off); } catch (InvalidParameterValueException ex) { } @@ -150,7 +150,7 @@ public class CreateNetworkOfferingTest extends TestCase{ serviceProviderMap.put(Network.Service.SourceNat, vrProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, - null, false, null, false, false, null, false, null); + null, false, null, false, false, null, false, null, true); assertNotNull("Isolated network offering with specifyIpRanges=false failed to create ", off); } @@ -163,7 +163,7 @@ public class CreateNetworkOfferingTest extends TestCase{ 
serviceProviderMap.put(Network.Service.SourceNat, vrProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, - null, false, null, false, false, null, false, null); + null, false, null, false, false, null, false, null, true); assertNotNull("Isolated network offering with specifyVlan=true wasn't created", off); } @@ -177,7 +177,7 @@ public class CreateNetworkOfferingTest extends TestCase{ serviceProviderMap.put(Network.Service.SourceNat, vrProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, - null, false, null, true, false, null, false, null); + null, false, null, true, false, null, false, null, true); assertNull("Isolated network offering with specifyIpRanges=true and source nat service enabled, was created", off); } catch (InvalidParameterValueException ex) { } @@ -190,7 +190,7 @@ public class CreateNetworkOfferingTest extends TestCase{ Set vrProvider = new HashSet(); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, false, Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, - null, false, null, true, false, null, false, null); + null, false, null, true, false, null, false, null, true); assertNotNull("Isolated network offering with specifyIpRanges=true and with no sourceNatService, failed to create", off); } @@ -208,7 +208,7 @@ public class CreateNetworkOfferingTest extends TestCase{ serviceProviderMap.put(Network.Service.Lb , vrProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, - null, false, null, false, 
false, null, false, null); + null, false, null, false, false, null, false, null, true); // System.out.println("Creating Vpc Network Offering"); assertNotNull("Vpc Isolated network offering with Vpc provider ", off); } @@ -228,7 +228,7 @@ public class CreateNetworkOfferingTest extends TestCase{ serviceProviderMap.put(Network.Service.Lb, lbProvider); NetworkOfferingVO off = configMgr.createNetworkOffering("isolated", "isolated", TrafficType.Guest, null, true, Availability.Optional, 200, serviceProviderMap, false, Network.GuestType.Isolated, false, null, false, - null, false, false, null, false, null); + null, false, false, null, false, null, true); // System.out.println("Creating Vpc Network Offering"); assertNotNull("Vpc Isolated network offering with Vpc and Netscaler provider ", off); } diff --git a/server/test/org/apache/cloudstack/region/RegionManagerTest.java b/server/test/org/apache/cloudstack/region/RegionManagerTest.java index db6bf20cfb7..d1d6de4ddb5 100644 --- a/server/test/org/apache/cloudstack/region/RegionManagerTest.java +++ b/server/test/org/apache/cloudstack/region/RegionManagerTest.java @@ -18,33 +18,20 @@ package org.apache.cloudstack.region; +import java.util.HashMap; + +import javax.naming.ConfigurationException; + import junit.framework.Assert; -import junit.framework.TestCase; -import org.apache.cloudstack.api.command.admin.domain.DeleteDomainCmd; -import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.region.dao.RegionDao; - -import org.apache.log4j.Logger; -import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; import com.cloud.exception.InvalidParameterValueException; -import com.cloud.user.Account; -import com.cloud.user.dao.AccountDao; +public class RegionManagerTest { - -public class RegionManagerTest extends TestCase { - private static final Logger s_logger = Logger.getLogger(RegionManagerTest.class); - - @Before - @Override - protected void setUp() { - - } - @Test public void testUniqueName() 
{ RegionManagerImpl regionMgr = new RegionManagerImpl(); @@ -59,4 +46,10 @@ public class RegionManagerTest extends TestCase { } } + @Test + public void configure() throws ConfigurationException { + RegionManagerImpl regionManager = new RegionManagerImpl(); + regionManager.configure("foo", new HashMap()); + Assert.assertTrue(regionManager.getId() != 0); + } } diff --git a/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java b/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java index d0f09513e29..504ab9cebb2 100644 --- a/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java +++ b/server/test/org/apache/cloudstack/region/gslb/GlobalLoadBalancingRulesServiceImplTest.java @@ -33,7 +33,6 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; - import org.apache.cloudstack.api.command.user.region.ha.gslb.AssignToGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.CreateGlobalLoadBalancerRuleCmd; import org.apache.cloudstack.api.command.user.region.ha.gslb.DeleteGlobalLoadBalancerRuleCmd; @@ -59,6 +58,7 @@ import com.cloud.user.AccountManager; import com.cloud.user.AccountVO; import com.cloud.user.UserVO; import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.net.Ip; public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { @@ -181,7 +181,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runCreateGlobalLoadBalancerRulePostiveTest() throws Exception { - Transaction txn = Transaction.open("runCreateGlobalLoadBalancerRulePostiveTest"); + TransactionLegacy txn = TransactionLegacy.open("runCreateGlobalLoadBalancerRulePostiveTest"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -246,7 +246,7 @@ public class 
GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runCreateGlobalLoadBalancerRuleInvalidAlgorithm() throws Exception { - Transaction txn = Transaction.open("runCreateGlobalLoadBalancerRulePostiveTest"); + TransactionLegacy txn = TransactionLegacy.open("runCreateGlobalLoadBalancerRulePostiveTest"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -311,7 +311,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runCreateGlobalLoadBalancerRuleInvalidStickyMethod() throws Exception { - Transaction txn = Transaction.open("runCreateGlobalLoadBalancerRulePostiveTest"); + TransactionLegacy txn = TransactionLegacy.open("runCreateGlobalLoadBalancerRulePostiveTest"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -376,7 +376,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runCreateGlobalLoadBalancerRuleInvalidServiceType() throws Exception { - Transaction txn = Transaction.open("runCreateGlobalLoadBalancerRulePostiveTest"); + TransactionLegacy txn = TransactionLegacy.open("runCreateGlobalLoadBalancerRulePostiveTest"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -441,7 +441,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runCreateGlobalLoadBalancerRuleInvalidDomainName() throws Exception { - Transaction txn = Transaction.open("runCreateGlobalLoadBalancerRulePostiveTest"); + TransactionLegacy txn = TransactionLegacy.open("runCreateGlobalLoadBalancerRulePostiveTest"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -507,7 +507,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runAssignToGlobalLoadBalancerRuleTest() throws Exception { - Transaction txn = Transaction.open("runAssignToGlobalLoadBalancerRuleTest"); + TransactionLegacy 
txn = TransactionLegacy.open("runAssignToGlobalLoadBalancerRuleTest"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -570,7 +570,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runAssignToGlobalLoadBalancerRuleTestSameZoneLb() throws Exception { - Transaction txn = Transaction.open("runAssignToGlobalLoadBalancerRuleTestSameZoneLb"); + TransactionLegacy txn = TransactionLegacy.open("runAssignToGlobalLoadBalancerRuleTestSameZoneLb"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -655,7 +655,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runAssignToGlobalLoadBalancerRuleTestRevokedState() throws Exception { - Transaction txn = Transaction.open("runAssignToGlobalLoadBalancerRuleTestRevokedState"); + TransactionLegacy txn = TransactionLegacy.open("runAssignToGlobalLoadBalancerRuleTestRevokedState"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -712,7 +712,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runRemoveFromGlobalLoadBalancerRuleTest() throws Exception { - Transaction txn = Transaction.open("runRemoveFromGlobalLoadBalancerRuleTest"); + TransactionLegacy txn = TransactionLegacy.open("runRemoveFromGlobalLoadBalancerRuleTest"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -785,7 +785,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runRemoveFromGlobalLoadBalancerRuleTestUnassignedLb() throws Exception { - Transaction txn = Transaction.open("runRemoveFromGlobalLoadBalancerRuleTestUnassignedLb"); + TransactionLegacy txn = TransactionLegacy.open("runRemoveFromGlobalLoadBalancerRuleTestUnassignedLb"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -846,7 +846,7 @@ 
public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runRemoveFromGlobalLoadBalancerRuleTestInvalidLb() throws Exception { - Transaction txn = Transaction.open("runRemoveFromGlobalLoadBalancerRuleTestInvalidLb"); + TransactionLegacy txn = TransactionLegacy.open("runRemoveFromGlobalLoadBalancerRuleTestInvalidLb"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -890,7 +890,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runDeleteGlobalLoadBalancerRuleTestWithNoLbRules() throws Exception { - Transaction txn = Transaction.open("runDeleteGlobalLoadBalancerRuleTestWithNoLbRules"); + TransactionLegacy txn = TransactionLegacy.open("runDeleteGlobalLoadBalancerRuleTestWithNoLbRules"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); @@ -936,7 +936,7 @@ public class GlobalLoadBalancingRulesServiceImplTest extends TestCase { void runDeleteGlobalLoadBalancerRuleTestWithLbRules() throws Exception { - Transaction txn = Transaction.open("runDeleteGlobalLoadBalancerRuleTestWithLbRules"); + TransactionLegacy txn = TransactionLegacy.open("runDeleteGlobalLoadBalancerRuleTestWithLbRules"); GlobalLoadBalancingRulesServiceImpl gslbServiceImpl = new GlobalLoadBalancingRulesServiceImpl(); diff --git a/services/console-proxy/server/conf/agent.properties b/services/console-proxy/server/conf/agent.properties deleted file mode 100644 index 246cb1c3d08..00000000000 --- a/services/console-proxy/server/conf/agent.properties +++ /dev/null @@ -1,2 +0,0 @@ -instance=ConsoleProxy -resource=com.cloud.agent.resource.consoleproxy.ConsoleProxyResource diff --git a/services/console-proxy/server/pom.xml b/services/console-proxy/server/pom.xml index 391c15a5b29..114979025d8 100644 --- a/services/console-proxy/server/pom.xml +++ b/services/console-proxy/server/pom.xml @@ -26,9 +26,6 @@ 4.3.0-SNAPSHOT ../pom.xml - - mkisofs - log4j @@ -42,21 +39,9 @@ 
commons-codec commons-codec - org.apache.cloudstack - cloud-agent - ${project.version} - - - org.apache.cloudstack - cloud-patches - ${project.version} - pom - - - org.apache.cloudstack - cloud-secondary-storage + cloud-utils ${project.version} @@ -69,231 +54,5 @@ - - - maven-assembly-plugin - 2.3 - - systemvm - false - - systemvm-descriptor.xml - - - - - make-systemvm - package - - single - - - - - - maven-resources-plugin - 2.6 - - - copy-resources - - package - - copy-resources - - - dist - - - target - - systemvm.zip - - - - ../../../patches/systemvm/debian/config/root/.ssh - - authorized_keys - - - - - - - - - maven-antrun-plugin - 1.7 - - - copy-cloud-scripts - package - - run - - - - - - - - - - - maven-clean-plugin - 2.5 - - - - dist - false - - - target - false - - - - - - - - - - genisoimage - - - /usr/bin/genisoimage - - - - genisoimage - - - - vmware - - - nonoss - - - - - org.apache.cloudstack - cloud-plugin-hypervisor-vmware - ${project.version} - - - org.apache.cloudstack - cloud-vmware-base - ${project.version} - - - - - systemvm - - - systemvm - - - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - package - - exec - - - - - ${mkisofs} - dist - - -quiet - -r - -o - systemvm.iso - systemvm.zip - cloud-scripts.tgz - authorized_keys - - - - - - - - quickcloud - - - - org.codehaus.mojo - exec-maven-plugin - 1.2.1 - - - - java - - - - - com.cloud.agent.AgentShell - - zone=1 - pod=1 - host=192.168.56.1 - guid=ConsoleProxy.1 - - - - javax.net.ssl.trustStore - certs/realhostip.keystore - log.home - ${PWD}/ - - - - - - - - - diff --git a/services/secondary-storage/conf/agent.properties b/services/secondary-storage/conf/agent.properties deleted file mode 100644 index 507ea4d0c4a..00000000000 --- a/services/secondary-storage/conf/agent.properties +++ /dev/null @@ -1,4 +0,0 @@ -#mount.path=~/secondary-storage/ -resource=org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource 
-testCifsMount=cifs://192.168.1.1/CSHV3?user=administrator&password=1pass%40word1 -#testLocalRoot=test diff --git a/services/secondary-storage/conf/environment.properties b/services/secondary-storage/conf/environment.properties deleted file mode 100644 index 269acad9152..00000000000 --- a/services/secondary-storage/conf/environment.properties +++ /dev/null @@ -1,2 +0,0 @@ -paths.script=../../scripts/storage/secondary/ -paths.pid=. diff --git a/services/secondary-storage/conf/log4j-cloud.xml b/services/secondary-storage/conf/log4j-cloud.xml deleted file mode 100644 index 7d9d22cfa99..00000000000 --- a/services/secondary-storage/conf/log4j-cloud.xml +++ /dev/null @@ -1,102 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/services/secondary-storage/conf/log4j.xml b/services/secondary-storage/conf/log4j.xml deleted file mode 100644 index 9511f30aac2..00000000000 --- a/services/secondary-storage/conf/log4j.xml +++ /dev/null @@ -1,102 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/services/secondary-storage/pom.xml b/services/secondary-storage/pom.xml index 9fe8da3299f..b2f10f9b74a 100644 --- a/services/secondary-storage/pom.xml +++ b/services/secondary-storage/pom.xml @@ -48,12 +48,6 @@ cloud-agent ${project.version} - - org.apache.cloudstack - cloud-patches - ${project.version} - pom - org.apache.cloudstack cloud-server @@ -65,7 +59,6 @@ org.apache.maven.plugins maven-surefire-plugin - 2.14 ${skipTests} diff --git a/services/secondary-storage/resources/META-INF/cloudstack/secondary-storage-discoverer/module.properties b/services/secondary-storage/resources/META-INF/cloudstack/secondary-storage-discoverer/module.properties new file mode 100644 index 00000000000..7ff8a3ac13f --- /dev/null 
+++ b/services/secondary-storage/resources/META-INF/cloudstack/secondary-storage-discoverer/module.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +name=secondary-storage-discoverer +parent=discoverer \ No newline at end of file diff --git a/services/secondary-storage/resources/META-INF/cloudstack/secondary-storage-discoverer/spring-secondary-storage-discoverer-context.xml b/services/secondary-storage/resources/META-INF/cloudstack/secondary-storage-discoverer/spring-secondary-storage-discoverer-context.xml new file mode 100644 index 00000000000..30521aa3261 --- /dev/null +++ b/services/secondary-storage/resources/META-INF/cloudstack/secondary-storage-discoverer/spring-secondary-storage-discoverer-context.xml @@ -0,0 +1,36 @@ + + + + + + + + + diff --git a/services/secondary-storage/scripts/_run.sh b/services/secondary-storage/scripts/_run.sh deleted file mode 100755 index cb9624c58e6..00000000000 --- a/services/secondary-storage/scripts/_run.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - - - - -#run.sh runs the console proxy. - -# make sure we delete the old files from the original template -rm console-proxy.jar -rm console-common.jar -rm conf/cloud.properties - -set -x - -CP=./:./conf -for file in *.jar -do - CP=${CP}:$file -done -keyvalues= - -LOGHOME=/var/log/cloud/ -CMDLINE=$(cat /var/cache/cloud/cmdline) - -#CMDLINE="graphical utf8 eth0ip=0.0.0.0 eth0mask=255.255.255.0 eth1ip=192.168.140.40 eth1mask=255.255.255.0 eth2ip=172.24.0.50 eth2mask=255.255.0.0 gateway=172.24.0.1 dns1=72.52.126.11 template=domP dns2=72.52.126.12 host=192.168.1.142 port=8250 mgmtcidr=192.168.1.0/24 localgw=192.168.140.1 zone=5 pod=5" -for i in $CMDLINE - do - KEY=$(echo $i | cut -s -d= -f1) - VALUE=$(echo $i | cut -s -d= -f2) - [ "$KEY" == "" ] && continue - case $KEY in - *) - keyvalues="${keyvalues} $KEY=$VALUE" - esac - done - -tot_mem_k=$(cat /proc/meminfo | grep MemTotal | awk '{print $2}') -let "tot_mem_m=tot_mem_k>>10" -let "eightypcnt=$tot_mem_m*8/10" -let "maxmem=$tot_mem_m-80" - -if [ $maxmem -gt $eightypcnt ] -then - maxmem=$eightypcnt -fi - -java -Djavax.net.ssl.trustStore=./certs/realhostip.keystore -Dlog.home=$LOGHOME -mx${maxmem}m -cp $CP com.cloud.agent.AgentShell $keyvalues $@ diff --git a/services/secondary-storage/scripts/config_auth.sh 
b/services/secondary-storage/scripts/config_auth.sh deleted file mode 100755 index 4b74f8eb995..00000000000 --- a/services/secondary-storage/scripts/config_auth.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - - - - - -BASE_DIR="/var/www/html/copy/template/" -HTACCESS="$BASE_DIR/.htaccess" - -PASSWDFILE="/etc/httpd/.htpasswd" -if [ -d /etc/apache2 ] -then - PASSWDFILE="/etc/apache2/.htpasswd" -fi - -config_htaccess() { - mkdir -p $BASE_DIR - result=$? - echo "Options -Indexes" > $HTACCESS - let "result=$result+$?" - echo "AuthType Basic" >> $HTACCESS - let "result=$result+$?" - echo "AuthName \"Authentication Required\"" >> $HTACCESS - let "result=$result+$?" - echo "AuthUserFile \"$PASSWDFILE\"" >> $HTACCESS - let "result=$result+$?" - echo "Require valid-user" >> $HTACCESS - let "result=$result+$?" - return $result -} - -write_passwd() { - local user=$1 - local passwd=$2 - htpasswd -bc $PASSWDFILE $user $passwd - return $? -} - -if [ $# -ne 2 ] ; then - echo $"Usage: `basename $0` username password " - exit 0 -fi - -write_passwd $1 $2 -if [ $? -ne 0 ] -then - echo "Failed to update password" - exit 2 -fi - -config_htaccess -exit $? 
diff --git a/services/secondary-storage/scripts/config_ssl.sh b/services/secondary-storage/scripts/config_ssl.sh deleted file mode 100755 index e4747872693..00000000000 --- a/services/secondary-storage/scripts/config_ssl.sh +++ /dev/null @@ -1,183 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- - - - -help() { - printf " -c use customized key/cert\n" - printf " -k path of private key\n" - printf " -p path of certificate of public key\n" - printf " -t path of certificate chain\n" -} - - -config_httpd_conf() { - local ip=$1 - local srvr=$2 - cp -f /etc/httpd/conf/httpd.conf.orig /etc/httpd/conf/httpd.conf - sed -i -e "s/Listen.*:80$/Listen $ip:80/" /etc/httpd/conf/httpd.conf - echo " " >> /etc/httpd/conf/httpd.conf - echo " DocumentRoot /var/www/html/" >> /etc/httpd/conf/httpd.conf - echo " ServerName $srvr" >> /etc/httpd/conf/httpd.conf - echo " SSLEngine on" >> /etc/httpd/conf/httpd.conf - echo " SSLCertificateFile /etc/httpd/ssl/certs/realhostip.crt" >> /etc/httpd/conf/httpd.conf - echo " SSLCertificateKeyFile /etc/httpd/ssl/keys/realhostip.key" >> /etc/httpd/conf/httpd.conf - echo "" >> /etc/httpd/conf/httpd.conf -} - -config_apache2_conf() { - local ip=$1 - local srvr=$2 - cp -f /etc/apache2/sites-available/default.orig /etc/apache2/sites-available/default - cp -f /etc/apache2/sites-available/default-ssl.orig /etc/apache2/sites-available/default-ssl - sed -i -e "s///" /etc/apache2/sites-available/default - sed -i -e "s///" /etc/apache2/sites-available/default-ssl - sed -i -e "s/Listen .*:80/Listen $ip:80/g" /etc/apache2/ports.conf - sed -i -e "s/Listen .*:443/Listen $ip:443/g" /etc/apache2/ports.conf - sed -i -e "s/NameVirtualHost .*:80/NameVirtualHost $ip:80/g" /etc/apache2/ports.conf - sed -i 's/ssl-cert-snakeoil.key/cert_apache.key/' /etc/apache2/sites-available/default-ssl - sed -i 's/ssl-cert-snakeoil.pem/cert_apache.crt/' /etc/apache2/sites-available/default-ssl -} - -copy_certs() { - local certdir=$(dirname $0)/certs - local mydir=$(dirname $0) - if [ -d $certdir ] && [ -f $customPrivKey ] && [ -f $customPrivCert ] ; then - mkdir -p /etc/httpd/ssl/keys && mkdir -p /etc/httpd/ssl/certs && cp $customprivKey /etc/httpd/ssl/keys && cp $customPrivCert /etc/httpd/ssl/certs - return $? - fi - if [ ! 
-z customCertChain ] && [ -f $customCertChain ] ; then - cp $customCertChain /etc/httpd/ssl/certs - fi - return 1 -} - -copy_certs_apache2() { - local certdir=$(dirname $0)/certs - local mydir=$(dirname $0) - if [ -f $customPrivKey ] && [ -f $customPrivCert ] ; then - cp $customPrivKey /etc/ssl/private/cert_apache.key && cp $customPrivCert /etc/ssl/certs/cert_apache.crt - fi - if [ ! -z "$customCertChain" ] && [ -f "$customCertChain" ] ; then - cp $customCertChain /etc/ssl/certs/cert_apache_chain.crt - fi - return 0 -} - - -cflag= -cpkflag= -cpcflag= -cccflag= -customPrivKey=$(dirname $0)/certs/realhostip.key -customPrivCert=$(dirname $0)/certs/realhostip.crt -customCertChain= -publicIp= -hostName= -keyStore=$(dirname $0)/certs/realhostip.keystore -aliasName="CPVMCertificate" -storepass="vmops.com" -while getopts 'i:h:k:p:t:c' OPTION -do - case $OPTION in - c) cflag=1 - ;; - k) cpkflag=1 - customPrivKey="$OPTARG" - ;; - p) cpcflag=1 - customPrivCert="$OPTARG" - ;; - t) cccflag=1 - customCertChain="$OPTARG" - ;; - i) publicIp="$OPTARG" - ;; - h) hostName="$OPTARG" - ;; - ?) help - ;; - esac -done - - -if [ -z "$publicIp" ] || [ -z "$hostName" ] -then - help - exit 1 -fi - -if [ "$cflag" == "1" ] -then - if [ "$cpkflag$cpcflag" != "11" ] - then - help - exit 1 - fi - if [ ! -f "$customPrivKey" ] - then - printf "priviate key file is not exist\n" - exit 2 - fi - - if [ ! -f "$customPrivCert" ] - then - printf "public certificate is not exist\n" - exit 3 - fi - - if [ "$cccflag" == "1" ] - then - if [ ! -f "$customCertChain" ] - then - printf "certificate chain is not exist\n" - exit 4 - fi - fi -fi - -if [ -d /etc/apache2 ] -then - copy_certs_apache2 -else - copy_certs -fi - -if [ $? 
-ne 0 ] -then - echo "Failed to copy certificates" - exit 2 -fi - -if [ -f "$customPrivCert" ] -then - keytool -delete -alias $aliasName -keystore $keyStore -storepass $storepass -noprompt - keytool -import -alias $aliasName -keystore $keyStore -storepass $storepass -noprompt -file $customPrivCert -fi - -if [ -d /etc/apache2 ] -then - config_apache2_conf $publicIp $hostName - /etc/init.d/apache2 stop - /etc/init.d/apache2 start -else - config_httpd_conf $publicIp $hostName -fi - - diff --git a/services/secondary-storage/scripts/ipfirewall.sh b/services/secondary-storage/scripts/ipfirewall.sh deleted file mode 100755 index 4711b8ac6db..00000000000 --- a/services/secondary-storage/scripts/ipfirewall.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -BASE_DIR="/var/www/html/copy/" -HTACCESS="$BASE_DIR/.htaccess" - -config_htaccess() { - mkdir -p $BASE_DIR - result=$? - echo "Options -Indexes" > $HTACCESS - let "result=$result+$?" - echo "order deny,allow" >> $HTACCESS - let "result=$result+$?" - echo "deny from all" >> $HTACCESS - let "result=$result+$?" - return $result -} - -ips(){ - echo "allow from $1" >> $HTACCESS - result=$? 
- return $result -} - -is_append="$1" -shift -if [ $is_append != "true" ]; then - config_htaccess -fi -for i in $@ -do - ips "$i" -done -exit $? - diff --git a/services/secondary-storage/scripts/run-proxy.sh b/services/secondary-storage/scripts/run-proxy.sh deleted file mode 100644 index d6ccf7c0091..00000000000 --- a/services/secondary-storage/scripts/run-proxy.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - - - - -#run.sh runs the console proxy. 
- -# make sure we delete the old files from the original template -rm console-proxy.jar -rm console-common.jar -rm conf/cloud.properties - -CP=./:./conf -for file in *.jar -do - CP=${CP}:$file -done - -#CMDLINE=$(cat /proc/cmdline) -#for i in $CMDLINE -# do -# KEY=$(echo $i | cut -d= -f1) -# VALUE=$(echo $i | cut -d= -f2) -# case $KEY in -# mgmt_host) -# MGMT_HOST=$VALUE -# ;; -# esac -# done - -java -mx700m -cp $CP:./conf com.cloud.consoleproxy.ConsoleProxy $@ diff --git a/services/secondary-storage/scripts/run.bat b/services/secondary-storage/scripts/run.bat deleted file mode 100644 index ce6dc404574..00000000000 --- a/services/secondary-storage/scripts/run.bat +++ /dev/null @@ -1,18 +0,0 @@ -rem Licensed to the Apache Software Foundation (ASF) under one -rem or more contributor license agreements. See the NOTICE file -rem distributed with this work for additional information -rem regarding copyright ownership. The ASF licenses this file -rem to you under the Apache License, Version 2.0 (the -rem "License"); you may not use this file except in compliance -rem with the License. You may obtain a copy of the License at -rem -rem http://www.apache.org/licenses/LICENSE-2.0 -rem -rem Unless required by applicable law or agreed to in writing, -rem software distributed under the License is distributed on an -rem "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -rem KIND, either express or implied. See the License for the -rem specific language governing permissions and limitations -rem under the License. 
- -java -mx700m -cp cloud-console-proxy.jar;;cloud-console-common.jar;log4j-1.2.15.jar;apache-log4j-extras-1.0.jar;gson-1.3.jar;commons-logging-1.1.1.jar;.;.\conf; com.cloud.consoleproxy.ConsoleProxy %* diff --git a/services/secondary-storage/scripts/run.sh b/services/secondary-storage/scripts/run.sh deleted file mode 100755 index 146d96f0287..00000000000 --- a/services/secondary-storage/scripts/run.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - - - - - -#_run.sh runs the agent client. - -# set -x - -while true -do - ./_run.sh "$@" & - wait - ex=$? - if [ $ex -eq 0 ] || [ $ex -eq 1 ] || [ $ex -eq 66 ] || [ $ex -gt 128 ]; then - # permanent errors - sleep 5 - fi - - # user stop agent by service cloud stop - grep 'stop' /usr/local/cloud/systemvm/user_request &>/dev/null - if [ $? 
-eq 0 ]; then - timestamp=$(date) - echo "$timestamp User stops cloud.com service" >> /var/log/cloud.log - exit 0 - fi - sleep 5 -done diff --git a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 3ef950b1a0b..75d959be3ba 100755 --- a/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/src/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.storage.resource; + +import static com.cloud.utils.S3Utils.mputFile; import static com.cloud.utils.S3Utils.putFile; import static com.cloud.utils.StringUtils.join; import static com.cloud.utils.db.GlobalLock.executeWithNoWaitLock; @@ -23,7 +25,15 @@ import static java.lang.String.format; import static java.util.Arrays.asList; import static org.apache.commons.lang.StringUtils.substringAfterLast; -import java.io.*; +import java.io.BufferedReader; +import java.io.BufferedWriter; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.FileReader; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; import java.math.BigInteger; import java.net.InetAddress; import java.net.URI; @@ -39,10 +49,19 @@ import java.util.concurrent.Callable; import javax.naming.ConfigurationException; -import com.cloud.agent.api.storage.*; -import com.cloud.storage.VMTemplateStorageResourceAssoc; -import com.cloud.storage.template.*; -import com.cloud.utils.SwiftUtil; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.http.HttpEntity; +import org.apache.http.HttpResponse; +import org.apache.http.NameValuePair; +import org.apache.http.client.HttpClient; +import 
org.apache.http.client.methods.HttpGet; +import org.apache.http.client.utils.URLEncodedUtils; +import org.apache.http.impl.client.DefaultHttpClient; +import org.apache.log4j.Logger; + +import com.amazonaws.services.s3.model.S3ObjectSummary; + import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.DeleteCommand; @@ -56,18 +75,7 @@ import org.apache.cloudstack.storage.template.UploadManagerImpl; import org.apache.cloudstack.storage.to.SnapshotObjectTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.NameValuePair; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.utils.URLEncodedUtils; -import org.apache.http.impl.client.DefaultHttpClient; -import org.apache.log4j.Logger; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.cloud.agent.api.Answer; import com.cloud.agent.api.CheckHealthAnswer; import com.cloud.agent.api.CheckHealthCommand; @@ -88,6 +96,14 @@ import com.cloud.agent.api.SecStorageSetupCommand.Certificates; import com.cloud.agent.api.SecStorageVMSetupCommand; import com.cloud.agent.api.StartupCommand; import com.cloud.agent.api.StartupSecondaryStorageCommand; +import com.cloud.agent.api.storage.CreateEntityDownloadURLCommand; +import com.cloud.agent.api.storage.DeleteEntityDownloadURLCommand; +import com.cloud.agent.api.storage.DownloadAnswer; +import com.cloud.agent.api.storage.ListTemplateAnswer; +import com.cloud.agent.api.storage.ListTemplateCommand; +import com.cloud.agent.api.storage.ListVolumeAnswer; +import com.cloud.agent.api.storage.ListVolumeCommand; +import 
com.cloud.agent.api.storage.UploadCommand; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; @@ -102,10 +118,19 @@ import com.cloud.resource.ServerResourceBase; import com.cloud.storage.DataStoreRole; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.StorageLayer; +import com.cloud.storage.VMTemplateStorageResourceAssoc; +import com.cloud.storage.template.Processor; import com.cloud.storage.template.Processor.FormatInfo; +import com.cloud.storage.template.QCOW2Processor; +import com.cloud.storage.template.RawImageProcessor; +import com.cloud.storage.template.TemplateLocation; +import com.cloud.storage.template.TemplateProp; +import com.cloud.storage.template.VhdProcessor; +import com.cloud.storage.template.VmdkProcessor; import com.cloud.utils.NumbersUtil; import com.cloud.utils.S3Utils; import com.cloud.utils.S3Utils.FileNamingStrategy; +import com.cloud.utils.SwiftUtil; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.script.OutputInterpreter; @@ -172,7 +197,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } public void setInSystemVM(boolean inSystemVM) { - this._inSystemVM = inSystemVM; + _inSystemVM = inSystemVM; } @Override @@ -272,7 +297,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S String finalFileName = templateFilename; String finalDownloadPath = destPath + File.separator + templateFilename; // compute the size of - long size = this._storage.getSize(downloadPath + File.separator + templateFilename); + long size = _storage.getSize(downloadPath + File.separator + templateFilename); DataTO newDestTO = null; @@ -349,7 +374,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S protected Answer copySnapshotToTemplateFromNfsToNfsXenserver(CopyCommand cmd, SnapshotObjectTO srcData, NfsTO 
srcDataStore, TemplateObjectTO destData, NfsTO destDataStore) { - String srcMountPoint = this.getRootDir(srcDataStore.getUrl()); + String srcMountPoint = getRootDir(srcDataStore.getUrl()); String snapshotPath = srcData.getPath(); int index = snapshotPath.lastIndexOf("/"); String snapshotName = snapshotPath.substring(index + 1); @@ -357,17 +382,18 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S snapshotName = snapshotName + ".vhd"; } snapshotPath = snapshotPath.substring(0, index); + snapshotPath = srcMountPoint + File.separator + snapshotPath; - String destMountPoint = this.getRootDir(destDataStore.getUrl()); + String destMountPoint = getRootDir(destDataStore.getUrl()); String destPath = destMountPoint + File.separator + destData.getPath(); String errMsg = null; try { - this._storage.mkdir(destPath); + _storage.mkdir(destPath); String templateUuid = UUID.randomUUID().toString(); String templateName = templateUuid + ".vhd"; - Script command = new Script(this.createTemplateFromSnapshotXenScript, cmd.getWait() * 1000, s_logger); + Script command = new Script(createTemplateFromSnapshotXenScript, cmd.getWait() * 1000, s_logger); command.add("-p", snapshotPath); command.add("-s", snapshotName); command.add("-n", templateName); @@ -424,7 +450,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S // get snapshot file name String templateName = srcFile.getName(); - // add kvm file extension for copied template name + // add kvm file extension for copied template name String fileName = templateName + "." 
+ srcFormat.getFileExtension(); String destFileFullPath = destFile.getAbsolutePath() + File.separator + fileName; s_logger.debug("copy snapshot " + srcFile.getAbsolutePath() + " to template " + destFileFullPath); @@ -442,7 +468,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S bufferWriter.write("\n"); bufferWriter.write("filename=" + fileName); bufferWriter.write("\n"); - long size = this._storage.getSize(destFileFullPath); + long size = _storage.getSize(destFileFullPath); bufferWriter.write("size=" + size); bufferWriter.close(); writer.close(); @@ -509,15 +535,16 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S DataTO destData = cmd.getDestTO(); DataStoreTO srcDataStore = srcData.getDataStore(); DataStoreTO destDataStore = destData.getDataStore(); - if (srcDataStore.getRole() == DataStoreRole.Image || srcDataStore.getRole() == DataStoreRole.ImageCache) { + if (srcDataStore.getRole() == DataStoreRole.Image || srcDataStore.getRole() == DataStoreRole.ImageCache || + srcDataStore.getRole() == DataStoreRole.Primary) { if (!(srcDataStore instanceof NfsTO)) { s_logger.debug("only support nfs storage as src, when create template from snapshot"); return Answer.createUnsupportedCommandAnswer(cmd); } if (destDataStore instanceof NfsTO) { - return copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO) srcData, (NfsTO) srcDataStore, - (TemplateObjectTO) destData, (NfsTO) destDataStore); + return copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO) srcData, (NfsTO)srcDataStore, + (TemplateObjectTO) destData, (NfsTO)destDataStore); } else if (destDataStore instanceof SwiftTO) { //create template on the same data store CopyCmdAnswer answer = (CopyCmdAnswer)copySnapshotToTemplateFromNfsToNfs(cmd, (SnapshotObjectTO) srcData, (NfsTO) srcDataStore, @@ -543,8 +570,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S execute(deleteCommand); } catch (Exception e) { 
s_logger.debug("Failed to clean up staging area:", e); - } - + } + TemplateObjectTO template = new TemplateObjectTO(); template.setPath(swiftPath); template.setSize(templateFile.length()); @@ -569,7 +596,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S execute(deleteCommand); } catch (Exception e) { s_logger.debug("Failed to clean up staging area:", e); - } + } return result; } } @@ -603,7 +630,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S NfsTO destImageStore = (NfsTO) destDataStore; if (srcDataStore instanceof S3TO) { S3TO s3 = (S3TO) srcDataStore; - return this.copyFromS3ToNfs(cmd, srcData, s3, destData, destImageStore); + return copyFromS3ToNfs(cmd, srcData, s3, destData, destImageStore); } else if (srcDataStore instanceof SwiftTO) { return copyFromSwiftToNfs(cmd, srcData, (SwiftTO)srcDataStore, destData, destImageStore); } @@ -792,7 +819,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S processor.configure("template processor", new HashMap()); return processor.getVirtualSize(file); } catch (Exception e) { - s_logger.debug("Failed to get virtual size:" ,e); + s_logger.debug("Failed to get virtual size:" ,e); } return file.length(); } @@ -830,9 +857,15 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } } } - ImageFormat format = this.getTemplateFormat(srcFile.getName()); + + long srcSize = srcFile.length(); + ImageFormat format = getTemplateFormat(srcFile.getName()); String key = destData.getPath() + S3Utils.SEPARATOR + srcFile.getName(); - putFile(s3, srcFile, bucket, key); + if (!s3.getSingleUpload(srcSize)){ + mputFile(s3, srcFile, bucket, key); + } else{ + putFile(s3, srcFile, bucket, key); + } DataTO retObj = null; if (destData.getObjectType() == DataObjectType.TEMPLATE) { @@ -1244,9 +1277,9 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S int index = 
name.lastIndexOf(File.separator); String snapshotPath = name.substring(0, index); if (deleteAllFlag) { - lPath = this.getRootDir(secondaryStorageUrl) + File.separator + snapshotPath + File.separator + "*"; + lPath = getRootDir(secondaryStorageUrl) + File.separator + snapshotPath + File.separator + "*"; } else { - lPath = this.getRootDir(secondaryStorageUrl) + File.separator + name + "*"; + lPath = getRootDir(secondaryStorageUrl) + File.separator + name + "*"; } final String result = deleteLocalFile(lPath); @@ -1434,7 +1467,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S Map s3ListTemplate(S3TO s3) { String bucket = s3.getBucketName(); // List the objects in the source directory on S3 - final List objectSummaries = S3Utils.getDirectory(s3, bucket, this.TEMPLATE_ROOT_DIR); + final List objectSummaries = S3Utils.getDirectory(s3, bucket, TEMPLATE_ROOT_DIR); if (objectSummaries == null) { return null; } @@ -1443,7 +1476,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S String key = objectSummary.getKey(); // String installPath = StringUtils.substringBeforeLast(key, // S3Utils.SEPARATOR); - String uniqueName = this.determineS3TemplateNameFromKey(key); + String uniqueName = determineS3TemplateNameFromKey(key); // TODO: isPublic value, where to get? 
TemplateProp tInfo = new TemplateProp(uniqueName, key, objectSummary.getSize(), objectSummary.getSize(), true, false); @@ -1456,7 +1489,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S Map s3ListVolume(S3TO s3) { String bucket = s3.getBucketName(); // List the objects in the source directory on S3 - final List objectSummaries = S3Utils.getDirectory(s3, bucket, this.VOLUME_ROOT_DIR); + final List objectSummaries = S3Utils.getDirectory(s3, bucket, VOLUME_ROOT_DIR); if (objectSummaries == null) { return null; } @@ -1465,7 +1498,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S String key = objectSummary.getKey(); // String installPath = StringUtils.substringBeforeLast(key, // S3Utils.SEPARATOR); - Long id = this.determineS3VolumeIdFromKey(key); + Long id = determineS3VolumeIdFromKey(key); // TODO: how to get volume template name TemplateProp tInfo = new TemplateProp(id.toString(), key, objectSummary.getSize(), objectSummary.getSize(), true, false); @@ -2226,8 +2259,8 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S * * CIFS parameters are documented with mount.cifs at * http://linux.die.net/man/8/mount.cifs - * For simplicity, when a URI is used to specify a CIFS share, - * options such as domain,user,password are passed as query parameters. + * For simplicity, when a URI is used to specify a CIFS share, + * options such as domain,user,password are passed as query parameters. * * @param uri * crresponding to the remote device. 
Will throw for unsupported @@ -2262,7 +2295,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S return dir; } - + protected void umount(String localRootPath, URI uri) { ensureLocalRootPathExists(localRootPath, uri); @@ -2286,7 +2319,7 @@ public class NfsSecondaryStorageResource extends ServerResourceBase implements S } s_logger.debug("Successfully umounted " + localRootPath); } - + protected void mount(String localRootPath, String remoteDevice, URI uri) { s_logger.debug("mount " + uri.toString() + " on " + localRootPath); ensureLocalRootPathExists(localRootPath, uri); diff --git a/setup/bindir/cloud-sysvmadm.in b/setup/bindir/cloud-sysvmadm.in index 3cb7858150b..e2a626ef922 100755 --- a/setup/bindir/cloud-sysvmadm.in +++ b/setup/bindir/cloud-sysvmadm.in @@ -23,13 +23,14 @@ #set -x usage() { - printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-n] [-z]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -n - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. Default is cloud.log under current directory.\n -z - do restart only for the instances in the specific zone. 
If not specified, restart will apply to instances in all zones\n\n" $(basename $0) >&2 + printf "\nThe tool stopping/starting running system vms and domain routers \n\nUsage: %s: [-d] [-u] [-p] [-m] [-s] [-r] [-a] [-t] [-n] [-z] [-v]\n\n -d - cloud DB server ip address, defaulted to localhost if not specified \n -u - user name to access cloud DB, defaulted to "root" if not specified \n -p - cloud DB user password, defaulted to no password if not specified \n\n -m - the ip address of management server, defaulted to localhost if not specified\n\n -s - stop then start all running SSVMs and Console Proxies \n -r - stop then start all running Virtual Routers\n -a - stop then start all running SSVMs, Console Proxies, and Virtual Routers \n -n - restart all Guest networks \n -t - number of parallel threads used for stopping Domain Routers. Default is 10.\n -l - log file location. Default is cloud.log under current directory.\n -z - do restart only for the instances in the specific zone. If not specified, restart will apply to instances in all zones\n -v - do restart all VPCs in the entire system\n\n" $(basename $0) >&2 } system= router= all= +vpc= db=localhost ms=localhost user=root @@ -42,7 +43,7 @@ inzone="" -while getopts 'sarhnd:m:u:p:t:l:z:' OPTION +while getopts 'sarhnvd:m:u:p:t:l:z:' OPTION do case $OPTION in s) system=1 @@ -53,6 +54,8 @@ do ;; a) all=1 ;; + v) vpc=1 + ;; d) db="$OPTARG" ;; u) user="$OPTARG" @@ -317,6 +320,92 @@ restart_network(){ } + +restart_vpc(){ + echo -e "INFO: Restarting vpc with id $1" + echo "INFO: Restarting vpc with id $1" >>$LOGFILE + jobid=`curl -sS "http://$ms:8096/?command=restartVPC&id=$1&response=json" | sed 's/\"//g' | sed 's/ //g' | sed 's/{//g' | sed 's/}//g' | awk -F: {'print $3'}` + if [ "$jobid" == "" ]; then + echo "ERROR: Failed to restart vpc with id $1" >>$LOGFILE + echo 2 + return + fi + + jobresult=$(query_async_job_result $jobid) + + if [ "$jobresult" != "1" ]; then + echo -e "ERROR: Failed to restart vpc with id $1 
\n" + echo "ERROR: Failed to restart vpc with id $1" >>$LOGFILE + else + echo -e "INFO: Successfully restarted vpc with id $1 \n" + echo "INFO: Successfully restarted vpc with id $1" >>$LOGFILE + fi +} + + +restart_vpcs(){ + vpcs=(`mysql -h $db --user=$user --password=$password --skip-column-names -U cloud -e "select id from vpc WHERE removed is null$zone"`) + length_vpcs=(${#vpcs[@]}) + + echo -e "\nRestarting $length_vpcs vpcs... " + echo -e "Restarting $length_vpcs vpcs... " >>$LOGFILE + + #Spawn restart vpcs in parallel - run commands in chunks - number of threads is configurable + + pids=() + for d in "${vpcs[@]}"; do + + restart_vpc $d & + + pids=( "${pids[@]}" $! ) + + length_pids=(${#pids[@]}) + unfinishedPids=(${#pids[@]}) + + if [ $maxthreads -gt $length_vpcs ]; then + maxthreads=$length_vpcs + fi + + if [ $length_pids -ge $maxthreads ]; then + while [ $unfinishedPids -gt 0 ]; do + sleep 10 + count=0 + for (( i = 0 ; i < $length_pids; i++ )); do + if ! ps ax | grep -v grep | grep ${pids[$i]} > /dev/null; then + count=`expr $count + 1` + fi + done + + if [ $count -eq $unfinishedPids ]; then + unfinishedPids=0 + fi + + done + + #remove all elements from pids + if [ $unfinishedPids -eq 0 ]; then + pids=() + length_pids=(${#pids[@]}) + fi + + fi + + done + + + if [ "$length_vpcs" == "0" ];then + echo -e "No vpcs found \n" >>$LOGFILE + else + while [ $unfinishedPids -gt 0 ]; do + sleep 10 + done + + echo -e "Done restarting vpcs$inzone. \n" + echo -e "Done restarting vpcs$inzone. 
\n" >>$LOGFILE + + fi +} + query_async_job_result() { while [ 1 ] do @@ -329,7 +418,7 @@ sleep 5 done } -if [ "$system$router$all$help$redundant" == "" ] +if [ "$system$router$all$help$redundant$vpc" == "" ] then usage exit @@ -361,3 +450,10 @@ if [ "$redundant" == "1" ] then restart_networks fi + +if [ "$vpc" == "1" ] +then + restart_vpcs +fi + + diff --git a/setup/db/db/schema-410to420.sql b/setup/db/db/schema-410to420.sql index 768183bef59..522ccc4a272 100644 --- a/setup/db/db/schema-410to420.sql +++ b/setup/db/db/schema-410to420.sql @@ -265,7 +265,7 @@ CREATE TABLE `vpc_service_map` ( `created` datetime COMMENT 'date created', PRIMARY KEY (`id`), CONSTRAINT `fk_vpc_service_map__vpc_id` FOREIGN KEY(`vpc_id`) REFERENCES `vpc`(`id`) ON DELETE CASCADE, - UNIQUE (`vpc_id`, `service`) + UNIQUE (`vpc_id`, `service`, `provider`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; CREATE TABLE `cloud`.`load_balancer_healthcheck_policies` ( @@ -2371,3 +2371,7 @@ CREATE VIEW `cloud`.`data_center_view` AS `cloud`.`dedicated_resources` ON data_center.id = dedicated_resources.data_center_id left join `cloud`.`affinity_group` ON dedicated_resources.affinity_group_id = affinity_group.id; + + + +UPDATE `cloud`.`ntwk_offering_service_map` SET Provider='VpcVirtualRouter' WHERE network_offering_id IN (SELECT id from `cloud`.`network_offerings` WHERE name IN ('DefaultIsolatedNetworkOfferingForVpcNetworks', 'DefaultIsolatedNetworkOfferingForVpcNetworksNoLB')); diff --git a/setup/db/db/schema-420to421.sql b/setup/db/db/schema-420to421.sql new file mode 100644 index 00000000000..e72e1b219c6 --- /dev/null +++ b/setup/db/db/schema-420to421.sql @@ -0,0 +1,218 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. 
The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.2.0 to 4.2.1; +--; + + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 's3.singleupload.max.size', '5', + 'The maximum size limit for S3 single part upload API(in GB). If it is set to 0, then it means always use multi-part upload to upload object to S3. If it is set to -1, then it means always use single-part upload to upload object to S3.'); + +INSERT IGNORE INTO `cloud`.`configuration` VALUES ("Storage", 'DEFAULT', 'management-server', "enable.ha.storage.migration", "true", "Enable/disable storage migration across primary storage during HA"); + +-- Remove Windows Server 8 from guest_os_type dropdown to use Windows Server 2012 +DELETE FROM `cloud`.`guest_os_hypervisor` where guest_os_id=168; +DELETE FROM `cloud`.`guest_os` where id=168; + + +--Add details to the user_vm_view (CLOUDSTACK-4649 - xen and UI code needs to retrieve the value of "hypervisortoolsversion" detail) + +DROP VIEW IF EXISTS `cloud`.`user_vm_view`; +CREATE VIEW `cloud`.`user_vm_view` AS + select + vm_instance.id id, + vm_instance.name name, + user_vm.display_name display_name, + user_vm.user_data user_data, + account.id account_id, + account.uuid account_uuid, + account.account_name account_name, + account.type account_type, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path, + 
projects.id project_id, + projects.uuid project_uuid, + projects.name project_name, + instance_group.id instance_group_id, + instance_group.uuid instance_group_uuid, + instance_group.name instance_group_name, + vm_instance.uuid uuid, + vm_instance.last_host_id last_host_id, + vm_instance.vm_type type, + vm_instance.vnc_password vnc_password, + vm_instance.limit_cpu_use limit_cpu_use, + vm_instance.created created, + vm_instance.state state, + vm_instance.removed removed, + vm_instance.ha_enabled ha_enabled, + vm_instance.hypervisor_type hypervisor_type, + vm_instance.instance_name instance_name, + vm_instance.guest_os_id guest_os_id, + vm_instance.display_vm display_vm, + guest_os.uuid guest_os_uuid, + vm_instance.pod_id pod_id, + host_pod_ref.uuid pod_uuid, + vm_instance.private_ip_address private_ip_address, + vm_instance.private_mac_address private_mac_address, + vm_instance.vm_type vm_type, + data_center.id data_center_id, + data_center.uuid data_center_uuid, + data_center.name data_center_name, + data_center.is_security_group_enabled security_group_enabled, + data_center.networktype data_center_type, + host.id host_id, + host.uuid host_uuid, + host.name host_name, + vm_template.id template_id, + vm_template.uuid template_uuid, + vm_template.name template_name, + vm_template.display_text template_display_text, + vm_template.enable_password password_enabled, + iso.id iso_id, + iso.uuid iso_uuid, + iso.name iso_name, + iso.display_text iso_display_text, + service_offering.id service_offering_id, + disk_offering.uuid service_offering_uuid, + service_offering.cpu cpu, + service_offering.speed speed, + service_offering.ram_size ram_size, + disk_offering.name service_offering_name, + storage_pool.id pool_id, + storage_pool.uuid pool_uuid, + storage_pool.pool_type pool_type, + volumes.id volume_id, + volumes.uuid volume_uuid, + volumes.device_id volume_device_id, + volumes.volume_type volume_type, + security_group.id security_group_id, + security_group.uuid 
security_group_uuid, + security_group.name security_group_name, + security_group.description security_group_description, + nics.id nic_id, + nics.uuid nic_uuid, + nics.network_id network_id, + nics.ip4_address ip_address, + nics.ip6_address ip6_address, + nics.ip6_gateway ip6_gateway, + nics.ip6_cidr ip6_cidr, + nics.default_nic is_default_nic, + nics.gateway gateway, + nics.netmask netmask, + nics.mac_address mac_address, + nics.broadcast_uri broadcast_uri, + nics.isolation_uri isolation_uri, + vpc.id vpc_id, + vpc.uuid vpc_uuid, + networks.uuid network_uuid, + networks.name network_name, + networks.traffic_type traffic_type, + networks.guest_type guest_type, + user_ip_address.id public_ip_id, + user_ip_address.uuid public_ip_uuid, + user_ip_address.public_ip_address public_ip_address, + ssh_keypairs.keypair_name keypair_name, + resource_tags.id tag_id, + resource_tags.uuid tag_uuid, + resource_tags.key tag_key, + resource_tags.value tag_value, + resource_tags.domain_id tag_domain_id, + resource_tags.account_id tag_account_id, + resource_tags.resource_id tag_resource_id, + resource_tags.resource_uuid tag_resource_uuid, + resource_tags.resource_type tag_resource_type, + resource_tags.customer tag_customer, + async_job.id job_id, + async_job.uuid job_uuid, + async_job.job_status job_status, + async_job.account_id job_account_id, + affinity_group.id affinity_group_id, + affinity_group.uuid affinity_group_uuid, + affinity_group.name affinity_group_name, + affinity_group.description affinity_group_description, + vm_instance.dynamically_scalable dynamically_scalable, + all_details.name detail_name, + all_details.value detail_value + + from + `cloud`.`user_vm` + inner join + `cloud`.`vm_instance` ON vm_instance.id = user_vm.id + and vm_instance.removed is NULL + inner join + `cloud`.`account` ON vm_instance.account_id = account.id + inner join + `cloud`.`domain` ON vm_instance.domain_id = domain.id + left join + `cloud`.`guest_os` ON vm_instance.guest_os_id = guest_os.id 
+ left join + `cloud`.`host_pod_ref` ON vm_instance.pod_id = host_pod_ref.id + left join + `cloud`.`projects` ON projects.project_account_id = account.id + left join + `cloud`.`instance_group_vm_map` ON vm_instance.id = instance_group_vm_map.instance_id + left join + `cloud`.`instance_group` ON instance_group_vm_map.group_id = instance_group.id + left join + `cloud`.`data_center` ON vm_instance.data_center_id = data_center.id + left join + `cloud`.`host` ON vm_instance.host_id = host.id + left join + `cloud`.`vm_template` ON vm_instance.vm_template_id = vm_template.id + left join + `cloud`.`vm_template` iso ON iso.id = user_vm.iso_id + left join + `cloud`.`service_offering` ON vm_instance.service_offering_id = service_offering.id + left join + `cloud`.`disk_offering` ON vm_instance.service_offering_id = disk_offering.id + left join + `cloud`.`volumes` ON vm_instance.id = volumes.instance_id + left join + `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id + left join + `cloud`.`security_group_vm_map` ON vm_instance.id = security_group_vm_map.instance_id + left join + `cloud`.`security_group` ON security_group_vm_map.security_group_id = security_group.id + left join + `cloud`.`nics` ON vm_instance.id = nics.instance_id and nics.removed is null + left join + `cloud`.`networks` ON nics.network_id = networks.id + left join + `cloud`.`vpc` ON networks.vpc_id = vpc.id and vpc.removed is null + left join + `cloud`.`user_ip_address` ON user_ip_address.vm_id = vm_instance.id + left join + `cloud`.`user_vm_details` as ssh_details ON ssh_details.vm_id = vm_instance.id + and ssh_details.name = 'SSH.PublicKey' + left join + `cloud`.`ssh_keypairs` ON ssh_keypairs.public_key = ssh_details.value + left join + `cloud`.`resource_tags` ON resource_tags.resource_id = vm_instance.id + and resource_tags.resource_type = 'UserVm' + left join + `cloud`.`async_job` ON async_job.instance_id = vm_instance.id + and async_job.instance_type = 'VirtualMachine' + and async_job.job_status = 
0 + left join + `cloud`.`affinity_group_vm_map` ON vm_instance.id = affinity_group_vm_map.instance_id + left join + `cloud`.`affinity_group` ON affinity_group_vm_map.affinity_group_id = affinity_group.id + left join + `cloud`.`user_vm_details` as all_details ON all_details.vm_id = vm_instance.id; \ No newline at end of file diff --git a/setup/db/db/schema-420to430-cleanup.sql b/setup/db/db/schema-421to430-cleanup.sql similarity index 100% rename from setup/db/db/schema-420to430-cleanup.sql rename to setup/db/db/schema-421to430-cleanup.sql diff --git a/setup/db/db/schema-420to430.sql b/setup/db/db/schema-421to430.sql similarity index 72% rename from setup/db/db/schema-420to430.sql rename to setup/db/db/schema-421to430.sql index ec68273ef2e..5d7ba4cc047 100644 --- a/setup/db/db/schema-420to430.sql +++ b/setup/db/db/schema-421to430.sql @@ -33,12 +33,23 @@ ALTER TABLE `cloud`.`async_job` ADD COLUMN `job_dispatcher` VARCHAR(64); ALTER TABLE `cloud`.`async_job` ADD COLUMN `job_executing_msid` bigint; ALTER TABLE `cloud`.`async_job` ADD COLUMN `job_pending_signals` int(10) NOT NULL DEFAULT 0; +ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `keep_alive_enabled` int(1) unsigned NOT NULL DEFAULT 1 COMMENT 'true if connection should be reset after requests.'; + ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `power_state` VARCHAR(74) DEFAULT 'PowerUnknown'; ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `power_state_update_time` DATETIME; ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `power_state_update_count` INT DEFAULT 0; ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `power_host` bigint unsigned; ALTER TABLE `cloud`.`vm_instance` ADD CONSTRAINT `fk_vm_instance__power_host` FOREIGN KEY (`power_host`) REFERENCES `cloud`.`host`(`id`); +DROP TABLE IF EXISTS `cloud`.`vm_snapshot_details`; +CREATE TABLE `cloud`.`vm_snapshot_details` ( + `id` bigint unsigned UNIQUE NOT NULL, + `vm_snapshot_id` bigint unsigned NOT NULL, + `name` varchar(255) NOT NULL, + `value` varchar(255) NOT NULL, 
+ PRIMARY KEY (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + CREATE TABLE `cloud`.`vm_work_job` ( `id` bigint unsigned UNIQUE NOT NULL, `step` char(32) NOT NULL COMMENT 'state', @@ -105,6 +116,9 @@ UPDATE `cloud`.`vm_template` SET `state`='Inactive' WHERE `removed` IS NOT NULL; UPDATE `cloud`.`vm_template` SET `state`='Active' WHERE `removed` IS NULL; UPDATE `cloud`.`vm_template` SET `removed`=NULL; +ALTER TABLE `cloud`.`remote_access_vpn` MODIFY COLUMN `network_id` bigint unsigned; +ALTER TABLE `cloud`.`remote_access_vpn` ADD COLUMN `vpc_id` bigint unsigned default NULL; + DROP VIEW IF EXISTS `cloud`.`disk_offering_view`; CREATE VIEW `cloud`.`disk_offering_view` AS select @@ -462,4 +476,191 @@ CREATE VIEW `cloud`.`acl_group_view` AS `cloud`.`account` ON acl_group_account_map.account_id = account.id left join `cloud`.`acl_entity_permission` ON acl_group.id = acl_entity_permission.group_id; - \ No newline at end of file + +DROP VIEW IF EXISTS `cloud`.`volume_view`; +CREATE VIEW `cloud`.`volume_view` AS + select + volumes.id, + volumes.uuid, + volumes.name, + volumes.device_id, + volumes.volume_type, + volumes.size, + volumes.min_iops, + volumes.max_iops, + volumes.created, + volumes.state, + volumes.attached, + volumes.removed, + volumes.pod_id, + volumes.display_volume, + volumes.format, + volumes.path, + account.id account_id, + account.uuid account_uuid, + account.account_name account_name, + account.type account_type, + domain.id domain_id, + domain.uuid domain_uuid, + domain.name domain_name, + domain.path domain_path, + projects.id project_id, + projects.uuid project_uuid, + projects.name project_name, + data_center.id data_center_id, + data_center.uuid data_center_uuid, + data_center.name data_center_name, + data_center.networktype data_center_type, + vm_instance.id vm_id, + vm_instance.uuid vm_uuid, + vm_instance.name vm_name, + vm_instance.state vm_state, + vm_instance.vm_type, + user_vm.display_name vm_display_name, + volume_store_ref.size 
volume_store_size, + volume_store_ref.download_pct, + volume_store_ref.download_state, + volume_store_ref.error_str, + volume_store_ref.created created_on_store, + disk_offering.id disk_offering_id, + disk_offering.uuid disk_offering_uuid, + disk_offering.name disk_offering_name, + disk_offering.display_text disk_offering_display_text, + disk_offering.use_local_storage, + disk_offering.system_use, + disk_offering.bytes_read_rate, + disk_offering.bytes_write_rate, + disk_offering.iops_read_rate, + disk_offering.iops_write_rate, + storage_pool.id pool_id, + storage_pool.uuid pool_uuid, + storage_pool.name pool_name, + cluster.hypervisor_type, + vm_template.id template_id, + vm_template.uuid template_uuid, + vm_template.extractable, + vm_template.type template_type, + resource_tags.id tag_id, + resource_tags.uuid tag_uuid, + resource_tags.key tag_key, + resource_tags.value tag_value, + resource_tags.domain_id tag_domain_id, + resource_tags.account_id tag_account_id, + resource_tags.resource_id tag_resource_id, + resource_tags.resource_uuid tag_resource_uuid, + resource_tags.resource_type tag_resource_type, + resource_tags.customer tag_customer, + async_job.id job_id, + async_job.uuid job_uuid, + async_job.job_status job_status, + async_job.account_id job_account_id + from + `cloud`.`volumes` + inner join + `cloud`.`account` ON volumes.account_id = account.id + inner join + `cloud`.`domain` ON volumes.domain_id = domain.id + left join + `cloud`.`projects` ON projects.project_account_id = account.id + left join + `cloud`.`data_center` ON volumes.data_center_id = data_center.id + left join + `cloud`.`vm_instance` ON volumes.instance_id = vm_instance.id + left join + `cloud`.`user_vm` ON user_vm.id = vm_instance.id + left join + `cloud`.`volume_store_ref` ON volumes.id = volume_store_ref.volume_id + left join + `cloud`.`disk_offering` ON volumes.disk_offering_id = disk_offering.id + left join + `cloud`.`storage_pool` ON volumes.pool_id = storage_pool.id + left join + 
`cloud`.`cluster` ON storage_pool.cluster_id = cluster.id + left join + `cloud`.`vm_template` ON volumes.template_id = vm_template.id OR volumes.iso_id = vm_template.id + left join + `cloud`.`resource_tags` ON resource_tags.resource_id = volumes.id + and resource_tags.resource_type = 'Volume' + left join + `cloud`.`async_job` ON async_job.instance_id = volumes.id + and async_job.instance_type = 'Volume' + and async_job.job_status = 0; + + +DROP VIEW IF EXISTS `cloud`.`storage_pool_view`; +CREATE VIEW `cloud`.`storage_pool_view` AS + select + storage_pool.id, + storage_pool.uuid, + storage_pool.name, + storage_pool.status, + storage_pool.path, + storage_pool.pool_type, + storage_pool.host_address, + storage_pool.created, + storage_pool.removed, + storage_pool.capacity_bytes, + storage_pool.capacity_iops, + storage_pool.scope, + storage_pool.hypervisor, + storage_pool.storage_provider_name, + cluster.id cluster_id, + cluster.uuid cluster_uuid, + cluster.name cluster_name, + cluster.cluster_type, + data_center.id data_center_id, + data_center.uuid data_center_uuid, + data_center.name data_center_name, + data_center.networktype data_center_type, + host_pod_ref.id pod_id, + host_pod_ref.uuid pod_uuid, + host_pod_ref.name pod_name, + storage_pool_details.name tag, + op_host_capacity.used_capacity disk_used_capacity, + op_host_capacity.reserved_capacity disk_reserved_capacity, + async_job.id job_id, + async_job.uuid job_uuid, + async_job.job_status job_status, + async_job.account_id job_account_id + from + `cloud`.`storage_pool` + left join + `cloud`.`cluster` ON storage_pool.cluster_id = cluster.id + left join + `cloud`.`data_center` ON storage_pool.data_center_id = data_center.id + left join + `cloud`.`host_pod_ref` ON storage_pool.pod_id = host_pod_ref.id + left join + `cloud`.`storage_pool_details` ON storage_pool_details.pool_id = storage_pool.id + and storage_pool_details.value = 'true' + left join + `cloud`.`op_host_capacity` ON storage_pool.id = 
op_host_capacity.host_id + and op_host_capacity.capacity_type = 3 + left join + `cloud`.`async_job` ON async_job.instance_id = storage_pool.id + and async_job.instance_type = 'StoragePool' + and async_job.job_status = 0; + + + DROP TABLE IF EXISTS `cloud`.`vm_snapshot_details`; + +CREATE TABLE `cloud`.`firewall_rule_details` ( + `id` bigint unsigned NOT NULL auto_increment, + `firewall_rule_id` bigint unsigned NOT NULL COMMENT 'Firewall rule id', + `name` varchar(255) NOT NULL, + `value` varchar(1024) NOT NULL, + `display` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be displayed to the end user', + PRIMARY KEY (`id`), + CONSTRAINT `fk_firewall_rule_details__firewall_rule_id` FOREIGN KEY `fk_firewall_rule_details__firewall_rule_id`(`firewall_rule_id`) REFERENCES `firewall_rules`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + + +ALTER TABLE `cloud`.`data_center_details` ADD COLUMN `display` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be displayed to the end user'; +ALTER TABLE `cloud`.`network_details` CHANGE `display_detail` `display` tinyint(0) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be displayed to the end user'; +ALTER TABLE `cloud`.`vm_template_details` ADD COLUMN `display` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be displayed to the end user'; +ALTER TABLE `cloud`.`volume_details` CHANGE `display_detail` `display` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be displayed to the end user'; +ALTER TABLE `cloud`.`nic_details` CHANGE `display_detail` `display` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be displayed to the end user'; +ALTER TABLE `cloud`.`user_vm_details` CHANGE `display_detail` `display` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be displayed to the end user'; +ALTER TABLE `cloud`.`service_offering_details` ADD COLUMN `display` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be 
displayed to the end user'; +ALTER TABLE `cloud`.`storage_pool_details` ADD COLUMN `display` tinyint(1) NOT NULL DEFAULT '0' COMMENT 'True if the detail can be displayed to the end user'; + diff --git a/setup/db/templates.sql b/setup/db/templates.sql index e030852994f..e5a653c32de 100755 --- a/setup/db/templates.sql +++ b/setup/db/templates.sql @@ -217,7 +217,6 @@ INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (164 INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (165, UUID(), 6, 'Windows 8 (32-bit)'); INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (166, UUID(), 6, 'Windows 8 (64-bit)'); INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (167, UUID(), 6, 'Windows Server 2012 (64-bit)'); -INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (168, UUID(), 6, 'Windows Server 8 (64-bit)'); INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (169, UUID(), 10, 'Ubuntu 11.04 (32-bit)'); INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (170, UUID(), 10, 'Ubuntu 11.04 (64-bit)'); INSERT INTO `cloud`.`guest_os` (id, uuid, category_id, display_name) VALUES (171, UUID(), 1, 'CentOS 6.3 (32-bit)'); @@ -373,8 +372,6 @@ INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("XenServer", 'Windows 8 (32-bit)', 165); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("XenServer", 'Windows 8 (64-bit)', 166); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("XenServer", 'Windows Server 2012 (64-bit)', 167); -INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("XenServer", 'Windows Server 8 (64-bit)', 168); - INSERT INTO `cloud`.`guest_os_hypervisor` 
(hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows 7(32-bit)', 48); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Microsoft Windows 7(64-bit)', 49); @@ -406,7 +403,6 @@ INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8 (32-bit)', 165); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows 8 (64-bit)', 166); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows Server 2012 (64-bit)', 167); -INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Windows Server 8 (64-bit)', 168); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Red Hat Enterprise Linux 5.0(32-bit)', 30); INSERT INTO `cloud`.`guest_os_hypervisor` (hypervisor_type, guest_os_name, guest_os_id) VALUES ("VmWare", 'Red Hat Enterprise Linux 5.1(32-bit)', 32); diff --git a/setup/dev/advanced.cfg b/setup/dev/advanced.cfg index 4a483995f48..216314ff6bc 100644 --- a/setup/dev/advanced.cfg +++ b/setup/dev/advanced.cfg @@ -14,7 +14,6 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
- { "zones": [ { @@ -220,7 +219,10 @@ "passwd": "password", "user": "root", "port": 8096, - "hypervisor" : "simulator" + "hypervisor": "simulator", + "useHttps": "False", + "certCAPath": "NA", + "certPath": "NA" } ] } diff --git a/services/console-proxy/server/bindir/cloud-setup-console-proxy.in b/systemvm/bindir/cloud-setup-console-proxy.in similarity index 100% rename from services/console-proxy/server/bindir/cloud-setup-console-proxy.in rename to systemvm/bindir/cloud-setup-console-proxy.in diff --git a/services/console-proxy/server/certs/localhost.crt b/systemvm/certs/localhost.crt similarity index 100% rename from services/console-proxy/server/certs/localhost.crt rename to systemvm/certs/localhost.crt diff --git a/services/console-proxy/server/certs/localhost.key b/systemvm/certs/localhost.key similarity index 100% rename from services/console-proxy/server/certs/localhost.key rename to systemvm/certs/localhost.key diff --git a/services/console-proxy/server/certs/realhostip.crt b/systemvm/certs/realhostip.crt similarity index 100% rename from services/console-proxy/server/certs/realhostip.crt rename to systemvm/certs/realhostip.crt diff --git a/services/console-proxy/server/certs/realhostip.csr b/systemvm/certs/realhostip.csr similarity index 100% rename from services/console-proxy/server/certs/realhostip.csr rename to systemvm/certs/realhostip.csr diff --git a/services/console-proxy/server/certs/realhostip.key b/systemvm/certs/realhostip.key similarity index 100% rename from services/console-proxy/server/certs/realhostip.key rename to systemvm/certs/realhostip.key diff --git a/services/console-proxy/server/certs/realhostip.keystore b/systemvm/certs/realhostip.keystore similarity index 100% rename from services/console-proxy/server/certs/realhostip.keystore rename to systemvm/certs/realhostip.keystore diff --git a/patches/cloudpatch-descriptor.xml b/systemvm/cloudpatch-descriptor.xml similarity index 100% rename from patches/cloudpatch-descriptor.xml rename to 
systemvm/cloudpatch-descriptor.xml diff --git a/services/console-proxy/server/conf.dom0/agent.properties.in b/systemvm/conf.dom0/agent.properties.in similarity index 100% rename from services/console-proxy/server/conf.dom0/agent.properties.in rename to systemvm/conf.dom0/agent.properties.in diff --git a/services/console-proxy/server/conf.dom0/consoleproxy.properties.in b/systemvm/conf.dom0/consoleproxy.properties.in similarity index 100% rename from services/console-proxy/server/conf.dom0/consoleproxy.properties.in rename to systemvm/conf.dom0/consoleproxy.properties.in diff --git a/services/console-proxy/server/conf.dom0/log4j-cloud.xml.in b/systemvm/conf.dom0/log4j-cloud.xml.in similarity index 100% rename from services/console-proxy/server/conf.dom0/log4j-cloud.xml.in rename to systemvm/conf.dom0/log4j-cloud.xml.in diff --git a/systemvm/conf/agent.properties b/systemvm/conf/agent.properties new file mode 100644 index 00000000000..051cf610710 --- /dev/null +++ b/systemvm/conf/agent.properties @@ -0,0 +1,18 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+instance=ConsoleProxy +resource=com.cloud.agent.resource.consoleproxy.ConsoleProxyResource diff --git a/docs/runbook/zh-CN/Runbook.po b/systemvm/conf/agent.properties.ssvm similarity index 69% rename from docs/runbook/zh-CN/Runbook.po rename to systemvm/conf/agent.properties.ssvm index 2eee6c1bc90..2f87b88dba1 100644 --- a/docs/runbook/zh-CN/Runbook.po +++ b/systemvm/conf/agent.properties.ssvm @@ -13,15 +13,9 @@ # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations -# under the License.# -msgid "" -msgstr "" -"Project-Id-Version: 0\n" -"POT-Creation-Date: 2012-08-04T04:05:40\n" -"PO-Revision-Date: 2012-08-04T04:05:40\n" -"Last-Translator: Automatically generated\n" -"Language-Team: None\n" -"MIME-Version: 1.0\n" -"Content-Type: application/x-publican; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" +# under the License. +#mount.path=~/secondary-storage/ +resource=org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource +testCifsMount=cifs://192.168.1.1/CSHV3?user=administrator&password=1pass%40word1 +#testLocalRoot=test diff --git a/services/console-proxy/server/conf/consoleproxy.properties b/systemvm/conf/consoleproxy.properties similarity index 100% rename from services/console-proxy/server/conf/consoleproxy.properties rename to systemvm/conf/consoleproxy.properties diff --git a/services/console-proxy/server/conf/environment.properties b/systemvm/conf/environment.properties similarity index 100% rename from services/console-proxy/server/conf/environment.properties rename to systemvm/conf/environment.properties diff --git a/services/console-proxy/server/conf/log4j-cloud.xml b/systemvm/conf/log4j-cloud.xml similarity index 100% rename from services/console-proxy/server/conf/log4j-cloud.xml rename to systemvm/conf/log4j-cloud.xml diff --git a/services/console-proxy/server/css/ajaxviewer.css b/systemvm/css/ajaxviewer.css similarity 
index 100% rename from services/console-proxy/server/css/ajaxviewer.css rename to systemvm/css/ajaxviewer.css diff --git a/services/console-proxy/server/css/logger.css b/systemvm/css/logger.css similarity index 100% rename from services/console-proxy/server/css/logger.css rename to systemvm/css/logger.css diff --git a/services/console-proxy/server/distro/centos/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in b/systemvm/distro/centos/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in similarity index 100% rename from services/console-proxy/server/distro/centos/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in rename to systemvm/distro/centos/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in diff --git a/services/console-proxy/server/distro/fedora/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in b/systemvm/distro/fedora/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in similarity index 100% rename from services/console-proxy/server/distro/fedora/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in rename to systemvm/distro/fedora/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in diff --git a/services/console-proxy/server/distro/rhel/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in b/systemvm/distro/rhel/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in similarity index 100% rename from services/console-proxy/server/distro/rhel/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in rename to systemvm/distro/rhel/SYSCONFDIR/rc.d/init.d/cloud-console-proxy.in diff --git a/services/console-proxy/server/distro/ubuntu/SYSCONFDIR/init.d/cloud-console-proxy.in b/systemvm/distro/ubuntu/SYSCONFDIR/init.d/cloud-console-proxy.in similarity index 100% rename from services/console-proxy/server/distro/ubuntu/SYSCONFDIR/init.d/cloud-console-proxy.in rename to systemvm/distro/ubuntu/SYSCONFDIR/init.d/cloud-console-proxy.in diff --git a/services/console-proxy/server/images/back.gif b/systemvm/images/back.gif similarity index 100% rename from services/console-proxy/server/images/back.gif rename to systemvm/images/back.gif diff --git 
a/services/console-proxy/server/images/bright-green.png b/systemvm/images/bright-green.png similarity index 100% rename from services/console-proxy/server/images/bright-green.png rename to systemvm/images/bright-green.png diff --git a/services/console-proxy/server/images/cad.gif b/systemvm/images/cad.gif similarity index 100% rename from services/console-proxy/server/images/cad.gif rename to systemvm/images/cad.gif diff --git a/services/console-proxy/server/images/cannotconnect.jpg b/systemvm/images/cannotconnect.jpg similarity index 100% rename from services/console-proxy/server/images/cannotconnect.jpg rename to systemvm/images/cannotconnect.jpg diff --git a/services/console-proxy/server/images/clr_button.gif b/systemvm/images/clr_button.gif similarity index 100% rename from services/console-proxy/server/images/clr_button.gif rename to systemvm/images/clr_button.gif diff --git a/services/console-proxy/server/images/clr_button_hover.gif b/systemvm/images/clr_button_hover.gif similarity index 100% rename from services/console-proxy/server/images/clr_button_hover.gif rename to systemvm/images/clr_button_hover.gif diff --git a/services/console-proxy/server/images/dot.cur b/systemvm/images/dot.cur similarity index 100% rename from services/console-proxy/server/images/dot.cur rename to systemvm/images/dot.cur diff --git a/services/console-proxy/server/images/gray-green.png b/systemvm/images/gray-green.png similarity index 100% rename from services/console-proxy/server/images/gray-green.png rename to systemvm/images/gray-green.png diff --git a/services/console-proxy/server/images/grid_headerbg.gif b/systemvm/images/grid_headerbg.gif similarity index 100% rename from services/console-proxy/server/images/grid_headerbg.gif rename to systemvm/images/grid_headerbg.gif diff --git a/services/console-proxy/server/images/left.png b/systemvm/images/left.png similarity index 100% rename from services/console-proxy/server/images/left.png rename to systemvm/images/left.png diff 
--git a/services/console-proxy/server/images/minimize_button.gif b/systemvm/images/minimize_button.gif similarity index 100% rename from services/console-proxy/server/images/minimize_button.gif rename to systemvm/images/minimize_button.gif diff --git a/services/console-proxy/server/images/minimize_button_hover.gif b/systemvm/images/minimize_button_hover.gif similarity index 100% rename from services/console-proxy/server/images/minimize_button_hover.gif rename to systemvm/images/minimize_button_hover.gif diff --git a/services/console-proxy/server/images/notready.jpg b/systemvm/images/notready.jpg similarity index 100% rename from services/console-proxy/server/images/notready.jpg rename to systemvm/images/notready.jpg diff --git a/services/console-proxy/server/images/play_button.gif b/systemvm/images/play_button.gif similarity index 100% rename from services/console-proxy/server/images/play_button.gif rename to systemvm/images/play_button.gif diff --git a/services/console-proxy/server/images/play_button_hover.gif b/systemvm/images/play_button_hover.gif similarity index 100% rename from services/console-proxy/server/images/play_button_hover.gif rename to systemvm/images/play_button_hover.gif diff --git a/services/console-proxy/server/images/right.png b/systemvm/images/right.png similarity index 100% rename from services/console-proxy/server/images/right.png rename to systemvm/images/right.png diff --git a/services/console-proxy/server/images/right2.png b/systemvm/images/right2.png similarity index 100% rename from services/console-proxy/server/images/right2.png rename to systemvm/images/right2.png diff --git a/services/console-proxy/server/images/shrink_button.gif b/systemvm/images/shrink_button.gif similarity index 100% rename from services/console-proxy/server/images/shrink_button.gif rename to systemvm/images/shrink_button.gif diff --git a/services/console-proxy/server/images/shrink_button_hover.gif b/systemvm/images/shrink_button_hover.gif similarity index 100% 
rename from services/console-proxy/server/images/shrink_button_hover.gif rename to systemvm/images/shrink_button_hover.gif diff --git a/services/console-proxy/server/images/stop_button.gif b/systemvm/images/stop_button.gif similarity index 100% rename from services/console-proxy/server/images/stop_button.gif rename to systemvm/images/stop_button.gif diff --git a/services/console-proxy/server/images/stop_button_hover.gif b/systemvm/images/stop_button_hover.gif similarity index 100% rename from services/console-proxy/server/images/stop_button_hover.gif rename to systemvm/images/stop_button_hover.gif diff --git a/services/console-proxy/server/images/winlog.png b/systemvm/images/winlog.png similarity index 100% rename from services/console-proxy/server/images/winlog.png rename to systemvm/images/winlog.png diff --git a/services/console-proxy/server/js/ajaxkeys.js b/systemvm/js/ajaxkeys.js similarity index 100% rename from services/console-proxy/server/js/ajaxkeys.js rename to systemvm/js/ajaxkeys.js diff --git a/services/console-proxy/server/js/ajaxviewer.js b/systemvm/js/ajaxviewer.js similarity index 100% rename from services/console-proxy/server/js/ajaxviewer.js rename to systemvm/js/ajaxviewer.js diff --git a/services/console-proxy/server/js/cloud.logger.js b/systemvm/js/cloud.logger.js similarity index 100% rename from services/console-proxy/server/js/cloud.logger.js rename to systemvm/js/cloud.logger.js diff --git a/services/console-proxy/server/js/handler.js b/systemvm/js/handler.js similarity index 100% rename from services/console-proxy/server/js/handler.js rename to systemvm/js/handler.js diff --git a/services/console-proxy/server/js/jquery.js b/systemvm/js/jquery.js similarity index 100% rename from services/console-proxy/server/js/jquery.js rename to systemvm/js/jquery.js diff --git a/services/console-proxy/server/libexec/console-proxy-runner.in b/systemvm/libexec/console-proxy-runner.in similarity index 100% rename from 
services/console-proxy/server/libexec/console-proxy-runner.in rename to systemvm/libexec/console-proxy-runner.in diff --git a/patches/systemvm/debian/README b/systemvm/patches/debian/README similarity index 100% rename from patches/systemvm/debian/README rename to systemvm/patches/debian/README diff --git a/patches/systemvm/debian/buildsystemvm.sh b/systemvm/patches/debian/buildsystemvm.sh similarity index 100% rename from patches/systemvm/debian/buildsystemvm.sh rename to systemvm/patches/debian/buildsystemvm.sh diff --git a/patches/systemvm/debian/config.dat b/systemvm/patches/debian/config.dat similarity index 100% rename from patches/systemvm/debian/config.dat rename to systemvm/patches/debian/config.dat diff --git a/patches/systemvm/debian/config/etc/apache2/httpd.conf b/systemvm/patches/debian/config/etc/apache2/httpd.conf similarity index 100% rename from patches/systemvm/debian/config/etc/apache2/httpd.conf rename to systemvm/patches/debian/config/etc/apache2/httpd.conf diff --git a/patches/systemvm/debian/config/etc/apache2/ports.conf b/systemvm/patches/debian/config/etc/apache2/ports.conf similarity index 100% rename from patches/systemvm/debian/config/etc/apache2/ports.conf rename to systemvm/patches/debian/config/etc/apache2/ports.conf diff --git a/patches/systemvm/debian/config/etc/apache2/sites-available/default b/systemvm/patches/debian/config/etc/apache2/sites-available/default similarity index 100% rename from patches/systemvm/debian/config/etc/apache2/sites-available/default rename to systemvm/patches/debian/config/etc/apache2/sites-available/default diff --git a/patches/systemvm/debian/config/etc/apache2/sites-available/default-ssl b/systemvm/patches/debian/config/etc/apache2/sites-available/default-ssl similarity index 100% rename from patches/systemvm/debian/config/etc/apache2/sites-available/default-ssl rename to systemvm/patches/debian/config/etc/apache2/sites-available/default-ssl diff --git 
a/patches/systemvm/debian/config/etc/apache2/vhostexample.conf b/systemvm/patches/debian/config/etc/apache2/vhostexample.conf similarity index 100% rename from patches/systemvm/debian/config/etc/apache2/vhostexample.conf rename to systemvm/patches/debian/config/etc/apache2/vhostexample.conf diff --git a/patches/systemvm/debian/config/etc/cloud-nic.rules b/systemvm/patches/debian/config/etc/cloud-nic.rules similarity index 100% rename from patches/systemvm/debian/config/etc/cloud-nic.rules rename to systemvm/patches/debian/config/etc/cloud-nic.rules diff --git a/patches/systemvm/debian/config/etc/cron.daily/cloud-cleanup b/systemvm/patches/debian/config/etc/cron.daily/cloud-cleanup similarity index 100% rename from patches/systemvm/debian/config/etc/cron.daily/cloud-cleanup rename to systemvm/patches/debian/config/etc/cron.daily/cloud-cleanup diff --git a/patches/systemvm/debian/config/etc/default/cloud b/systemvm/patches/debian/config/etc/default/cloud similarity index 100% rename from patches/systemvm/debian/config/etc/default/cloud rename to systemvm/patches/debian/config/etc/default/cloud diff --git a/patches/systemvm/debian/config/etc/default/cloud-passwd-srvr b/systemvm/patches/debian/config/etc/default/cloud-passwd-srvr similarity index 100% rename from patches/systemvm/debian/config/etc/default/cloud-passwd-srvr rename to systemvm/patches/debian/config/etc/default/cloud-passwd-srvr diff --git a/patches/systemvm/debian/config/etc/dnsmasq.conf.tmpl b/systemvm/patches/debian/config/etc/dnsmasq.conf.tmpl similarity index 100% rename from patches/systemvm/debian/config/etc/dnsmasq.conf.tmpl rename to systemvm/patches/debian/config/etc/dnsmasq.conf.tmpl diff --git a/patches/systemvm/debian/config/etc/haproxy/haproxy.cfg b/systemvm/patches/debian/config/etc/haproxy/haproxy.cfg similarity index 100% rename from patches/systemvm/debian/config/etc/haproxy/haproxy.cfg rename to systemvm/patches/debian/config/etc/haproxy/haproxy.cfg diff --git 
a/patches/systemvm/debian/config/etc/init.d/cloud b/systemvm/patches/debian/config/etc/init.d/cloud similarity index 77% rename from patches/systemvm/debian/config/etc/init.d/cloud rename to systemvm/patches/debian/config/etc/init.d/cloud index b8e6ed2bf45..83853bcd4ef 100755 --- a/patches/systemvm/debian/config/etc/init.d/cloud +++ b/systemvm/patches/debian/config/etc/init.d/cloud @@ -5,9 +5,9 @@ # Required-Stop: $local_fs # Should-Start: # Should-Stop: -# Default-Start: 2 3 4 5 +# Default-Start: # Default-Stop: 0 1 6 -# Short-Description: Start up the cloud.com service +# Short-Description: Start up the CloudStack cloud service ### END INIT INFO # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -74,7 +74,7 @@ _failure() { fi } RETVAL=$? -CLOUD_COM_HOME="/usr/local/cloud" +CLOUDSTACK_HOME="/usr/local/cloud" # mkdir -p /var/log/vmops @@ -82,23 +82,23 @@ get_pids() { local i for i in $(ps -ef| grep java | grep -v grep | awk '{print $2}'); do - echo $(pwdx $i) | grep "$CLOUD_COM_HOME" | awk -F: '{print $1}'; + echo $(pwdx $i) | grep "$CLOUDSTACK_HOME" | awk -F: '{print $1}'; done } start() { local pid=$(get_pids) if [ "$pid" != "" ]; then - echo "cloud.com sevice is already running, PID = $pid" + echo "CloudStack cloud service is already running, PID = $pid" return 0 fi - echo -n "Starting cloud.com service (type=$TYPE) " - if [ -f $CLOUD_COM_HOME/systemvm/run.sh ]; + echo -n "Starting CloudStack cloud service (type=$TYPE) " + if [ -f $CLOUDSTACK_HOME/systemvm/run.sh ]; then if [ "$pid" == "" ] then - (cd $CLOUD_COM_HOME/systemvm; nohup ./run.sh > /var/log/cloud/cloud.out 2>&1 & ) + (cd $CLOUDSTACK_HOME/systemvm; nohup ./run.sh > /var/log/cloud/cloud.out 2>&1 & ) pid=$(get_pids) echo $pid > /var/run/cloud.pid fi @@ -107,29 +107,29 @@ start() { _failure fi echo - echo 'start' > $CLOUD_COM_HOME/systemvm/user_request + echo 'start' > $CLOUDSTACK_HOME/systemvm/user_request } stop() { local pid - 
echo -n "Stopping cloud.com service (type=$TYPE): " + echo -n "Stopping CloudStack cloud service (type=$TYPE): " for pid in $(get_pids) do kill $pid done _success echo - echo 'stop' > $CLOUD_COM_HOME/systemvm/user_request + echo 'stop' > $CLOUDSTACK_HOME/systemvm/user_request } status() { local pids=$(get_pids) if [ "$pids" == "" ] then - echo "cloud.com service is not running" + echo "CloudStack cloud service is not running" return 1 fi - echo "cloud.com service (type=$TYPE) is running: process id: $pids" + echo "CloudStack cloud service (type=$TYPE) is running: process id: $pids" return 0 } diff --git a/patches/systemvm/debian/config/etc/init.d/cloud-early-config b/systemvm/patches/debian/config/etc/init.d/cloud-early-config similarity index 99% rename from patches/systemvm/debian/config/etc/init.d/cloud-early-config rename to systemvm/patches/debian/config/etc/init.d/cloud-early-config index 88ecc119b61..d44f94d7d17 100755 --- a/patches/systemvm/debian/config/etc/init.d/cloud-early-config +++ b/systemvm/patches/debian/config/etc/init.d/cloud-early-config @@ -1189,6 +1189,7 @@ change_password() { start() { # Clear /tmp for file lock rm -f /tmp/*.lock + rm -f /tmp/rrouter_bumped local hyp=$(hypervisor) [ $? 
-ne 0 ] && log_it "Failed to detect hypervisor type, bailing out of early init" && exit 10 log_it "Detected that we are running inside $hyp guest" diff --git a/patches/systemvm/debian/config/etc/init.d/cloud-passwd-srvr b/systemvm/patches/debian/config/etc/init.d/cloud-passwd-srvr similarity index 100% rename from patches/systemvm/debian/config/etc/init.d/cloud-passwd-srvr rename to systemvm/patches/debian/config/etc/init.d/cloud-passwd-srvr diff --git a/patches/systemvm/debian/config/etc/init.d/postinit b/systemvm/patches/debian/config/etc/init.d/postinit similarity index 100% rename from patches/systemvm/debian/config/etc/init.d/postinit rename to systemvm/patches/debian/config/etc/init.d/postinit diff --git a/patches/systemvm/debian/config/etc/iptables/iptables-consoleproxy b/systemvm/patches/debian/config/etc/iptables/iptables-consoleproxy similarity index 100% rename from patches/systemvm/debian/config/etc/iptables/iptables-consoleproxy rename to systemvm/patches/debian/config/etc/iptables/iptables-consoleproxy diff --git a/patches/systemvm/debian/config/etc/iptables/iptables-elbvm b/systemvm/patches/debian/config/etc/iptables/iptables-elbvm similarity index 100% rename from patches/systemvm/debian/config/etc/iptables/iptables-elbvm rename to systemvm/patches/debian/config/etc/iptables/iptables-elbvm diff --git a/patches/systemvm/debian/config/etc/iptables/iptables-ilbvm b/systemvm/patches/debian/config/etc/iptables/iptables-ilbvm similarity index 100% rename from patches/systemvm/debian/config/etc/iptables/iptables-ilbvm rename to systemvm/patches/debian/config/etc/iptables/iptables-ilbvm diff --git a/patches/systemvm/debian/config/etc/iptables/iptables-router b/systemvm/patches/debian/config/etc/iptables/iptables-router similarity index 100% rename from patches/systemvm/debian/config/etc/iptables/iptables-router rename to systemvm/patches/debian/config/etc/iptables/iptables-router diff --git a/patches/systemvm/debian/config/etc/iptables/iptables-secstorage 
b/systemvm/patches/debian/config/etc/iptables/iptables-secstorage similarity index 100% rename from patches/systemvm/debian/config/etc/iptables/iptables-secstorage rename to systemvm/patches/debian/config/etc/iptables/iptables-secstorage diff --git a/patches/systemvm/debian/config/etc/iptables/iptables-vpcrouter b/systemvm/patches/debian/config/etc/iptables/iptables-vpcrouter similarity index 100% rename from patches/systemvm/debian/config/etc/iptables/iptables-vpcrouter rename to systemvm/patches/debian/config/etc/iptables/iptables-vpcrouter diff --git a/patches/systemvm/debian/config/etc/iptables/rt_tables_init b/systemvm/patches/debian/config/etc/iptables/rt_tables_init similarity index 100% rename from patches/systemvm/debian/config/etc/iptables/rt_tables_init rename to systemvm/patches/debian/config/etc/iptables/rt_tables_init diff --git a/patches/systemvm/debian/config/etc/iptables/rules b/systemvm/patches/debian/config/etc/iptables/rules similarity index 100% rename from patches/systemvm/debian/config/etc/iptables/rules rename to systemvm/patches/debian/config/etc/iptables/rules diff --git a/patches/systemvm/debian/config/etc/logrotate.conf b/systemvm/patches/debian/config/etc/logrotate.conf similarity index 100% rename from patches/systemvm/debian/config/etc/logrotate.conf rename to systemvm/patches/debian/config/etc/logrotate.conf diff --git a/patches/systemvm/debian/config/etc/logrotate.d/apache2 b/systemvm/patches/debian/config/etc/logrotate.d/apache2 similarity index 100% rename from patches/systemvm/debian/config/etc/logrotate.d/apache2 rename to systemvm/patches/debian/config/etc/logrotate.d/apache2 diff --git a/patches/systemvm/debian/config/etc/logrotate.d/dnsmasq b/systemvm/patches/debian/config/etc/logrotate.d/dnsmasq similarity index 100% rename from patches/systemvm/debian/config/etc/logrotate.d/dnsmasq rename to systemvm/patches/debian/config/etc/logrotate.d/dnsmasq diff --git a/patches/systemvm/debian/config/etc/logrotate.d/haproxy 
b/systemvm/patches/debian/config/etc/logrotate.d/haproxy similarity index 100% rename from patches/systemvm/debian/config/etc/logrotate.d/haproxy rename to systemvm/patches/debian/config/etc/logrotate.d/haproxy diff --git a/patches/systemvm/debian/config/etc/logrotate.d/ppp b/systemvm/patches/debian/config/etc/logrotate.d/ppp similarity index 100% rename from patches/systemvm/debian/config/etc/logrotate.d/ppp rename to systemvm/patches/debian/config/etc/logrotate.d/ppp diff --git a/patches/systemvm/debian/config/etc/logrotate.d/rsyslog b/systemvm/patches/debian/config/etc/logrotate.d/rsyslog similarity index 100% rename from patches/systemvm/debian/config/etc/logrotate.d/rsyslog rename to systemvm/patches/debian/config/etc/logrotate.d/rsyslog diff --git a/patches/systemvm/debian/config/etc/modprobe.d/aesni_intel b/systemvm/patches/debian/config/etc/modprobe.d/aesni_intel similarity index 100% rename from patches/systemvm/debian/config/etc/modprobe.d/aesni_intel rename to systemvm/patches/debian/config/etc/modprobe.d/aesni_intel diff --git a/patches/systemvm/debian/config/etc/profile.d/cloud.sh b/systemvm/patches/debian/config/etc/profile.d/cloud.sh similarity index 100% rename from patches/systemvm/debian/config/etc/profile.d/cloud.sh rename to systemvm/patches/debian/config/etc/profile.d/cloud.sh diff --git a/patches/systemvm/debian/config/etc/rc.local b/systemvm/patches/debian/config/etc/rc.local similarity index 100% rename from patches/systemvm/debian/config/etc/rc.local rename to systemvm/patches/debian/config/etc/rc.local diff --git a/patches/systemvm/debian/config/etc/rsyslog.conf b/systemvm/patches/debian/config/etc/rsyslog.conf similarity index 100% rename from patches/systemvm/debian/config/etc/rsyslog.conf rename to systemvm/patches/debian/config/etc/rsyslog.conf diff --git a/patches/systemvm/debian/config/etc/ssh/sshd_config b/systemvm/patches/debian/config/etc/ssh/sshd_config similarity index 100% rename from 
patches/systemvm/debian/config/etc/ssh/sshd_config rename to systemvm/patches/debian/config/etc/ssh/sshd_config diff --git a/patches/systemvm/debian/config/etc/sysctl.conf b/systemvm/patches/debian/config/etc/sysctl.conf similarity index 100% rename from patches/systemvm/debian/config/etc/sysctl.conf rename to systemvm/patches/debian/config/etc/sysctl.conf diff --git a/patches/systemvm/debian/config/etc/vpcdnsmasq.conf b/systemvm/patches/debian/config/etc/vpcdnsmasq.conf similarity index 100% rename from patches/systemvm/debian/config/etc/vpcdnsmasq.conf rename to systemvm/patches/debian/config/etc/vpcdnsmasq.conf diff --git a/patches/systemvm/debian/config/opt/cloud/bin/checkbatchs2svpn.sh b/systemvm/patches/debian/config/opt/cloud/bin/checkbatchs2svpn.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/checkbatchs2svpn.sh rename to systemvm/patches/debian/config/opt/cloud/bin/checkbatchs2svpn.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/checks2svpn.sh b/systemvm/patches/debian/config/opt/cloud/bin/checks2svpn.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/checks2svpn.sh rename to systemvm/patches/debian/config/opt/cloud/bin/checks2svpn.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/cloud-nic.sh b/systemvm/patches/debian/config/opt/cloud/bin/cloud-nic.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/cloud-nic.sh rename to systemvm/patches/debian/config/opt/cloud/bin/cloud-nic.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/get_template_version.sh b/systemvm/patches/debian/config/opt/cloud/bin/get_template_version.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/get_template_version.sh rename to systemvm/patches/debian/config/opt/cloud/bin/get_template_version.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/ilb.sh b/systemvm/patches/debian/config/opt/cloud/bin/ilb.sh similarity 
index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/ilb.sh rename to systemvm/patches/debian/config/opt/cloud/bin/ilb.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/ipassoc.sh b/systemvm/patches/debian/config/opt/cloud/bin/ipassoc.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/ipassoc.sh rename to systemvm/patches/debian/config/opt/cloud/bin/ipassoc.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/ipsectunnel.sh b/systemvm/patches/debian/config/opt/cloud/bin/ipsectunnel.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/ipsectunnel.sh rename to systemvm/patches/debian/config/opt/cloud/bin/ipsectunnel.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/netusage.sh b/systemvm/patches/debian/config/opt/cloud/bin/netusage.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/netusage.sh rename to systemvm/patches/debian/config/opt/cloud/bin/netusage.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/passwd_server b/systemvm/patches/debian/config/opt/cloud/bin/passwd_server similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/passwd_server rename to systemvm/patches/debian/config/opt/cloud/bin/passwd_server diff --git a/patches/systemvm/debian/config/opt/cloud/bin/passwd_server_ip b/systemvm/patches/debian/config/opt/cloud/bin/passwd_server_ip similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/passwd_server_ip rename to systemvm/patches/debian/config/opt/cloud/bin/passwd_server_ip diff --git a/patches/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh b/systemvm/patches/debian/config/opt/cloud/bin/patchsystemvm.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/patchsystemvm.sh rename to systemvm/patches/debian/config/opt/cloud/bin/patchsystemvm.sh diff --git a/patches/systemvm/debian/config/root/savepassword.sh 
b/systemvm/patches/debian/config/opt/cloud/bin/savepassword.sh similarity index 89% rename from patches/systemvm/debian/config/root/savepassword.sh rename to systemvm/patches/debian/config/opt/cloud/bin/savepassword.sh index fc736039c2e..1ea27e5b702 100755 --- a/patches/systemvm/debian/config/root/savepassword.sh +++ b/systemvm/patches/debian/config/opt/cloud/bin/savepassword.sh @@ -53,6 +53,13 @@ done [ -f $PASSWD_FILE ] || touch $PASSWD_FILE sed -i /$VM_IP/d $PASSWD_FILE -echo "$VM_IP=$PASSWORD" >> $PASSWD_FILE + +ps aux | grep serve_password.sh |grep -v grep 2>&1 > /dev/null +if [ $? -eq 0 ] +then + echo "$VM_IP=$PASSWORD" >> $PASSWD_FILE +else + echo "$VM_IP=saved_password" >> $PASSWD_FILE +fi unlock_exit $? $lock $locked diff --git a/patches/systemvm/debian/config/opt/cloud/bin/serve_password.sh b/systemvm/patches/debian/config/opt/cloud/bin/serve_password.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/serve_password.sh rename to systemvm/patches/debian/config/opt/cloud/bin/serve_password.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vmdata.py b/systemvm/patches/debian/config/opt/cloud/bin/vmdata.py similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vmdata.py rename to systemvm/patches/debian/config/opt/cloud/bin/vmdata.py diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_acl.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_acl.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_acl.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_acl.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_func.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_func.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_func.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_func.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_guestnw.sh 
b/systemvm/patches/debian/config/opt/cloud/bin/vpc_guestnw.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_guestnw.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_guestnw.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_ipassoc.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_ipassoc.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_ipassoc.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_ipassoc.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_loadbalancer.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_loadbalancer.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_loadbalancer.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_loadbalancer.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_netusage.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_netusage.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_netusage.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_netusage.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_passwd_server b/systemvm/patches/debian/config/opt/cloud/bin/vpc_passwd_server similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_passwd_server rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_passwd_server diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_portforwarding.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_portforwarding.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_portforwarding.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_portforwarding.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_privateGateway.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_privateGateway.sh similarity index 100% rename from 
patches/systemvm/debian/config/opt/cloud/bin/vpc_privateGateway.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_privateGateway.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_privategw_acl.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_privategw_acl.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_privategw_acl.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_privategw_acl.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_snat.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_snat.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_snat.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_snat.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_staticnat.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_staticnat.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_staticnat.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_staticnat.sh diff --git a/patches/systemvm/debian/config/opt/cloud/bin/vpc_staticroute.sh b/systemvm/patches/debian/config/opt/cloud/bin/vpc_staticroute.sh similarity index 100% rename from patches/systemvm/debian/config/opt/cloud/bin/vpc_staticroute.sh rename to systemvm/patches/debian/config/opt/cloud/bin/vpc_staticroute.sh diff --git a/patches/systemvm/debian/config/root/.ssh/authorized_keys b/systemvm/patches/debian/config/root/.ssh/authorized_keys similarity index 100% rename from patches/systemvm/debian/config/root/.ssh/authorized_keys rename to systemvm/patches/debian/config/root/.ssh/authorized_keys diff --git a/patches/systemvm/debian/config/root/bumpup_priority.sh b/systemvm/patches/debian/config/root/bumpup_priority.sh similarity index 100% rename from patches/systemvm/debian/config/root/bumpup_priority.sh rename to systemvm/patches/debian/config/root/bumpup_priority.sh diff --git 
a/patches/systemvm/debian/config/root/clearUsageRules.sh b/systemvm/patches/debian/config/root/clearUsageRules.sh similarity index 100% rename from patches/systemvm/debian/config/root/clearUsageRules.sh rename to systemvm/patches/debian/config/root/clearUsageRules.sh diff --git a/patches/systemvm/debian/config/root/createIpAlias.sh b/systemvm/patches/debian/config/root/createIpAlias.sh similarity index 100% rename from patches/systemvm/debian/config/root/createIpAlias.sh rename to systemvm/patches/debian/config/root/createIpAlias.sh diff --git a/patches/systemvm/debian/config/root/deleteIpAlias.sh b/systemvm/patches/debian/config/root/deleteIpAlias.sh similarity index 100% rename from patches/systemvm/debian/config/root/deleteIpAlias.sh rename to systemvm/patches/debian/config/root/deleteIpAlias.sh diff --git a/patches/systemvm/debian/config/root/dnsmasq.sh b/systemvm/patches/debian/config/root/dnsmasq.sh similarity index 100% rename from patches/systemvm/debian/config/root/dnsmasq.sh rename to systemvm/patches/debian/config/root/dnsmasq.sh diff --git a/patches/systemvm/debian/config/root/edithosts.sh b/systemvm/patches/debian/config/root/edithosts.sh similarity index 100% rename from patches/systemvm/debian/config/root/edithosts.sh rename to systemvm/patches/debian/config/root/edithosts.sh diff --git a/patches/systemvm/debian/config/root/firewall.sh b/systemvm/patches/debian/config/root/firewall.sh similarity index 100% rename from patches/systemvm/debian/config/root/firewall.sh rename to systemvm/patches/debian/config/root/firewall.sh diff --git a/patches/systemvm/debian/config/root/firewallRule_egress.sh b/systemvm/patches/debian/config/root/firewallRule_egress.sh similarity index 100% rename from patches/systemvm/debian/config/root/firewallRule_egress.sh rename to systemvm/patches/debian/config/root/firewallRule_egress.sh diff --git a/patches/systemvm/debian/config/root/firewall_rule.sh b/systemvm/patches/debian/config/root/firewall_rule.sh similarity index 
100% rename from patches/systemvm/debian/config/root/firewall_rule.sh rename to systemvm/patches/debian/config/root/firewall_rule.sh diff --git a/patches/systemvm/debian/config/root/func.sh b/systemvm/patches/debian/config/root/func.sh similarity index 100% rename from patches/systemvm/debian/config/root/func.sh rename to systemvm/patches/debian/config/root/func.sh diff --git a/patches/systemvm/debian/config/root/loadbalancer.sh b/systemvm/patches/debian/config/root/loadbalancer.sh similarity index 100% rename from patches/systemvm/debian/config/root/loadbalancer.sh rename to systemvm/patches/debian/config/root/loadbalancer.sh diff --git a/patches/systemvm/debian/config/root/reconfigLB.sh b/systemvm/patches/debian/config/root/reconfigLB.sh similarity index 100% rename from patches/systemvm/debian/config/root/reconfigLB.sh rename to systemvm/patches/debian/config/root/reconfigLB.sh diff --git a/patches/systemvm/debian/config/root/redundant_router/arping_gateways.sh.templ b/systemvm/patches/debian/config/root/redundant_router/arping_gateways.sh.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/arping_gateways.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/arping_gateways.sh.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/backup.sh.templ b/systemvm/patches/debian/config/root/redundant_router/backup.sh.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/backup.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/backup.sh.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/check_bumpup.sh b/systemvm/patches/debian/config/root/redundant_router/check_bumpup.sh similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/check_bumpup.sh rename to systemvm/patches/debian/config/root/redundant_router/check_bumpup.sh diff --git 
a/patches/systemvm/debian/config/root/redundant_router/check_heartbeat.sh.templ b/systemvm/patches/debian/config/root/redundant_router/check_heartbeat.sh.templ similarity index 88% rename from patches/systemvm/debian/config/root/redundant_router/check_heartbeat.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/check_heartbeat.sh.templ index 1a390e69eea..95cabd6b067 100755 --- a/patches/systemvm/debian/config/root/redundant_router/check_heartbeat.sh.templ +++ b/systemvm/patches/debian/config/root/redundant_router/check_heartbeat.sh.templ @@ -25,10 +25,13 @@ then if [ $diff -lt 30 ] then echo Keepalived process is dead! >> [RROUTER_LOG] + [RROUTER_BIN_PATH]/services.sh stop >> [RROUTER_LOG] 2>&1 + [RROUTER_BIN_PATH]/disable_pubip.sh >> [RROUTER_LOG] 2>&1 + [RROUTER_BIN_PATH]/primary-backup.sh fault >> [RROUTER_LOG] 2>&1 service keepalived stop >> [RROUTER_LOG] 2>&1 service conntrackd stop >> [RROUTER_LOG] 2>&1 pkill -9 keepalived >> [RROUTER_LOG] 2>&1 - [RROUTER_BIN_PATH]/disable_pubip.sh >> [RROUTER_LOG] 2>&1 + pkill -9 conntrackd >> [RROUTER_LOG] 2>&1 echo Status: FAULT \(keepalived process is dead\) >> [RROUTER_LOG] exit fi diff --git a/patches/systemvm/debian/config/root/redundant_router/checkrouter.sh.templ b/systemvm/patches/debian/config/root/redundant_router/checkrouter.sh.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/checkrouter.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/checkrouter.sh.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/conntrackd.conf.templ b/systemvm/patches/debian/config/root/redundant_router/conntrackd.conf.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/conntrackd.conf.templ rename to systemvm/patches/debian/config/root/redundant_router/conntrackd.conf.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/disable_pubip.sh 
b/systemvm/patches/debian/config/root/redundant_router/disable_pubip.sh similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/disable_pubip.sh rename to systemvm/patches/debian/config/root/redundant_router/disable_pubip.sh diff --git a/patches/systemvm/debian/config/root/redundant_router/enable_pubip.sh.templ b/systemvm/patches/debian/config/root/redundant_router/enable_pubip.sh.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/enable_pubip.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/enable_pubip.sh.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/fault.sh.templ b/systemvm/patches/debian/config/root/redundant_router/fault.sh.templ similarity index 85% rename from patches/systemvm/debian/config/root/redundant_router/fault.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/fault.sh.templ index aecb08db247..c008a9cb449 100644 --- a/patches/systemvm/debian/config/root/redundant_router/fault.sh.templ +++ b/systemvm/patches/debian/config/root/redundant_router/fault.sh.templ @@ -27,7 +27,11 @@ fi echo To fault called >> [RROUTER_LOG] [RROUTER_BIN_PATH]/disable_pubip.sh >> [RROUTER_LOG] 2>&1 +echo Disable public ip >> [RROUTER_LOG] +[RROUTER_BIN_PATH]/services.sh stop >> [RROUTER_LOG] 2>&1 +echo Stop services $? >> [RROUTER_LOG] [RROUTER_BIN_PATH]/primary-backup.sh fault >> [RROUTER_LOG] 2>&1 +echo Switch conntrackd mode fault $? 
>> [RROUTER_LOG] echo Status: FAULT >> [RROUTER_LOG] releaseLockFile $lock $locked diff --git a/patches/systemvm/debian/config/root/redundant_router/heartbeat.sh.templ b/systemvm/patches/debian/config/root/redundant_router/heartbeat.sh.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/heartbeat.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/heartbeat.sh.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/keepalived.conf.templ b/systemvm/patches/debian/config/root/redundant_router/keepalived.conf.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/keepalived.conf.templ rename to systemvm/patches/debian/config/root/redundant_router/keepalived.conf.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/master.sh.templ b/systemvm/patches/debian/config/root/redundant_router/master.sh.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/master.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/master.sh.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/primary-backup.sh.templ b/systemvm/patches/debian/config/root/redundant_router/primary-backup.sh.templ similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/primary-backup.sh.templ rename to systemvm/patches/debian/config/root/redundant_router/primary-backup.sh.templ diff --git a/patches/systemvm/debian/config/root/redundant_router/services.sh b/systemvm/patches/debian/config/root/redundant_router/services.sh similarity index 100% rename from patches/systemvm/debian/config/root/redundant_router/services.sh rename to systemvm/patches/debian/config/root/redundant_router/services.sh diff --git a/patches/systemvm/debian/config/root/userdata.py b/systemvm/patches/debian/config/root/userdata.py similarity index 100% rename from 
patches/systemvm/debian/config/root/userdata.py rename to systemvm/patches/debian/config/root/userdata.py diff --git a/patches/systemvm/debian/config/root/userdata.sh b/systemvm/patches/debian/config/root/userdata.sh similarity index 100% rename from patches/systemvm/debian/config/root/userdata.sh rename to systemvm/patches/debian/config/root/userdata.sh diff --git a/patches/systemvm/debian/config/var/www/html/latest/.htaccess b/systemvm/patches/debian/config/var/www/html/latest/.htaccess similarity index 100% rename from patches/systemvm/debian/config/var/www/html/latest/.htaccess rename to systemvm/patches/debian/config/var/www/html/latest/.htaccess diff --git a/patches/systemvm/debian/config/var/www/html/userdata/.htaccess b/systemvm/patches/debian/config/var/www/html/userdata/.htaccess similarity index 100% rename from patches/systemvm/debian/config/var/www/html/userdata/.htaccess rename to systemvm/patches/debian/config/var/www/html/userdata/.htaccess diff --git a/patches/systemvm/debian/convert.sh b/systemvm/patches/debian/convert.sh similarity index 100% rename from patches/systemvm/debian/convert.sh rename to systemvm/patches/debian/convert.sh diff --git a/patches/systemvm/debian/qemuconvert.sh b/systemvm/patches/debian/qemuconvert.sh similarity index 100% rename from patches/systemvm/debian/qemuconvert.sh rename to systemvm/patches/debian/qemuconvert.sh diff --git a/patches/systemvm/debian/systemvm.vmx b/systemvm/patches/debian/systemvm.vmx similarity index 100% rename from patches/systemvm/debian/systemvm.vmx rename to systemvm/patches/debian/systemvm.vmx diff --git a/patches/systemvm/debian/systemvm.xml b/systemvm/patches/debian/systemvm.xml similarity index 100% rename from patches/systemvm/debian/systemvm.xml rename to systemvm/patches/debian/systemvm.xml diff --git a/patches/systemvm/debian/vhdconvert.sh b/systemvm/patches/debian/vhdconvert.sh similarity index 100% rename from patches/systemvm/debian/vhdconvert.sh rename to 
systemvm/patches/debian/vhdconvert.sh diff --git a/patches/systemvm/debian/vpn/etc/ipsec.conf b/systemvm/patches/debian/vpn/etc/ipsec.conf similarity index 100% rename from patches/systemvm/debian/vpn/etc/ipsec.conf rename to systemvm/patches/debian/vpn/etc/ipsec.conf diff --git a/patches/systemvm/debian/vpn/etc/ipsec.d/l2tp.conf b/systemvm/patches/debian/vpn/etc/ipsec.d/l2tp.conf similarity index 100% rename from patches/systemvm/debian/vpn/etc/ipsec.d/l2tp.conf rename to systemvm/patches/debian/vpn/etc/ipsec.d/l2tp.conf diff --git a/patches/systemvm/debian/vpn/etc/ipsec.secrets b/systemvm/patches/debian/vpn/etc/ipsec.secrets similarity index 100% rename from patches/systemvm/debian/vpn/etc/ipsec.secrets rename to systemvm/patches/debian/vpn/etc/ipsec.secrets diff --git a/patches/systemvm/debian/vpn/etc/ppp/options.xl2tpd b/systemvm/patches/debian/vpn/etc/ppp/options.xl2tpd similarity index 100% rename from patches/systemvm/debian/vpn/etc/ppp/options.xl2tpd rename to systemvm/patches/debian/vpn/etc/ppp/options.xl2tpd diff --git a/patches/systemvm/debian/vpn/etc/xl2tpd/xl2tpd.conf b/systemvm/patches/debian/vpn/etc/xl2tpd/xl2tpd.conf similarity index 100% rename from patches/systemvm/debian/vpn/etc/xl2tpd/xl2tpd.conf rename to systemvm/patches/debian/vpn/etc/xl2tpd/xl2tpd.conf diff --git a/patches/systemvm/debian/vpn/opt/cloud/bin/vpn_l2tp.sh b/systemvm/patches/debian/vpn/opt/cloud/bin/vpn_l2tp.sh similarity index 67% rename from patches/systemvm/debian/vpn/opt/cloud/bin/vpn_l2tp.sh rename to systemvm/patches/debian/vpn/opt/cloud/bin/vpn_l2tp.sh index 79661c8b0d1..86148a34624 100755 --- a/patches/systemvm/debian/vpn/opt/cloud/bin/vpn_l2tp.sh +++ b/systemvm/patches/debian/vpn/opt/cloud/bin/vpn_l2tp.sh @@ -22,8 +22,8 @@ #set -x usage() { printf "Usage:\n" - printf "Create VPN : %s -c -r -l -p -s \n" $(basename $0) - printf "Delete VPN : %s -d -s \n" $(basename $0) + printf "Create VPN : %s -c -r -l -p -s -i \n" $(basename $0) + printf "Delete VPN : %s -d -l -s -D -C < 
local cidr> \n" $(basename $0) printf "Add VPN User : %s -u \n" $(basename $0) printf "Remote VPN User: %s -U /dev/null + then + return + fi if sudo iptables -t mangle -N VPN_$public_ip &> /dev/null then @@ -64,17 +66,42 @@ iptables_() { sudo iptables -t mangle $op VPN_$public_ip -p esp -j ACCEPT } +start_ipsec() { + service ipsec status > /dev/null + if [ $? -ne 0 ] + then + service ipsec start > /dev/null + #Wait until ipsec started, 5 seconds at most + for i in {1..5} + do + logger -t cloud "$(basename $0): waiting ipsec start..." + service ipsec status > /dev/null + result=$? + if [ $result -eq 0 ] + then + break + fi + sleep 1 + done + fi + service ipsec status > /dev/null + return $? +} + ipsec_server() { local op=$1 - if [ "$op" == "restart" ]; then - service ipsec stop - service xl2tpd stop - service ipsec start - service xl2tpd start - return $? - fi - service ipsec $op - service xl2tpd $op + case $op in + "start") start_ipsec + sudo service xl2tpd start + ;; + "stop") sudo service xl2tpd stop + ;; + "restart") start_ipsec + sudo ipsec auto --rereadall + service xl2tpd stop + service xl2tpd start + ;; + esac } create_l2tp_ipsec_vpn_server() { @@ -135,8 +162,10 @@ create= destroy= useradd= userdel= +dev= +cidr= -while getopts 'cdl:p:r:s:u:U:' OPTION +while getopts 'cdl:p:r:s:u:U:i:C:' OPTION do case $OPTION in c) create=1 @@ -161,12 +190,26 @@ do s) sflag=1 server_ip="$OPTARG" ;; + i) dev="$OPTARG" + ;; + C) cidr="$OPTARG" + ;; ?) 
usage exit 2 ;; esac done +if [ "$dev" == "" ] +then + $dev="eth2" +fi + +if [ "$cidr" == "" ] +then + $cidr=$(get_intf_ip "eth0") +fi + [ "$create$destroy" == "11" ] || [ "$create$destroy$useradd$userdel" == "" ] && usage && exit 2 [ "$create" == "1" ] && [ "$lflag$pflag$rflag$sflag" != "1111" ] && usage && exit 2 diff --git a/patches/systemvm/debian/xe/xe-daemon b/systemvm/patches/debian/xe/xe-daemon similarity index 100% rename from patches/systemvm/debian/xe/xe-daemon rename to systemvm/patches/debian/xe/xe-daemon diff --git a/patches/systemvm/debian/xe/xe-linux-distribution b/systemvm/patches/debian/xe/xe-linux-distribution similarity index 100% rename from patches/systemvm/debian/xe/xe-linux-distribution rename to systemvm/patches/debian/xe/xe-linux-distribution diff --git a/patches/systemvm/debian/xe/xe-update-guest-attrs b/systemvm/patches/debian/xe/xe-update-guest-attrs similarity index 100% rename from patches/systemvm/debian/xe/xe-update-guest-attrs rename to systemvm/patches/debian/xe/xe-update-guest-attrs diff --git a/systemvm/pom.xml b/systemvm/pom.xml new file mode 100644 index 00000000000..9fe2688705e --- /dev/null +++ b/systemvm/pom.xml @@ -0,0 +1,261 @@ + + + 4.0.0 + cloud-systemvm + Apache CloudStack System VM + pom + + org.apache.cloudstack + cloudstack + 4.3.0-SNAPSHOT + ../pom.xml + + + mkisofs + + + + org.apache.cloudstack + cloud-agent + ${project.version} + + + org.apache.cloudstack + cloud-secondary-storage + ${project.version} + + + org.apache.cloudstack + cloud-console-proxy + ${project.version} + + + + + + maven-assembly-plugin + 2.3 + + systemvm + false + + systemvm-descriptor.xml + + + + + make-systemvm + package + + single + + + + + + maven-resources-plugin + 2.6 + + + copy-resources + + package + + copy-resources + + + dist + + + target + + systemvm.zip + + + + patches/debian/config/root/.ssh + + authorized_keys + + + + + + + + + maven-antrun-plugin + 1.7 + + + build-cloud-scripts + generate-sources + + run + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + package + + exec + + + + + ${mkisofs} + dist + + -quiet + -r + -o + systemvm.iso + systemvm.zip + cloud-scripts.tgz + authorized_keys + + + + + + + + + genisoimage + + + /usr/bin/genisoimage + + + + genisoimage + + + + vmware + + + noredist + + + + + org.apache.cloudstack + cloud-plugin-hypervisor-vmware + ${project.version} + + + org.apache.cloudstack + cloud-vmware-base + ${project.version} + + + + + quickcloud + + + + org.codehaus.mojo + exec-maven-plugin + 1.2.1 + + + + java + + + + + com.cloud.agent.AgentShell + + zone=1 + pod=1 + host=192.168.56.1 + guid=ConsoleProxy.1 + + + + javax.net.ssl.trustStore + certs/realhostip.keystore + log.home + ${PWD}/ + + + + + + + + + + diff --git a/services/console-proxy/server/scripts/_run.sh b/systemvm/scripts/_run.sh similarity index 100% rename from services/console-proxy/server/scripts/_run.sh rename to systemvm/scripts/_run.sh diff --git a/services/console-proxy/server/scripts/config_auth.sh b/systemvm/scripts/config_auth.sh similarity index 100% rename from services/console-proxy/server/scripts/config_auth.sh rename to systemvm/scripts/config_auth.sh diff --git a/services/console-proxy/server/scripts/config_ssl.sh b/systemvm/scripts/config_ssl.sh similarity index 100% rename from services/console-proxy/server/scripts/config_ssl.sh rename to systemvm/scripts/config_ssl.sh diff --git a/services/console-proxy/server/scripts/consoleproxy.sh b/systemvm/scripts/consoleproxy.sh similarity index 100% rename from services/console-proxy/server/scripts/consoleproxy.sh rename to systemvm/scripts/consoleproxy.sh diff --git a/services/console-proxy/server/scripts/ipfirewall.sh b/systemvm/scripts/ipfirewall.sh similarity index 100% rename from services/console-proxy/server/scripts/ipfirewall.sh rename to systemvm/scripts/ipfirewall.sh diff --git a/services/console-proxy/server/scripts/run-proxy.sh 
b/systemvm/scripts/run-proxy.sh similarity index 100% rename from services/console-proxy/server/scripts/run-proxy.sh rename to systemvm/scripts/run-proxy.sh diff --git a/services/console-proxy/server/scripts/run.bat b/systemvm/scripts/run.bat similarity index 100% rename from services/console-proxy/server/scripts/run.bat rename to systemvm/scripts/run.bat diff --git a/services/console-proxy/server/scripts/run.sh b/systemvm/scripts/run.sh similarity index 100% rename from services/console-proxy/server/scripts/run.sh rename to systemvm/scripts/run.sh diff --git a/services/console-proxy/server/scripts/secstorage.sh b/systemvm/scripts/secstorage.sh similarity index 100% rename from services/console-proxy/server/scripts/secstorage.sh rename to systemvm/scripts/secstorage.sh diff --git a/services/secondary-storage/scripts/ssvm-check.sh b/systemvm/scripts/ssvm-check.sh similarity index 100% rename from services/secondary-storage/scripts/ssvm-check.sh rename to systemvm/scripts/ssvm-check.sh diff --git a/services/console-proxy/server/systemvm-descriptor.xml b/systemvm/systemvm-descriptor.xml similarity index 85% rename from services/console-proxy/server/systemvm-descriptor.xml rename to systemvm/systemvm-descriptor.xml index 6c98d2d3eb0..1a943b0509e 100644 --- a/services/console-proxy/server/systemvm-descriptor.xml +++ b/systemvm/systemvm-descriptor.xml @@ -31,22 +31,7 @@ - ../../../scripts/storage/secondary/ - scripts/storage/secondary - 555 - 555 - - - ../../secondary-storage/scripts/ - - 555 - 555 - - ssvm-check.sh - - - - ../../../scripts/storage/secondary/ + ../scripts/storage/secondary/ scripts/storage/secondary 555 555 diff --git a/services/console-proxy/server/ui/viewer-bad-sid.ftl b/systemvm/ui/viewer-bad-sid.ftl similarity index 100% rename from services/console-proxy/server/ui/viewer-bad-sid.ftl rename to systemvm/ui/viewer-bad-sid.ftl diff --git a/services/console-proxy/server/ui/viewer-connect-failed.ftl b/systemvm/ui/viewer-connect-failed.ftl similarity index 
100% rename from services/console-proxy/server/ui/viewer-connect-failed.ftl rename to systemvm/ui/viewer-connect-failed.ftl diff --git a/services/console-proxy/server/ui/viewer-update.ftl b/systemvm/ui/viewer-update.ftl similarity index 100% rename from services/console-proxy/server/ui/viewer-update.ftl rename to systemvm/ui/viewer-update.ftl diff --git a/services/console-proxy/server/ui/viewer.ftl b/systemvm/ui/viewer.ftl similarity index 100% rename from services/console-proxy/server/ui/viewer.ftl rename to systemvm/ui/viewer.ftl diff --git a/services/console-proxy/server/vm-script/vmops b/systemvm/vm-script/vmops similarity index 100% rename from services/console-proxy/server/vm-script/vmops rename to systemvm/vm-script/vmops diff --git a/test/integration/component/maint/test_egress_rules_host_maintenance.py b/test/integration/component/maint/test_egress_rules_host_maintenance.py new file mode 100644 index 00000000000..6f0f768d37c --- /dev/null +++ b/test/integration/component/maint/test_egress_rules_host_maintenance.py @@ -0,0 +1,290 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +""" P1 for Egresss & Ingress rules +""" +#Import Local Modules +import marvin +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin.remoteSSHClient import remoteSSHClient +from marvin.integration.lib.utils import * +from marvin.integration.lib.base import * +from marvin.integration.lib.common import * + +#Import System modules +import time +import subprocess + + +class Services: + """Test Security groups Services + """ + + def __init__(self): + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended in create account to + # ensure unique username generated each time + "password": "password", + }, + "virtual_machine": { + # Create a small virtual machine instance with disk offering + "displayname": "Test VM", + "username": "root", # VM creds for SSH + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + "userdata": 'This is sample data', + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, # in MHz + "memory": 128, # In MBs + }, + "security_group": { + "name": 'SSH', + "protocol": 'TCP', + "startport": 22, + "endport": 22, + "cidrlist": '0.0.0.0/0', + }, + "ostype": 'CentOS 5.3 (64-bit)', + # CentOS 5.3 (64-bit) + "sleep": 60, + "timeout": 10, + } + + +class TestEgressAfterHostMaintenance(cloudstackTestCase): + + def setUp(self): + + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + return + + def tearDown(self): + try: + #Clean up, terminate the created templates + cleanup_resources(self.apiclient, self.cleanup) + + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @classmethod + def setUpClass(cls): + 
cls.services = Services().services + cls.api_client = super( + TestEgressAfterHostMaintenance, + cls + ).getClsTestClient().getApiClient() + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.services['mode'] = cls.zone.networktype + cls.pod = get_pod( + cls.api_client, + zoneid=cls.zone.id + ) + + template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + cls.services["domainid"] = cls.domain.id + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["virtual_machine"]["template"] = template.id + + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=cls.domain.id + ) + cls.services["account"] = cls.account.name + cls._cleanup = [ + cls.account, + cls.service_offering + ] + return + + @classmethod + def tearDownClass(cls): + try: + #Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + return + + @attr(speed = "slow") + @attr(tags = ["sg", "eip", "maintenance"]) + def test_egress_after_host_maintenance(self): + """Test maintenance case for egress + """ + + # Validate the following: + # 1. createaccount of type user + # 2. createsecuritygroup (ssh) for this account + # 3. authorizeSecurityGroupIngress to allow ssh access to the VM + # 4. authorizeSecurityGroupEgress to allow ssh access only out to + # CIDR: 0.0.0.0/0 + # 5. deployVirtualMachine into this security group (ssh) + # 6. deployed VM should be Running, ssh should be allowed into the VM + # 7. Enable maintenance mode for host, cance maintenance mode + # 8. 
User should be able to SSH into VM after maintainace + + security_group = SecurityGroup.create( + self.apiclient, + self.services["security_group"], + account=self.account.name, + domainid=self.account.domainid + ) + self.debug("Created security group with ID: %s" % security_group.id) + + # Default Security group should not have any ingress rule + sercurity_groups = SecurityGroup.list( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid + ) + self.assertEqual( + isinstance(sercurity_groups, list), + True, + "Check for list security groups response" + ) + + self.assertEqual( + len(sercurity_groups), + 2, + "Check List Security groups response" + ) + # Authorize Security group to SSH to VM + self.debug( + "Authorizing ingress rule for sec group ID: %s for ssh access" + % security_group.id) + ingress_rule = security_group.authorize( + self.apiclient, + self.services["security_group"], + account=self.account.name, + domainid=self.account.domainid + ) + + self.assertEqual( + isinstance(ingress_rule, dict), + True, + "Check ingress rule created properly" + ) + + ssh_rule = (ingress_rule["ingressrule"][0]).__dict__ + + # Authorize Security group to SSH to VM + self.debug( + "Authorizing egress rule for sec group ID: %s for ssh access" + % security_group.id) + egress_rule = security_group.authorizeEgress( + self.apiclient, + self.services["security_group"], + account=self.account.name, + domainid=self.account.domainid + ) + + self.assertEqual( + isinstance(egress_rule, dict), + True, + "Check egress rule created properly" + ) + ssh_egress_rule = (egress_rule["egressrule"][0]).__dict__ + + self.virtual_machine = VirtualMachine.create( + self.apiclient, + self.services["virtual_machine"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + securitygroupids=[security_group.id] + ) + self.debug("Deploying VM in account: %s" % self.account.name) + + # Should be able to SSH VM + try: + 
self.debug("SSH into VM: %s" % self.virtual_machine.id) + ssh = self.virtual_machine.get_ssh_client() + except Exception as e: + self.fail("SSH Access failed for %s: %s" % \ + (self.virtual_machine.ipaddress, e) + ) + vms = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine.id, + listall=True + ) + self.assertEqual( + isinstance(vms, list), + True, + "Check list VMs response for valid host" + ) + vm = vms[0] + + self.debug("Enabling host maintenance for ID: %s" % vm.hostid) + cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() + cmd.id = vm.hostid + self.apiclient.prepareHostForMaintenance(cmd) + + self.debug("Canceling host maintenance for ID: %s" % vm.hostid) + cmd = cancelHostMaintenance.cancelHostMaintenanceCmd() + cmd.id = vm.hostid + self.apiclient.cancelHostMaintenance(cmd) + + self.debug("Waiting for SSVMs to come up") + wait_for_ssvms( + self.apiclient, + zoneid=self.zone.id, + podid=self.pod.id, + ) + self.debug("Starting VM: %s" % self.virtual_machine.id) + + self.virtual_machine.start(self.apiclient) + # Should be able to SSH VM + try: + self.debug("SSH into VM: %s" % self.virtual_machine.id) + ssh = self.virtual_machine.get_ssh_client(reconnect=True) + except Exception as e: + self.fail("SSH Access failed for %s: %s" % \ + (self.virtual_machine.ipaddress, e) + ) + return diff --git a/test/integration/component/maint/test_host_high_availability.py b/test/integration/component/maint/test_host_high_availability.py index 5fb047ba6cb..b4c50c7114d 100644 --- a/test/integration/component/maint/test_host_high_availability.py +++ b/test/integration/component/maint/test_host_high_availability.py @@ -616,7 +616,7 @@ class TestHostHighAvailability(cloudstackTestCase): "The virtual machine is not ha enabled so check if VM is created on host which is also not ha enabled" ) - #put the Host in maintainance mode + #put the Host in maintenance mode self.debug("Enabling maintenance mode for host %s" % vm_with_ha_enabled.hostid) cmd = 
prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = vm_with_ha_enabled.hostid @@ -748,7 +748,7 @@ class TestHostHighAvailability(cloudstackTestCase): "The virtual machine is not ha enabled so check if VM is created on host which is also not ha enabled" ) - #put the Host in maintainance mode + #put the Host in maintenance mode self.debug("Enabling maintenance mode for host %s" % vm_with_ha_disabled.hostid) cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = vm_with_ha_disabled.hostid diff --git a/test/integration/component/maint/test_multiple_ip_ranges.py b/test/integration/component/maint/test_multiple_ip_ranges.py new file mode 100644 index 00000000000..dc8021bc91e --- /dev/null +++ b/test/integration/component/maint/test_multiple_ip_ranges.py @@ -0,0 +1,742 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" Tests for Multiple IP Ranges feature +""" +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin.cloudstackException import cloudstackAPIException +from marvin.integration.lib.utils import * +from marvin.integration.lib.base import * +from marvin.integration.lib.common import * +# from netaddr import * +import netaddr +from nose.plugins.attrib import attr + +class Services: + """Test Multiple IP Ranges + """ + def __init__(self): + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended for unique + # username + "password": "password", + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 200, # in MHz + "memory": 256, # In MBs + }, + "disk_offering": { + "displaytext": "Small Disk", + "name": "Small Disk", + "disksize": 1 + }, + "templates": { + "displaytext": 'Template', + "name": 'Template', + "ostype": "CentOS 5.3 (64-bit)", + "templatefilter": 'self', + }, + "vlan_ip_range": { + "startip": "10.147.43.130", + "endip": "10.147.43.135", + "netmask": "255.255.255.192", + "gateway": "10.147.43.129", + "forvirtualnetwork": "false", + "vlan": "untagged", + }, + "server_without_disk": { + "displayname": "Test VM-No Disk", + "username": "root", + "password": "password", + "hypervisor": 'XenServer', + }, + "host": { + "publicport": 22, + "username": "root", # Host creds for SSH + "password": "password", + }, + "ostype": "CentOS 5.3 (64-bit)", + "sleep": 60, + "timeout": 10, + } + +class TestMultipleIpRanges(cloudstackTestCase): + """Test Multiple IP Ranges for guest network + """ + @classmethod + def setUpClass(cls): + cls.api_client = super(TestMultipleIpRanges, cls).getClsTestClient().getApiClient() + cls.dbclient = super(TestMultipleIpRanges, cls).getClsTestClient().getDbConnection() + cls.services = Services().services + # Get Zone, Domain and templates + 
cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.pod = get_pod(cls.api_client, cls.zone.id, cls.services) + cls.services['mode'] = cls.zone.networktype + cls.services["domainid"] = cls.domain.id + cls.services["zoneid"] = cls.zone.id + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=cls.domain.id + ) + cls.services["account"] = cls.account.name + cls.disk_offering = DiskOffering.create( + cls.api_client, + cls.services["disk_offering"] + ) + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + cls.services["templates"]["ostypeid"] = cls.template.ostypeid + cls.services["diskoffering"] = cls.disk_offering.id + cls.dc_id = cls.dbclient.execute( + "select id from data_center where uuid = '%s';" % str(cls.services["zoneid"]) + ) + cls.dc_id = cls.dc_id[0][0] + cls.ids = cls.dbclient.execute( + "select id from user_ip_address where allocated is null and data_center_id = '%s';" % str(cls.dc_id) + ) + cls.id_list = [] + for i in range(len(cls.ids)): + cls.id_list.append(cls.ids[i][0]) + # Check if VR is already present in the setup + vr_list = Router.list(cls.api_client, listall='true') + cls.debug("vr list {}".format(vr_list)) + if isinstance(vr_list, list) and len(vr_list) > 0: + cls.debug("VR is running in the setup") + cls.vr_state = True + else: + cls.debug("VR is not present in the setup") + cls.vr_state = False + cls.id_list = cls.id_list[:-2] + for id in cls.id_list: + cls.dbclient.execute( + "update user_ip_address set allocated=now() where id = '%s';" % str(id) + ) + # Add IP range in the new CIDR + cls.services["vlan_ip_range"]["zoneid"] = cls.zone.id + cls.services["vlan_ip_range"]["podid"] = cls.pod.id + # create new vlan ip range + # Before creating ip range check the zone's network type + if cls.zone.networktype == 
'Basic': + cls.new_vlan = PublicIpRange.create(cls.api_client, cls.services["vlan_ip_range"]) + else: + raise unittest.SkipTest("These tests can be run only on basic zone. So skipping the tests") + # Deploy vm in existing subnet if VR is not present + if cls.vr_state is False : + cls.vm_res = VirtualMachine.create( + cls.api_client, + cls.services["server_without_disk"], + templateid=cls.template.id, + accountid=cls.account.name, + domainid=cls.services["domainid"], + zoneid=cls.services["zoneid"], + serviceofferingid=cls.service_offering.id, + mode=cls.services["mode"], + ) + cls._cleanup = [ + cls.new_vlan, + cls.account, + ] + return + + @classmethod + def tearDownClass(cls): + try: + for id in cls.id_list: + cls.dbclient.execute( + "update user_ip_address set allocated=default where id = '%s';" % str(id) + ) + # Wait for expunge interval to cleanup VMs + wait_for_cleanup(cls.api_client, ["expunge.delay", "expunge.interval"]) + time.sleep(30) + # Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [ ] + # Deploy guest vm + try : + self.virtual_machine = VirtualMachine.create( + self.apiclient, + self.services["server_without_disk"], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.services["domainid"], + zoneid=self.services["zoneid"], + serviceofferingid=self.service_offering.id, + mode=self.services["mode"], + ) + except Exception as e : + raise Exception("Warning: Exception during vm deployment: {}".format(e)) + self.vm_response = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine.id + ) + self.assertEqual( + isinstance(self.vm_response, list), + True, + "Check VM list response returned a valid list" + ) + self.ip_range = 
list(netaddr.iter_iprange(unicode(self.services["vlan_ip_range"]["startip"]), unicode(self.services["vlan_ip_range"]["endip"]))) + self.nic_ip = netaddr.IPAddress(unicode(self.vm_response[0].nic[0].ipaddress)) + self.debug("vm got {} as ip address".format(self.nic_ip)) + self.assertIn( + self.nic_ip, + self.ip_range, + "VM did not get the ip address from the new ip range" + ) + ip_alias = self.dbclient.execute( + "select ip4_address from nic_ip_alias;" + ) + self.alias_ip = str(ip_alias[0][0]) + self.debug("alias ip : %s" % self.alias_ip) + self.assertNotEqual( + self.alias_ip, + None, + "Error in creating ip alias. Please check MS logs" + ) + self.cleanup.append(self.virtual_machine) + return + + def tearDown(self): + try: + # Clean up, terminate the resources created + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def verify_vlan_range(self, vlan, services): + # compare vlan_list response with configured values + self.assertEqual( + isinstance(vlan, list), + True, + "Check list response returned a valid list" + ) + self.assertNotEqual( + len(vlan), + 0, + "check list vlan response" + ) + self.assertEqual( + str(vlan[0].startip), + str(services["startip"]), + "Start IP in vlan ip range is not matched with the configured start ip" + ) + self.assertEqual( + str(vlan[0].endip), + str(services["endip"]), + "End IP in vlan ip range is not matched with the configured end ip" + ) + self.assertEqual( + str(vlan[0].gateway), + str(services["gateway"]), + "gateway in vlan ip range is not matched with the configured gateway" + ) + self.assertEqual( + str(vlan[0].netmask), + str(services["netmask"]), + "netmask in vlan ip range is not matched with the configured netmask" + ) + return + + @attr(tags=["sg"]) + def test_01_deploy_vm_in_new_cidr(self): + """Deploy guest vm after adding guest IP range in new CIDR + 1.Deploy guest vm + 2.Verify vm gets the ip address from new 
cidr + """ + self.ip_range = list(netaddr.iter_iprange(unicode(self.services["vlan_ip_range"]["startip"]), unicode(self.services["vlan_ip_range"]["endip"]))) + self.nic_ip = netaddr.IPAddress(unicode(self.vm_response[0].nic[0].ipaddress)) + self.debug("vm got {} as ip address".format(self.nic_ip)) + self.assertIn( + self.nic_ip, + self.ip_range, + "VM did not get the ip address from the new ip range" + ) + return + + @attr(tags=["sg"]) + def test_02_dns_service_on_alias_ip(self): + """Deploy guest vm in new CIDR and verify dns service on alias ip + 1.Deploy guest vm in new cidr + 2.Verify dns service listens on alias ip in VR + """ + list_router_response = list_routers( + self.apiclient, + zoneid=self.zone.id, + listall=True + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "Check list response returns a valid list" + ) + router = list_router_response[0] + hosts = list_hosts( + self.apiclient, + zoneid=router.zoneid, + type='Routing', + state='Up', + id=router.hostid + ) + self.assertEqual( + isinstance(hosts, list), + True, + "Check list host returns a valid list" + ) + host = hosts[0] + self.debug("Router ID: %s, state: %s" % (router.id, router.state)) + self.assertEqual( + router.state, + 'Running', + "Check list router response for router state" + ) + proc = self.alias_ip + ":53" + result = get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + "netstat -atnp | grep %s" % proc + ) + res = str(result) + self.debug("Dns process status on alias ip: %s" % res) + self.assertNotEqual( + res.find(proc) + - 1, + "dnsmasq service is not running on alias ip" + ) + return + + @attr(tags=["sg"]) + def test_03_passwd_service_on_alias_IP(self): + """Deploy guest vm in new CIDR and verify passwd service on alias ip + 1.Deploy guest vm in new cidr + 2.Verify password service(socat) listens on alias ip in VR + """ + 
list_router_response = list_routers( + self.apiclient, + zoneid=self.zone.id, + listall=True + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "Check list response returns a valid list" + ) + router = list_router_response[0] + hosts = list_hosts( + self.apiclient, + zoneid=router.zoneid, + type='Routing', + state='Up', + id=router.hostid + ) + self.assertEqual( + isinstance(hosts, list), + True, + "Check list host returns a valid list" + ) + host = hosts[0] + self.debug("Router ID: %s, state: %s" % (router.id, router.state)) + self.assertEqual( + router.state, + 'Running', + "Check list router response for router state" + ) + proc = "socat" + result = get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + "netstat -atnp | grep %s" % proc + ) + res = str(result) + self.debug("password process status on VR: %s" % res) + self.assertNotEqual( + res.find(self.alias_ip) + - 1, + "password service is not running on alias ip" + ) + return + + @attr(tags=["sg"]) + def test_04_userdata_service_on_alias_IP(self): + """Deploy guest vm in new CIDR and verify userdata service on alias ip + 1.Deploy guest vm in new cidr + 2.Verify userdata service(apache2) listens on alias ip in VR + """ + list_router_response = list_routers( + self.apiclient, + zoneid=self.zone.id, + listall=True + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "Check list response returns a valid list" + ) + router = list_router_response[0] + hosts = list_hosts( + self.apiclient, + zoneid=router.zoneid, + type='Routing', + state='Up', + id=router.hostid + ) + self.assertEqual( + isinstance(hosts, list), + True, + "Check list host returns a valid list" + ) + host = hosts[0] + self.debug("Router ID: %s, state: %s" % (router.id, router.state)) + self.assertEqual( + router.state, + 'Running', + "Check list router response for router state" + ) + 
proc = "apache2" + result = get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + "netstat -atnp | grep %s" % proc + ) + res = str(result) + self.debug("userdata process status on VR: %s" % res) + self.assertNotEqual( + res.find(self.alias_ip + ":80 ") + - 1, + "password service is not running on alias ip" + ) + return + + @attr(tags=["sg"]) + def test_05_del_cidr_verify_alias_removal(self): + """Destroy lastvm in the CIDR and verifly alias removal + 1.Deploy guest vm in new cidr + 2.Verify ip alias creation + 3.Destroy vm and wait for it to expunge + 4.Verify ip alias removal after vm expunge + """ + list_router_response = list_routers( + self.apiclient, + zoneid=self.zone.id, + listall=True + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "Check list response returns a valid list" + ) + router = list_router_response[0] + hosts = list_hosts( + self.apiclient, + zoneid=router.zoneid, + type='Routing', + state='Up', + id=router.hostid + ) + self.assertEqual( + isinstance(hosts, list), + True, + "Check list host returns a valid list" + ) + host = hosts[0] + self.debug("Router ID: %s, state: %s" % (router.id, router.state)) + self.assertEqual( + router.state, + 'Running', + "Check list router response for router state" + ) + proc = "ip addr show eth0" + result = get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + proc + ) + res = str(result) + self.debug("ip alias configuration on VR: %s" % res) + self.assertNotEqual( + res.find(self.alias_ip) + - 1, + "ip alias is not created on VR eth0" + ) + self.virtual_machine.delete(self.apiclient) + expunge_del = Configurations.list( + self.apiclient, + name='expunge.delay' + ) + expunge_int = Configurations.list( + self.apiclient, + name='expunge.interval' + ) 
+ wait_time = int(expunge_del[0].value) + int(expunge_int[0].value) + int(30) + self.debug("Waiting for {} seconds for the vm to expunge".format(wait_time)) + # wait for the vm to expunge + time.sleep(wait_time) + self.debug("Verify that expunging the last vm in the CIDR should delete the ip alias from VR") + ip_alias2 = self.dbclient.execute( + "select ip4_address from nic_ip_alias;" + ) + self.assertEqual( + isinstance(ip_alias2, list), + True, + "Error in sql query" + ) + self.assertEqual( + len(ip_alias2), + 0, + "Failure in clearing ip alias entry from cloud db" + ) + proc = "ip addr show eth0" + result = get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + proc + ) + res = str(result) + self.assertEqual( + res.find(self.alias_ip), + - 1, + "Failed to clean up ip alias from VR even after last vm expunge in the CIDR" + ) + self.debug("IP alias got deleted from VR successfully.") + self.cleanup.remove(self.virtual_machine) + return + + @attr(tags=["sg"]) + def test_06_reboot_VR_verify_ip_alias(self): + """Reboot VR and verify ip alias + 1.Deploy guest vm in new cidr + 2.Verify ip alias creation + 3.Reboot VR + 4.Verify ip alias on VR + """ + list_router_response = list_routers( + self.apiclient, + zoneid=self.zone.id, + listall=True + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "Check list response returns a valid list" + ) + router = list_router_response[0] + hosts = list_hosts( + self.apiclient, + zoneid=router.zoneid, + type='Routing', + state='Up', + id=router.hostid + ) + self.assertEqual( + isinstance(hosts, list), + True, + "Check list host returns a valid list" + ) + host = hosts[0] + self.debug("Router ID: %s, state: %s" % (router.id, router.state)) + self.assertEqual( + router.state, + 'Running', + "Check list router response for router state" + ) + proc = "ip addr show eth0" + result = 
get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + proc + ) + res = str(result) + self.debug("ip alias configuration on VR: %s" % res) + self.assertNotEqual( + res.find(self.alias_ip) + - 1, + "ip alias is not created on VR eth0" + ) + resp = Router.reboot( + self.apiclient, + router.id + ) + self.debug("Reboot router api response: %s" % resp) + list_router_response = list_routers( + self.apiclient, + zoneid=self.zone.id, + listall=True + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "Check list response returns a valid list" + ) + router = list_router_response[0] + self.assertEqual( + router.state, + 'Running', + "Router is not in running state after reboot" + ) + result = get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + proc + ) + res = str(result) + self.assertNotEqual( + res.find(self.alias_ip), + - 1, + "IP alias not present on VR after VR reboot" + ) + return + + @attr(tags=["sg"]) + def test_07_stop_start_VR_verify_ip_alias(self): + """Reboot VR and verify ip alias + 1.Deploy guest vm in new cidr + 2.Verify ip alias creation + 3.Stop and Start VR + 4.Verify ip alias on VR + """ + list_router_response = list_routers( + self.apiclient, + zoneid=self.zone.id, + listall=True + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "Check list response returns a valid list" + ) + router = list_router_response[0] + hosts = list_hosts( + self.apiclient, + zoneid=router.zoneid, + type='Routing', + state='Up', + id=router.hostid + ) + self.assertEqual( + isinstance(hosts, list), + True, + "Check list host returns a valid list" + ) + host = hosts[0] + self.debug("Router ID: %s, state: %s" % (router.id, router.state)) + self.assertEqual( + router.state, + 'Running', + "Check 
list router response for router state" + ) + proc = "ip addr show eth0" + result = get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + proc + ) + res = str(result) + self.debug("ip alias configuration on VR: %s" % res) + self.assertNotEqual( + res.find(self.alias_ip) + - 1, + "ip alias is not created on VR eth0" + ) + self.debug("Stopping VR") + stop_res = Router.stop( + self.apiclient, + router.id, + ) + self.debug("Starting VR") + start_res = Router.start( + self.apiclient, + router.id + ) + list_router_response = list_routers( + self.apiclient, + zoneid=self.zone.id, + listall=True + ) + self.assertEqual( + isinstance(list_router_response, list), + True, + "Check list response returns a valid list" + ) + router = list_router_response[0] + self.assertEqual( + router.state, + 'Running', + "Router is not in running state after reboot" + ) + self.debug("VR is up and Running") + result = get_process_status( + host.ipaddress, + self.services['host']["publicport"], + self.services['host']["username"], + self.services['host']["password"], + router.linklocalip, + proc + ) + res = str(result) + self.assertNotEqual( + res.find(self.alias_ip), + - 1, + "IP alias not present on VR after VR stop and start" + ) + return diff --git a/test/integration/component/memory_limits/test_domain_limits.py b/test/integration/component/memory_limits/test_domain_limits.py index 479ec0ba3e9..d87db84a470 100644 --- a/test/integration/component/memory_limits/test_domain_limits.py +++ b/test/integration/component/memory_limits/test_domain_limits.py @@ -32,7 +32,8 @@ from marvin.integration.lib.common import (get_domain, cleanup_resources, wait_for_cleanup, find_suitable_host, - get_resource_type + get_resource_type, + update_resource_count ) class Services: @@ -219,12 +220,16 @@ class TestDomainMemoryLimits(cloudstackTestCase): self.debug("Setting up account and domain 
hierarchy") self.setupAccounts() - users = { self.domain: self.admin, - self.child_domain: self.child_do_admin + users = { self.child_domain_1: self.child_do_admin_1, + self.child_domain_2: self.child_do_admin_2 } for domain, admin in users.items(): self.account = admin self.domain = domain + + #Resetting memory count in service offering + self.services["service_offering"]["memory"] = 5120 + self.debug("Creating an instance with service offering: %s" % self.service_offering.name) @@ -258,7 +263,7 @@ class TestDomainMemoryLimits(cloudstackTestCase): ) resource_count_after_stop = account_list[0].memorytotal - self.asserEqual(resource_count_after_stop, expected_resource_count, + self.assertEqual(resource_count_after_stop, expected_resource_count, "Resource count should be same after stopping the instance") self.debug("Creating service offering with 7 GB RAM") @@ -283,6 +288,8 @@ class TestDomainMemoryLimits(cloudstackTestCase): self.fail("Failed to change service offering of vm %s - %s" % (vm.name, e)) + update_resource_count(self.apiclient, domainid=self.domain.id, rtype=9) #RAM + account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, @@ -308,13 +315,18 @@ class TestDomainMemoryLimits(cloudstackTestCase): self.fail("Failed to change service offering of vm %s - %s" % (vm.name, e)) + update_resource_count(self.apiclient, domainid=self.domain.id, rtype=9) #RAM + account_list = Account.list(self.apiclient, id=self.account.id) self.assertIsInstance(account_list, list, "List Accounts should return a valid response" ) + resource_count_after_downgrade = account_list[0].memorytotal + self.debug(resource_count_after_downgrade) + self.assertTrue(resource_count_after_downgrade < resource_count_after_upgrade, "Resource count should be less than before, after downgrading service offering") @@ -344,10 +356,13 @@ class TestDomainMemoryLimits(cloudstackTestCase): # 2. List Resource count for the root admin Memory usage # 3. 
Migrate vm to another host, resource count should list properly. + #Resetting memory count in service offering + self.services["service_offering"]["memory"] = 5120 + self.debug("Setting up account and domain hierarchy") self.setupAccounts() - users = { self.domain: self.admin, - self.child_domain: self.child_do_admin + users = { self.child_domain_1: self.child_do_admin_1, + self.child_domain_2: self.child_do_admin_2 } for domain, admin in users.items(): self.account = admin @@ -400,10 +415,13 @@ class TestDomainMemoryLimits(cloudstackTestCase): # 2. List Resource count for the root admin Memory usage # 3. Delete vm, resource count should list as 0 after delete operation. + # Resetting the memory count of service offering + self.services["service_offering"]["memory"] = 5120 + self.debug("Setting up account and domain hierarchy") self.setupAccounts() - users = { self.domain: self.admin, - self.child_domain: self.child_do_admin + users = { self.child_domain_1: self.child_do_admin_1, + self.child_domain_2: self.child_do_admin_2 } for domain, admin in users.items(): self.account = admin @@ -457,10 +475,13 @@ class TestDomainMemoryLimits(cloudstackTestCase): # 3. List Resource count for the root admin Memory usage # 4. 
Memory usage should list properly + # Resetting the memory count of service offering + self.services["service_offering"]["memory"] = 5120 + self.debug("Setting up account and domain hierarchy") self.setupAccounts() - users = { self.domain: self.admin, - self.child_domain: self.child_do_admin + users = { self.child_domain_1: self.child_do_admin_1, + self.child_domain_2: self.child_do_admin_2 } for domain, admin in users.items(): self.account = admin @@ -496,7 +517,6 @@ class TestDomainMemoryLimits(cloudstackTestCase): vm_3.delete(self.apiclient) return - class TestMultipleChildDomainsMemory(cloudstackTestCase): @classmethod @@ -507,7 +527,7 @@ class TestMultipleChildDomainsMemory(cloudstackTestCase): # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client, cls.services) cls.zone = get_zone(cls.api_client, cls.services) - cls.services["mode"] = cls.zone.networktype + cls.services["mode"] = cls.zone.networktype cls.template = get_template( cls.api_client, @@ -548,8 +568,8 @@ class TestMultipleChildDomainsMemory(cloudstackTestCase): self.debug("Deploying an instance in account: %s" % account.name) - if api_client is None: - api_client = self.apiclient + if api_client is None: + api_client = self.apiclient try: vm = VirtualMachine.create( @@ -644,12 +664,12 @@ class TestMultipleChildDomainsMemory(cloudstackTestCase): account=self.cadmin_2.name, domainid=self.cadmin_2.domainid) - # Cleanup the resources created at end of test + # Cleanup the resources created at end of test self.cleanup.append(self.cadmin_1) self.cleanup.append(self.cadmin_2) self.cleanup.append(self.cdomain_1) - self.cleanup.append(self.cdomain_2) - self.cleanup.append(self.parentd_admin) + self.cleanup.append(self.cdomain_2) + self.cleanup.append(self.parentd_admin) self.cleanup.append(self.parent_domain) users = { @@ -687,11 +707,11 @@ class TestMultipleChildDomainsMemory(cloudstackTestCase): self.debug("Setting up account and domain hierarchy") self.setupAccounts() - 
api_client_cadmin_1 = self.testClient.createUserApiClient( + api_client_cadmin_1 = self.testClient.createUserApiClient( UserName=self.cadmin_1.name, DomainName=self.cadmin_1.domain) - api_client_cadmin_2 = self.testClient.createUserApiClient( + api_client_cadmin_2 = self.testClient.createUserApiClient( UserName=self.cadmin_2.name, DomainName=self.cadmin_2.domain) diff --git a/test/integration/component/test_accounts.py b/test/integration/component/test_accounts.py index 1af408e03c9..4c73c3acc84 100644 --- a/test/integration/component/test_accounts.py +++ b/test/integration/component/test_accounts.py @@ -77,13 +77,14 @@ class Services: "template": { "displaytext": "Public Template", "name": "Public template", - "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2", - "hypervisor": 'XenServer', - "format": 'VHD', + "ostype": 'CentOS 5.3 (64-bit)', + "url": "", + "hypervisor": '', + "format": '', "isfeatured": True, "ispublic": True, "isextractable": True, - "ostype": 'CentOS 5.3 (64-bit)', + "templatefilter": "self" }, "natrule": { "publicport": 22, @@ -731,8 +732,7 @@ class TestTemplateHierarchy(cloudstackTestCase): cls.services = Services().services # Get Zone settings cls.zone = get_zone(cls.api_client, cls.services) - cls.services['mode'] = cls.zone.networktype - cls.services["template"]["zoneid"] = cls.zone.id + cls.services['mode'] = cls.zone.networktype # Create domains, accounts and template cls.domain_1 = Domain.create( @@ -761,11 +761,18 @@ class TestTemplateHierarchy(cloudstackTestCase): domainid=cls.domain_2.id ) + builtin_info = get_builtin_template_info(cls.api_client, cls.zone.id) + cls.services["template"]["url"] = builtin_info[0] + cls.services["template"]["hypervisor"] = builtin_info[1] + cls.services["template"]["format"] = builtin_info[2] + + # Register new template cls.template = Template.register( - cls.api_client, - cls.services["template"], - account=cls.account_1.name, - domainid=cls.domain_1.id + cls.api_client, 
+ cls.services["template"], + zoneid=cls.zone.id, + account=cls.account_1.name, + domainid=cls.domain_1.id ) # Wait for template to download diff --git a/test/integration/component/test_blocker_bugs.py b/test/integration/component/test_blocker_bugs.py index 2cdc2707020..62800f8be03 100644 --- a/test/integration/component/test_blocker_bugs.py +++ b/test/integration/component/test_blocker_bugs.py @@ -67,14 +67,14 @@ class Services: "publicport": 22, "protocol": 'TCP', }, - "templates": { + "template": { "displaytext": 'Template from snapshot', "name": 'Template from snapshot', "ostype": 'CentOS 5.3 (64-bit)', "templatefilter": 'self', - "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2", - "hypervisor": 'XenServer', - "format": 'VHD', + "url": "", + "hypervisor": '', + "format": '', "isfeatured": True, "ispublic": True, "isextractable": True, @@ -118,7 +118,7 @@ class TestTemplate(cloudstackTestCase): cls.zone = get_zone(cls.api_client, cls.services) cls.services['mode'] = cls.zone.networktype cls.services["virtual_machine"]["zoneid"] = cls.zone.id - cls.services["templates"]["zoneid"] = cls.zone.id + cls.services["template"]["zoneid"] = cls.zone.id cls.service_offering = ServiceOffering.create( cls.api_client, @@ -160,16 +160,26 @@ class TestTemplate(cloudstackTestCase): #2. Deploy VM using this template #3. Deploy VM should return password set in template. 
+ builtin_info = get_builtin_template_info(self.apiclient, self.zone.id) + self.services["template"]["url"] = builtin_info[0] + self.services["template"]["hypervisor"] = builtin_info[1] + self.services["template"]["format"] = builtin_info[2] + self.debug("Registering a new template") + # Register new template template = Template.register( - self.apiclient, - self.services["templates"], - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid - ) - self.debug("Registering template with ID: %s" % template.id) + self.apiclient, + self.services["template"], + zoneid=self.zone.id, + account=self.account.name, + domainid=self.account.domainid + ) + self.debug( + "Registered a template of format: %s with ID: %s" % ( + self.services["template"]["format"], + template.id + )) try: # Wait for template to download template.download(self.apiclient) @@ -185,7 +195,7 @@ class TestTemplate(cloudstackTestCase): list_template_response = Template.list( self.apiclient, templatefilter=\ - self.services["templates"]["templatefilter"], + self.services["template"]["templatefilter"], id=template.id, zoneid=self.zone.id ) @@ -796,7 +806,7 @@ class TestTemplates(cloudstackTestCase): #Create template from volume template = Template.create( self.apiclient, - self.services["templates"], + self.services["template"], self.volume.id, account=self.account.name, domainid=self.account.domainid @@ -856,7 +866,7 @@ class TestTemplates(cloudstackTestCase): template = Template.create_from_snapshot( self.apiclient, snapshot, - self.services["templates"] + self.services["template"] ) self.cleanup.append(template) @@ -864,7 +874,7 @@ class TestTemplates(cloudstackTestCase): templates = Template.list( self.apiclient, templatefilter=\ - self.services["templates"]["templatefilter"], + self.services["template"]["templatefilter"], id=template.id ) self.assertEqual( @@ -938,14 +948,14 @@ class TestTemplates(cloudstackTestCase): template = Template.create_from_snapshot( self.apiclient, 
snapshot, - self.services["templates"], + self.services["template"], random_name=False ) self.debug("Created template from snapshot: %s" % template.id) templates = Template.list( self.apiclient, templatefilter=\ - self.services["templates"]["templatefilter"], + self.services["template"]["templatefilter"], id=template.id ) self.assertEqual( @@ -977,14 +987,14 @@ class TestTemplates(cloudstackTestCase): template = Template.create_from_snapshot( self.apiclient, snapshot, - self.services["templates"], + self.services["template"], random_name=False ) templates = Template.list( self.apiclient, templatefilter=\ - self.services["templates"]["templatefilter"], + self.services["template"]["templatefilter"], id=template.id ) self.assertEqual( @@ -1000,7 +1010,7 @@ class TestTemplates(cloudstackTestCase): self.assertEqual( templates[0].name, - self.services["templates"]["name"], + self.services["template"]["name"], "Check the name of the template" ) return diff --git a/test/integration/component/test_egress_fw_rules.py b/test/integration/component/test_egress_fw_rules.py index 5c18f9c10a2..253cc46cab9 100644 --- a/test/integration/component/test_egress_fw_rules.py +++ b/test/integration/component/test_egress_fw_rules.py @@ -42,16 +42,6 @@ from marvin.cloudstackAPI.deleteEgressFirewallRule import deleteEgressFirewallRu from marvin.remoteSSHClient import remoteSSHClient import time -def log_test_exceptions(func): - def test_wrap_exception_log(self, *args, **kwargs): - try: - func(self, *args, **kwargs) - except Exception as e: - self.debug('Test %s Failed due to Exception=%s' % (func, e)) - raise e - test_wrap_exception_log.__doc__ = func.__doc__ - return test_wrap_exception_log - class Services: """Test service data: Egress Firewall rules Tests for Advance Zone. 
""" @@ -117,6 +107,7 @@ class Services: "serviceCapabilityList": { "SourceNat": { "SupportedSourceNatTypes": "peraccount", + "RedundantRouter": "true" } }, }, @@ -186,7 +177,7 @@ class TestEgressFWRules(cloudstackTestCase): if RR: self.debug("Redundant Router Enabled") - self.services["network_offering"]["serviceCapabilityList"]["RedundantRouter"] = "true" + self.services["network_offering"]["serviceCapabilityList"]["SourceNat"]["RedundantRouter"] = "true" self.network_offering = NetworkOffering.create(self.apiclient, self.services["network_offering"], @@ -288,16 +279,31 @@ class TestEgressFWRules(cloudstackTestCase): ssh.execute('chmod +x %s' % script_file) self.debug("%s %s" % (script_file, exec_cmd_params)) - self.debug('sleep %s seconds for egress rule to affect on Router.' % self.services['sleep']) - time.sleep(self.services['sleep']) - - result = ssh.execute("%s %s" % (script_file, exec_cmd_params)) - self.debug('Result is=%s' % result) - exec_success = False - if str(result).strip() == expected_result: - self.debug('script executed successfully exec_success=True') - exec_success = True + #Timeout set to 3 minutes + timeout = 180 + while timeout: + self.debug('sleep %s seconds for egress rule to affect on Router.' % self.services['sleep']) + time.sleep(self.services['sleep']) + result = ssh.execute("%s %s" % (script_file, exec_cmd_params)) + self.debug('Result is=%s' % result) + self.debug('Expected result is=%s' % expected_result) + + if str(result).strip() == expected_result: + exec_success = True + break + else: + if result == []: + self.fail("Router is not accessible") + # This means router network did not come up as yet loop back. 
+ if "send" in result[0]: + timeout -= self.services['sleep'] + else: # Failed due to some other error + break + #end while + + if timeout == 0: + self.fail("Router network failed to come up after 3 minutes.") ssh.execute('rm -rf %s' % script_file) @@ -371,18 +377,17 @@ class TestEgressFWRules(cloudstackTestCase): self.virtual_machine.delete(self.apiclient) wait_for_cleanup(self.apiclient, ["expunge.interval", "expunge.delay"]) self.debug("Sleep for VM cleanup to complete.") - time.sleep(self.services['sleep']) + #time.sleep(self.services['sleep']) self.network.delete(self.apiclient) - wait_for_cleanup(self.apiclient, ["network.gc.wait", "network.gc.interval"]) self.debug("Sleep for Network cleanup to complete.") - time.sleep(self.services['sleep']) + wait_for_cleanup(self.apiclient, ["network.gc.wait", "network.gc.interval"]) + #time.sleep(self.services['sleep']) cleanup_resources(self.apiclient, reversed(self.cleanup)) self.debug("Cleanup complete!") except Exception as e: self.debug("Warning! Exception in tearDown: %s" % e) @attr(tags = ["advanced"]) - @log_test_exceptions def test_01_egress_fr1(self): """Test By-default the communication from guest n/w to public n/w is allowed. """ @@ -398,7 +403,6 @@ class TestEgressFWRules(cloudstackTestCase): negative_test=False) @attr(tags = ["advanced"]) - @log_test_exceptions def test_01_1_egress_fr1(self): """Test By-default the communication from guest n/w to public n/w is NOT allowed. """ @@ -415,7 +419,6 @@ class TestEgressFWRules(cloudstackTestCase): @attr(tags = ["advanced"]) - @log_test_exceptions def test_02_egress_fr2(self): """Test Allow Communication using Egress rule with CIDR + Port Range + Protocol. """ @@ -433,7 +436,6 @@ class TestEgressFWRules(cloudstackTestCase): negative_test=False) @attr(tags = ["advanced"]) - @log_test_exceptions def test_02_1_egress_fr2(self): """Test Allow Communication using Egress rule with CIDR + Port Range + Protocol. 
""" @@ -450,9 +452,7 @@ class TestEgressFWRules(cloudstackTestCase): "['0']", negative_test=False) - @attr(tags = ["advanced"]) - @log_test_exceptions def test_03_egress_fr3(self): """Test Communication blocked with network that is other than specified """ @@ -474,7 +474,6 @@ class TestEgressFWRules(cloudstackTestCase): negative_test=False) @attr(tags = ["advanced"]) - @log_test_exceptions def test_03_1_egress_fr3(self): """Test Communication blocked with network that is other than specified """ @@ -483,7 +482,7 @@ class TestEgressFWRules(cloudstackTestCase): # 3. create egress rule with specific CIDR + port range. # 4. login to VM. # 5. Try to reach to public network with other protocol/port range - self.create_vm() + self.create_vm(egress_policy=False) self.exec_script_on_user_vm('ping -c 1 www.google.com', "| grep -oP \'\d+(?=% packet loss)\'", "['100']", @@ -495,9 +494,7 @@ class TestEgressFWRules(cloudstackTestCase): "['failed:']", negative_test=False) - @attr(tags = ["advanced"]) - @log_test_exceptions def test_04_egress_fr4(self): """Test Create Egress rule and check the Firewall_Rules DB table """ @@ -517,10 +514,10 @@ class TestEgressFWRules(cloudstackTestCase): "Check DB Query result set") self.assertEqual(qresultset[0][0], "Firewall", - "DB results not matching") + "DB results not matching, expected: Firewall found: %s " % qresultset[0][0]) self.assertEqual(qresultset[0][1], "Egress", - "DB results not matching") + "DB results not matching, expected: Egress, found: %s" % qresultset[0][1]) qresultset = self.dbclient.execute("select egress_default_policy from network_offerings where name='%s';" % self.network_offering.name) self.assertEqual(isinstance(qresultset, list), True, @@ -530,12 +527,11 @@ class TestEgressFWRules(cloudstackTestCase): 0, "Check DB Query result set") self.assertEqual(qresultset[0][0], - "1", - "DB results not matching") + 1, + "DB results not matching, expected: 1, found: %s" % qresultset[0][0]) @attr(tags = ["advanced"]) - 
@log_test_exceptions def test_04_1_egress_fr4(self): """Test Create Egress rule and check the Firewall_Rules DB table """ @@ -555,10 +551,10 @@ class TestEgressFWRules(cloudstackTestCase): "Check DB Query result set") self.assertEqual(qresultset[0][0], "Firewall", - "DB results not matching") + "DB results not matching, expected: Firewall found: %s " % qresultset[0][0]) self.assertEqual(qresultset[0][1], "Egress", - "DB results not matching") + "DB results not matching, expected: Egress, found: %s" % qresultset[0][1]) qresultset = self.dbclient.execute("select egress_default_policy from network_offerings where name='%s';" % self.network_offering.name) self.assertEqual(isinstance(qresultset, list), True, @@ -568,13 +564,11 @@ class TestEgressFWRules(cloudstackTestCase): 0, "Check DB Query result set") self.assertEqual(qresultset[0][0], - "0", - "DB results not matching") - - + 0, + "DB results not matching, expected: 0, found: %s" % qresultset[0][0]) + @unittest.skip("Skip") @attr(tags = ["advanced"]) - @log_test_exceptions def test_05_egress_fr5(self): """Test Create Egress rule and check the IP tables """ @@ -592,8 +586,8 @@ class TestEgressFWRules(cloudstackTestCase): #TODO: Query VR for expected route rules. 
+ @unittest.skip("Skip") @attr(tags = ["advanced"]) - @log_test_exceptions def test_05_1_egress_fr5(self): """Test Create Egress rule and check the IP tables """ @@ -612,7 +606,6 @@ class TestEgressFWRules(cloudstackTestCase): @attr(tags = ["advanced"]) - @log_test_exceptions def test_06_egress_fr6(self): """Test Create Egress rule without CIDR """ @@ -629,7 +622,6 @@ class TestEgressFWRules(cloudstackTestCase): negative_test=False) @attr(tags = ["advanced"]) - @log_test_exceptions def test_06_1_egress_fr6(self): """Test Create Egress rule without CIDR """ @@ -645,10 +637,7 @@ class TestEgressFWRules(cloudstackTestCase): "['0']", negative_test=False) - - @attr(tags = ["advanced"]) - @log_test_exceptions def test_07_egress_fr7(self): """Test Create Egress rule without End Port """ @@ -665,7 +654,6 @@ class TestEgressFWRules(cloudstackTestCase): negative_test=False) @attr(tags = ["advanced"]) - @log_test_exceptions def test_07_1_egress_fr7(self): """Test Create Egress rule without End Port """ @@ -681,9 +669,8 @@ class TestEgressFWRules(cloudstackTestCase): "['failed:']", negative_test=False) - + @unittest.skip("Skip") @attr(tags = ["advanced"]) - @log_test_exceptions def test_08_egress_fr8(self): """Test Port Forwarding and Egress Conflict """ @@ -694,8 +681,8 @@ class TestEgressFWRules(cloudstackTestCase): self.create_vm(pfrule=True) self.createEgressRule() + @unittest.skip("Skip") @attr(tags = ["advanced"]) - @log_test_exceptions def test_08_1_egress_fr8(self): """Test Port Forwarding and Egress Conflict """ @@ -708,7 +695,6 @@ class TestEgressFWRules(cloudstackTestCase): @attr(tags = ["advanced"]) - @log_test_exceptions def test_09_egress_fr9(self): """Test Delete Egress rule """ @@ -733,7 +719,6 @@ class TestEgressFWRules(cloudstackTestCase): negative_test=False) @attr(tags = ["advanced"]) - @log_test_exceptions def test_09_1_egress_fr9(self): """Test Delete Egress rule """ @@ -759,7 +744,6 @@ class TestEgressFWRules(cloudstackTestCase): @attr(tags = 
["advanced"]) - @log_test_exceptions def test_10_egress_fr10(self): """Test Invalid CIDR and Invalid Port ranges """ @@ -771,7 +755,6 @@ class TestEgressFWRules(cloudstackTestCase): self.assertRaises(Exception, self.createEgressRule, '10.2.2.0/24') @attr(tags = ["advanced"]) - @log_test_exceptions def test_10_1_egress_fr10(self): """Test Invalid CIDR and Invalid Port ranges """ @@ -784,7 +767,6 @@ class TestEgressFWRules(cloudstackTestCase): @attr(tags = ["advanced"]) - @log_test_exceptions def test_11_egress_fr11(self): """Test Regression on Firewall + PF + LB + SNAT """ @@ -795,7 +777,6 @@ class TestEgressFWRules(cloudstackTestCase): self.create_vm(pfrule=True) @attr(tags = ["advanced"]) - @log_test_exceptions def test_11_1_egress_fr11(self): """Test Regression on Firewall + PF + LB + SNAT """ @@ -807,7 +788,6 @@ class TestEgressFWRules(cloudstackTestCase): @attr(tags = ["advanced"]) - @log_test_exceptions def test_12_egress_fr12(self): """Test Reboot Router """ @@ -825,7 +805,6 @@ class TestEgressFWRules(cloudstackTestCase): negative_test=False) @attr(tags = ["advanced"]) - @log_test_exceptions def test_12_1_egress_fr12(self): """Test Reboot Router """ @@ -842,9 +821,7 @@ class TestEgressFWRules(cloudstackTestCase): "['0']", negative_test=False) - @attr(tags = ["advanced"]) - @log_test_exceptions def test_13_egress_fr13(self): """Test Redundant Router : Master failover """ @@ -900,7 +877,6 @@ class TestEgressFWRules(cloudstackTestCase): negative_test=False) @attr(tags = ["advanced"]) - @log_test_exceptions def test_13_1_egress_fr13(self): """Test Redundant Router : Master failover """ diff --git a/test/integration/component/test_egress_rules.py b/test/integration/component/test_egress_rules.py index 10e0d0356a5..f8e8e790248 100644 --- a/test/integration/component/test_egress_rules.py +++ b/test/integration/component/test_egress_rules.py @@ -2149,211 +2149,4 @@ class TestInvalidParametersForEgress(cloudstackTestCase): return -class 
TestEgressAfterHostMaintainance(cloudstackTestCase): - def setUp(self): - - self.apiclient = self.testClient.getApiClient() - self.dbclient = self.testClient.getDbConnection() - self.cleanup = [] - return - - def tearDown(self): - try: - #Clean up, terminate the created templates - cleanup_resources(self.apiclient, self.cleanup) - - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - return - - @classmethod - def setUpClass(cls): - cls.services = Services().services - cls.api_client = super( - TestEgressAfterHostMaintainance, - cls - ).getClsTestClient().getApiClient() - - # Get Zone, Domain and templates - cls.domain = get_domain(cls.api_client, cls.services) - cls.zone = get_zone(cls.api_client, cls.services) - cls.services['mode'] = cls.zone.networktype - cls.pod = get_pod( - cls.api_client, - zoneid=cls.zone.id - ) - - template = get_template( - cls.api_client, - cls.zone.id, - cls.services["ostype"] - ) - cls.services["domainid"] = cls.domain.id - cls.services["virtual_machine"]["zoneid"] = cls.zone.id - cls.services["virtual_machine"]["template"] = template.id - - cls.service_offering = ServiceOffering.create( - cls.api_client, - cls.services["service_offering"] - ) - cls.account = Account.create( - cls.api_client, - cls.services["account"], - domainid=cls.domain.id - ) - cls.services["account"] = cls.account.name - cls._cleanup = [ - cls.account, - cls.service_offering - ] - return - - @classmethod - def tearDownClass(cls): - try: - #Cleanup resources used - cleanup_resources(cls.api_client, cls._cleanup) - - except Exception as e: - raise Exception("Warning: Exception during cleanup : %s" % e) - - return - - @attr(speed = "slow") - @attr(tags = ["sg", "eip", "maintenance"]) - def test_egress_after_host_maintainance(self): - """Test maintenance case for egress - """ - - # Validate the following: - # 1. createaccount of type user - # 2. createsecuritygroup (ssh) for this account - # 3. 
authorizeSecurityGroupIngress to allow ssh access to the VM - # 4. authorizeSecurityGroupEgress to allow ssh access only out to - # CIDR: 0.0.0.0/0 - # 5. deployVirtualMachine into this security group (ssh) - # 6. deployed VM should be Running, ssh should be allowed into the VM - # 7. Enable maintainance mode for host, cance maintainance mode - # 8. User should be able to SSH into VM after maintainace - - security_group = SecurityGroup.create( - self.apiclient, - self.services["security_group"], - account=self.account.name, - domainid=self.account.domainid - ) - self.debug("Created security group with ID: %s" % security_group.id) - - # Default Security group should not have any ingress rule - sercurity_groups = SecurityGroup.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid - ) - self.assertEqual( - isinstance(sercurity_groups, list), - True, - "Check for list security groups response" - ) - - self.assertEqual( - len(sercurity_groups), - 2, - "Check List Security groups response" - ) - # Authorize Security group to SSH to VM - self.debug( - "Authorizing ingress rule for sec group ID: %s for ssh access" - % security_group.id) - ingress_rule = security_group.authorize( - self.apiclient, - self.services["security_group"], - account=self.account.name, - domainid=self.account.domainid - ) - - self.assertEqual( - isinstance(ingress_rule, dict), - True, - "Check ingress rule created properly" - ) - - ssh_rule = (ingress_rule["ingressrule"][0]).__dict__ - - # Authorize Security group to SSH to VM - self.debug( - "Authorizing egress rule for sec group ID: %s for ssh access" - % security_group.id) - egress_rule = security_group.authorizeEgress( - self.apiclient, - self.services["security_group"], - account=self.account.name, - domainid=self.account.domainid - ) - - self.assertEqual( - isinstance(egress_rule, dict), - True, - "Check egress rule created properly" - ) - ssh_egress_rule = (egress_rule["egressrule"][0]).__dict__ - - 
self.virtual_machine = VirtualMachine.create( - self.apiclient, - self.services["virtual_machine"], - accountid=self.account.name, - domainid=self.account.domainid, - serviceofferingid=self.service_offering.id, - securitygroupids=[security_group.id] - ) - self.debug("Deploying VM in account: %s" % self.account.name) - - # Should be able to SSH VM - try: - self.debug("SSH into VM: %s" % self.virtual_machine.id) - ssh = self.virtual_machine.get_ssh_client() - except Exception as e: - self.fail("SSH Access failed for %s: %s" % \ - (self.virtual_machine.ipaddress, e) - ) - vms = VirtualMachine.list( - self.apiclient, - id=self.virtual_machine.id, - listall=True - ) - self.assertEqual( - isinstance(vms, list), - True, - "Check list VMs response for valid host" - ) - vm = vms[0] - - self.debug("Enabling host maintainance for ID: %s" % vm.hostid) - cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() - cmd.id = vm.hostid - self.apiclient.prepareHostForMaintenance(cmd) - - self.debug("Canceling host maintainance for ID: %s" % vm.hostid) - cmd = cancelHostMaintenance.cancelHostMaintenanceCmd() - cmd.id = vm.hostid - self.apiclient.cancelHostMaintenance(cmd) - - self.debug("Waiting for SSVMs to come up") - wait_for_ssvms( - self.apiclient, - zoneid=self.zone.id, - podid=self.pod.id, - ) - self.debug("Starting VM: %s" % self.virtual_machine.id) - - self.virtual_machine.start(self.apiclient) - # Should be able to SSH VM - try: - self.debug("SSH into VM: %s" % self.virtual_machine.id) - ssh = self.virtual_machine.get_ssh_client(reconnect=True) - except Exception as e: - self.fail("SSH Access failed for %s: %s" % \ - (self.virtual_machine.ipaddress, e) - ) - return diff --git a/test/integration/component/test_haproxy.py b/test/integration/component/test_haproxy.py new file mode 100644 index 00000000000..799cfa3e925 --- /dev/null +++ b/test/integration/component/test_haproxy.py @@ -0,0 +1,874 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more 
contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" P1 tests for VPN users +""" +# Import Local Modules +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.integration.lib.base import ( + Account, + ServiceOffering, + VirtualMachine, + PublicIPAddress, + Network, + LoadBalancerRule, + Alert, + Router, + Vpn, + NATRule + ) +from marvin.integration.lib.common import (get_domain, + get_zone, + get_template, + cleanup_resources, + random_gen + ) +from marvin.cloudstackAPI import createLBStickinessPolicy +from marvin.remoteSSHClient import remoteSSHClient + + +class Services: + """Test VPN users Services + """ + + def __init__(self): + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended for unique + # username + "password": "password", + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, # in MHz + "memory": 128, # In MBs + }, + "disk_offering": { + "displaytext": "Small Disk Offering", + "name": "Small Disk Offering", + "disksize": 1 + }, + "virtual_machine": { + "displayname": "TestVM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 
'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "vpn_user": { + "username": "test", + "password": "test", + }, + "natrule": { + "privateport": 22, + "publicport": 22, + "protocol": "TCP", + "username":"root", + "password": "password" + }, + "network": { + "name": "Test Network", + "displaytext": "Test Network", + "netmask": '255.255.255.0' + }, + "lbrule": { + "name": "SSH", + "alg": "roundrobin", + # Algorithm used for load balancing + "privateport": 22, + "publicport": 2222, + "openfirewall": False, + "startport": 22, + "endport": 2222, + "protocol": "TCP", + "cidrlist": '0.0.0.0/0', + }, + "ostype": 'CentOS 5.3 (64-bit)', + "sleep": 60, + "timeout": 10, + "mode": 'advanced', + # Networking mode: Advanced, Basic + } + + +class TestHAProxyStickyness(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.api_client = super(TestHAProxyStickyness, + cls).getClsTestClient().getApiClient() + cls.services = Services().services + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + + cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + + cls._cleanup = [cls.service_offering, ] + return + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.account = Account.create( + self.apiclient, + self.services["account"], + domainid=self.domain.id + ) + self.virtual_machine = VirtualMachine.create( + self.apiclient, + self.services["virtual_machine"], + 
templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id + ) + + self.virtual_machine_2 = VirtualMachine.create( + self.apiclient, + self.services["virtual_machine"], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id + ) + self.public_ip = PublicIPAddress.create( + self.apiclient, + self.virtual_machine.account, + self.virtual_machine.zoneid, + self.virtual_machine.domainid, + self.services["virtual_machine"] + ) + + NATRule.create( + self.apiclient, + self.virtual_machine, + self.services["natrule"], + ipaddressid=self.public_ip.ipaddress.id + ) + + self.cleanup = [self.account, ] + return + + def tearDown(self): + try: + # Clean up, terminate the created instance, volumes and snapshots + cleanup_resources(self.apiclient, self.cleanup) + pass + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def get_Network(self, account): + """Returns a network for account""" + + networks = Network.list( + self.apiclient, + account=account.name, + domainid=account.domainid, + listall=True + ) + self.assertIsInstance(networks, + list, + "List networks should return a valid response") + return networks[0] + + def create_LB_Rule(self, public_ip, network, vmarray, services=None): + """Create and validate the load balancing rule""" + + self.debug("Creating LB rule for IP address: %s" % + public_ip.ipaddress.ipaddress) + objservices = None + if services: + objservices = services + else: + objservices = self.services["lbrule"] + + lb_rule = LoadBalancerRule.create( + self.apiclient, + objservices, + ipaddressid=public_ip.ipaddress.id, + accountid=self.account.name, + networkid=network.id, + domainid=self.account.domainid + ) + self.debug("Adding virtual machines %s to LB rule" % str(vmarray)) + lb_rule.assign(self.apiclient, vmarray) + return lb_rule + + def 
configure_Stickiness_Policy(self, lb_rule, method, paramDict=None): + """Configure the stickiness policy on lb rule""" + try: + result = lb_rule.createSticky( + self.apiclient, + methodname=method, + name="-".join([method, random_gen()]), + param=paramDict + ) + self.debug("Response: %s" % result) + return result + except Exception as e: + self.fail("Configure sticky policy failed with exception: %s" % e) + + def validate_Stickiness_Policy(self, lb_rule, method, publicip): + """Validates the stickiness policy""" + + sticky_policies = lb_rule.listStickyPolicies(self.apiclient, + lbruleid=lb_rule.id, + listall=True) + self.assertIsInstance(sticky_policies, + list, + "List sticky policies should return a valid list") + sticky_policy = sticky_policies[0] + + self.debug("Stickiness policy method: %s" % + sticky_policy.stickinesspolicy[0].methodname) + self.assertEqual(sticky_policy.stickinesspolicy[0].methodname, + method, + "Stickiness policy should have method as - %s" % method) + + hostnames = [] + + hostnames = self.try_ssh(publicip, hostnames) + hostnames = self.try_ssh(publicip, hostnames) + + self.debug("hostnames: %s" % hostnames) + self.debug("set(hostnames): %s" % set(hostnames)) + + #For each ssh, host should be the same, else stickiness policy is not working properly + if len(hostnames) == len(set(hostnames)): + raise Exception("Stickyness policy: %s not working properly, got hostnames %s" + % (method, hostnames)) + return + + def delete_Stickiness_policy(self, policy, lb_rule): + """Deletes the stickiness policy""" + + try: + lb_rule.deleteSticky(self.apiclient, id=policy.id) + except Exception as e: + self.fail("Failed to delete the stickiness policy: %s" % e) + + sticky_policies = lb_rule.listStickyPolicies(self.apiclient, + lbruleid=lb_rule.id, + listall=True) + self.assertIsInstance(sticky_policies, list, + "List stickiness policies shall return a valid response") + + policy = sticky_policies[0] + + self.assertEqual(len(policy.stickinesspolicy), + 0, + 
"List stickiness policy should return nothing") + return + + def check_stickiness_supported_methods(self, supportedMethods, value): + + for i, dic in enumerate(supportedMethods): + if dic["methodname"] == value: + return True + return False + + def acquire_Public_Ip(self): + """Acquires the public IP""" + + try: + self.debug("Acquiring public IP for account: %s" % + self.account.name) + public_ip = PublicIPAddress.create( + self.apiclient, + self.virtual_machine.account, + self.virtual_machine.zoneid, + self.virtual_machine.domainid, + self.services["virtual_machine"] + ) + self.debug("Acquired public IP: %s" % + public_ip.ipaddress.ipaddress) + + self.debug("Configuring NAT rule for the acquired public ip") + + NATRule.create( + self.apiclient, + self.virtual_machine, + self.services["natrule"], + ipaddressid=public_ip.ipaddress.id + ) + + return public_ip + except Exception as e: + self.fail("Failed to acquire new public IP: %s" % e) + + def get_router(self, account): + """Returns a default router for account""" + + routers = Router.list(self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + listall=True) + self.assertIsInstance(routers, list, + "List routers should return a valid repsonse") + return routers[0] + + def create_VPN(self, public_ip): + """Creates VPN for the network""" + + self.debug("Creating VPN with public IP: %s" % public_ip.ipaddress.id) + try: + # Assign VPN to Public IP + vpn = Vpn.create(self.apiclient, + self.public_ip.ipaddress.id, + account=self.account.name, + domainid=self.account.domainid) + + self.debug("Verifying the remote VPN access") + vpns = Vpn.list(self.apiclient, + publicipid=public_ip.ipaddress.id, + listall=True) + self.assertEqual( + isinstance(vpns, list), + True, + "List VPNs shall return a valid response" + ) + return vpn + except Exception as e: + self.fail("Failed to create remote VPN access: %s" % e) + + def try_ssh(self, ip_addr, hostnames): + try: + self.debug( + "SSH into NAT Rule (Public 
IP: %s)" % ip_addr) + + # If Round Robin Algorithm is chosen, + # each ssh command should alternate between VMs + + ssh_1 = remoteSSHClient( + ip_addr, + 22, + self.services["natrule"]["username"], + self.services["natrule"]["password"] + ) + hostnames.append(ssh_1.execute("hostname")[0]) + self.debug(hostnames) + except Exception as e: + self.fail("%s: SSH failed for VM with IP Address: %s" % + (e, ip_addr)) + return hostnames + + @attr(tags=["advanced", "advancedns"]) + @attr(speed="slow") + def test_01_create_sticky_policy_default_values(self): + """Test Configure stickiness policies with default values""" + + # Validate the following + # 1. Create a LB rule with round robin. listLoadBalancerRules should + # show newly created load balancer rule. + # 2. Configure the Source based, app cookie and lb cookie based policy + # listLBStickinessPolicies should show newly created stickiness + + self.debug("Creating a load balancing rule on IP: %s" % + self.public_ip.ipaddress.ipaddress) + + lb_rule = self.create_LB_Rule(self.public_ip, + network=self.get_Network(self.account), + vmarray=[self.virtual_machine, self.virtual_machine_2]) + + methods = ["SourceBased", "AppCookie", "LBCookie"] + for method in methods: + self.debug("Creating stickiness policy for the LB rule: %s" % + lb_rule.id) + policies = self.configure_Stickiness_Policy(lb_rule, method=method) + + policy = policies.stickinesspolicy[0] + + self.debug("Policy: %s" % str(policy)) + self.debug("Validating the stickiness policy") + self.validate_Stickiness_Policy(lb_rule, method, self.public_ip.ipaddress.ipaddress) + self.debug("Deleting the stickiness policy for lb rule: %s" % + lb_rule.name) + self.delete_Stickiness_policy(policy, lb_rule) + return + + @attr(tags=["advanced", "advancedns"]) + @attr(speed="slow") + def test_02_create_sticky_policy_custom_values(self): + """Test Configure stickiness policies with custom values""" + + # Validate the following + # 1. 
Create a LB rule with roundrobin, leastconn and source. + # listLoadBalancerRules should show newly created load balancer rule + # 2. Configure the Source based, app cookie and lb cookie based policy + # with custom parameters + # listLBStickinessPolicies should show newly created stickiness + + lb_methods = ["roundrobin", "leastconn", "source"] + + configs = {"SourceBased": {"tablesize": '100k'}, + "AppCookie": {"request-learn": "true"}, + "LBCookie": {"nocache": "true"}} + + for lb_method in lb_methods: + self.debug("Creating a load balancing rule on IP %s and algo %s" % + (self.public_ip.ipaddress.ipaddress, lb_method)) + + services = self.services["lbrule"] + services["alg"] = lb_method + + lb_rule = self.create_LB_Rule(self.public_ip, + network=self.get_Network(self.account), + vmarray=[self.virtual_machine, self.virtual_machine_2], + services=services) + + for method, params in configs.items(): + self.debug("Creating stickiness policy for the LB rule: %s" % + lb_rule.id) + policies = self.configure_Stickiness_Policy(lb_rule, + method=method, + paramDict=params) + + policy = policies.stickinesspolicy[0] + self.debug("Policy: %s" % str(policy)) + + self.debug("Validating the stickiness policy") + self.validate_Stickiness_Policy(lb_rule, method, self.public_ip.ipaddress.ipaddress) + self.debug("Deleting the stickiness policy for lb rule: %s" % + lb_rule.name) + self.delete_Stickiness_policy(policy, lb_rule) + self.debug("Deleting the LB rule: %s" % lb_rule.name) + lb_rule.delete(self.apiclient) + return + + @attr(tags=["advanced", "advancedns"]) + @attr(speed="slow") + def test_03_supported_policies_by_network(self): + """Test listnetworks response to check supported stickiness policies""" + + # Validate the following + # 1. List networks for the account in advance network mode + # 2. 
List of supported sticky methods should be present under + # SupportedStickinessMethods tag + + self.debug("List networks for account: %s" % self.account.name) + networks = Network.list(self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + listall=True) + + self.assertIsInstance(networks, + list, + "List network should return a valid response") + network = networks[0] + self.debug("Network: %s" % network) + self.assertEqual(hasattr(network, "SupportedStickinessMethods"), + True, + "Network should have SupportedStickinessMethods param") + + self.assertEqual(hasattr(network, "LbCookie"), + True, + "Network should have LbCookie LB method param") + + self.assertEqual(hasattr(network, "AppCookie"), + True, + "Network should have AppCookie LB method param") + + self.assertEqual(hasattr(network, "SourceBased"), + True, + "Network should have SourceBased LB method param") + + return + + @attr(tags=["advanced", "advancedns"]) + @attr(speed="slow") + def test_04_delete_lb_rule(self): + """Test LB rule before/after stickiness policy creation""" + + # Validate the following + # 1. Create a LB rule with roundrobin, leastconn and source. + # listLoadBalancerRules should show newly created load balancer rule + # 2. Delete the loadbalancer rule. Delete loadbalancer rule should be + # successful + # 3. Configure the Source based, app cookie and lb cookie based policy + # with custom parameters listLBStickinessPolicies should show newly + # created stickiness + # 4. Delete load balancer rule. 
Delete should be successful + + lb_methods = ["roundrobin", "leastconn", "source"] + + configs = {"SourceBased": {"tablesize": '100k'}, + "AppCookie": {"request-learn": "true"}, + "LBCookie": {"nocache": "true"}} + for lb_method in lb_methods: + for method, params in configs.items(): + self.debug("Creating load balancing rule on IP %s & algo %s" % + (self.public_ip.ipaddress.ipaddress, lb_method)) + + services = self.services["lbrule"] + services["alg"] = lb_method + + lb_rule = self.create_LB_Rule(self.public_ip, + network=self.get_Network(self.account), + vmarray=[self.virtual_machine, self.virtual_machine_2], + services=services) + self.debug( + "Deleting the LB rule before stickiness policy creation") + lb_rule.delete(self.apiclient) + + with self.assertRaises(Exception): + LoadBalancerRule.list(self.apiclient, + id=lb_rule.id, + listall=True) + + lb_rule = self.create_LB_Rule(self.public_ip, + network=self.get_Network(self.account), + vmarray=[self.virtual_machine, self.virtual_machine_2], + services=services) + self.debug("Creating stickiness policy for the LB rule: %s" % + lb_rule.id) + policies = self.configure_Stickiness_Policy(lb_rule, + method=method, + paramDict=params) + + policy = policies.stickinesspolicy[0] + + self.debug("Policy: %s" % str(policy)) + self.debug("Validating the stickiness policy") + self.validate_Stickiness_Policy(lb_rule, method, self.public_ip.ipaddress.ipaddress) + + self.debug("Deleting the LB rule: %s" % lb_rule.name) + lb_rule.delete(self.apiclient) + with self.assertRaises(Exception): + LoadBalancerRule.list(self.apiclient, id=lb_rule.id) + return + + @attr(tags=["advanced", "advancedns"]) + @attr(speed="slow") + def test_05_error_alerts_after_create(self): + """Test error/alerts after creating stickiness policy""" + + # Validate the following + # 1. Create a LB rule with round Robin/Least connections/Source + # listLoadBalancerRules should show newly created load balancer rule + # 2. 
Configure the Stickiness policy to above created LB rule. + # listLBStickinessPolicies Api should show newly created stickiness + # 3. update & delete stickiness policy see error related to stickiness + # 4. No errors should be shown in the logs and alerts + + lb_methods = ["roundrobin", "leastconn", "source"] + configs = {"SourceBased": {"tablesize": '100k'}, + "AppCookie": {"request-learn": "true"}, + "LBCookie": {"nocache": "true"}} + for lb_method in lb_methods: + for method, params in configs.items(): + self.debug("Creating load balancing rule on IP %s & algo %s" % + (self.public_ip.ipaddress.ipaddress, lb_method)) + + services = self.services["lbrule"] + services["alg"] = lb_method + + lb_rule = self.create_LB_Rule(self.public_ip, + network=self.get_Network(self.account), + vmarray=[self.virtual_machine, self.virtual_machine_2], + services=services) + + self.debug("Creating stickiness policy for the LB rule: %s" % + lb_rule.id) + policies = self.configure_Stickiness_Policy(lb_rule, + method=method, + paramDict=params) + + policy = policies.stickinesspolicy[0] + + self.debug("Policy: %s" % str(policy)) + self.debug("Validating the stickiness policy") + self.validate_Stickiness_Policy(lb_rule, method, self.public_ip.ipaddress.ipaddress) + + self.debug("Deleting the LB rule: %s" % lb_rule.name) + lb_rule.delete(self.apiclient) + + with self.assertRaises(Exception): + LoadBalancerRule.list(self.apiclient, + id=lb_rule.id, + listall=True) + alerts = Alert.list(self.apiclient, keyword="stickiness", + listall=True) + self.debug( + "Create/update/delete should not produce any alert/error") + self.assertEqual(alerts, None, + "Create/update/delete should not produce any alert/error") + return + + @attr(tags=["advanced", "advancedns"]) + @attr(speed="slow") + def test_06_release_ip(self): + """Test release public IP with stickiness policy""" + + # 1. Configure load balancing rule. Listloadbalancerrule should list + # valid list + # 2. Create stickiness policy. 
liststickinesspolicy should return valid + # response + # 3. Release public Ip. liststickiness policy should return a valid + # response + + lb_methods = ["roundrobin", "leastconn", "source"] + + configs = {"SourceBased": {"tablesize": '100k'}, + "AppCookie": {"request-learn": "true"}, + "LBCookie": {"nocache": "true"}} + + for lb_method in lb_methods: + for method, params in configs.items(): + self.debug("Setting up environment - acquire public IP") + public_ip = self.acquire_Public_Ip() + + self.debug( + "Creating a load balancing rule on IP %s and algo %s" % + (public_ip.ipaddress.ipaddress, lb_method)) + + services = self.services["lbrule"] + services["alg"] = lb_method + + lb_rule = self.create_LB_Rule(public_ip, + network=self.get_Network(self.account), + vmarray=[self.virtual_machine, self.virtual_machine_2], + services=services) + + policies = self.configure_Stickiness_Policy(lb_rule, + method=method, + paramDict=params) + policy = policies.stickinesspolicy[0] + + self.debug("Policy: %s" % str(policy)) + self.debug("Validating the stickiness policy") + self.validate_Stickiness_Policy(lb_rule, method, public_ip.ipaddress.ipaddress) + + self.debug("Releasing public Ip: %s" % + public_ip.ipaddress.ipaddress) + public_ip.delete(self.apiclient) + + self.debug("Checking the response of liststickiness policies") + + with self.assertRaises(Exception): + lb_rule.listStickyPolicies(self.apiclient, + lbruleid=lb_rule.id, + listall=True) + return + + @attr(tags=["advanced", "advancedns"]) + def test_07_delete_account(self): + """Test Delete account and check the router and its rules""" + + # Validate the following + # 1. create an account + # 2. using that account,create an instances + # 3. select the Source NAT IP and configure the stikiness policy + # 4. Delete account + # 5. 
The corresponding stickiness policy should be removed +        # listLBStickinessPolicies Api should not show deleted stickiness policy + +        self.debug("Creating LB rule for account: %s" % +                                                self.account.name) +        lb_rule = self.create_LB_Rule(self.public_ip, +                            network=self.get_Network(self.account), +                            vmarray=[self.virtual_machine, self.virtual_machine_2]) + +        policies = self.configure_Stickiness_Policy(lb_rule, method="LbCookie") +        policy = policies.stickinesspolicy[0] + +        self.debug("Policy: %s" % str(policy)) +        self.debug("Validating the stickiness policy") +        self.validate_Stickiness_Policy(lb_rule, "LbCookie", self.public_ip.ipaddress.ipaddress) + +        # removing account from cleanup list as we're deleting account +        self.cleanup.pop() +        self.debug("Deleting account: %s" % self.account.name) + +        try: +            self.account.delete(self.apiclient) +        except Exception as e: +            self.fail("Failed to delete account: %s" % e) +        self.debug("Checking the response of liststickiness policies") + +        with self.assertRaises(Exception): +            lb_rule.listStickyPolicies(self.apiclient, +                                       lbruleid=lb_rule.id, +                                       listall=True) +        return + +    @attr(tags=["advanced", "advancedns"]) +    def test_08_create_policy_router_stopped(self): +        """Test verify create stickiness policy when router is in stopped state""" + +        # Validate the following +        # 1. stop the router +        # 2. create stickiness policy from UI +        # 3. start the router. 
listLBStickinessPolicies Api should show created +        # stickiness policy + +        self.debug("Creating LB rule for account: %s" % self.account.name) +        lb_rule = self.create_LB_Rule(self.public_ip, +                            network=self.get_Network(self.account), +                            vmarray=[self.virtual_machine, self.virtual_machine_2]) + +        self.debug("Fetching routers for the account: %s" % +                   self.account.name) +        router = self.get_router(self.account) + +        self.debug("Stopping the router: %s" % router.name) +        Router.stop(self.apiclient, id=router.id) + +        policies = self.configure_Stickiness_Policy(lb_rule, method="LbCookie") +        policy = policies.stickinesspolicy[0] + +        self.debug("Starting the router: %s" % router.name) +        Router.start(self.apiclient, id=router.id) + +        self.debug("Policy: %s" % str(policy)) +        self.debug("Validating the stickiness policy") +        self.validate_Stickiness_Policy(lb_rule, "LbCookie", self.public_ip.ipaddress.ipaddress) +        return + +    @attr(tags=["advanced", "advancedns"]) +    def test_09_create_policy_router_destroy(self): +        """Test check the stickiness policy rules after destroying router""" + +        # Validate the following +        # 1. create an account +        # 2. using that account, create an instance +        # 3. select the Source NAT IP and configure the stickiness policy +        # 4. destroy the router. 
+ +        self.debug("Creating LB rule for account: %s" % self.account.name) +        lb_rule = self.create_LB_Rule(self.public_ip, +                            network=self.get_Network(self.account), +                            vmarray=[self.virtual_machine, self.virtual_machine_2]) + +        self.debug("Fetching routers for the account: %s" % +                   self.account.name) +        router = self.get_router(self.account) + +        policies = self.configure_Stickiness_Policy(lb_rule, method="LbCookie") +        policy = policies.stickinesspolicy[0] + +        self.debug("Policy: %s" % str(policy)) +        self.debug("Validating the stickiness policy") +        self.validate_Stickiness_Policy(lb_rule, "LbCookie", self.public_ip.ipaddress.ipaddress) + +        self.debug("Destroying the router: %s" % router.name) +        Router.destroy(self.apiclient, id=router.id) + +        return + +    @attr(tags=["advanced", "advancedns"]) +    def test_10_create_policy_enable_disable_vpn(self): +        """Test enable/disable the VPN after applying sticky policy rules""" + +        # Validate the following +        # 1. create an account +        # 2. using that account, create an instance +        # 3. select the Source NAT IP and configure the stickiness policy +        # 4. enable/disable the VPN. 
It should not impact the created rules +        # listLBStickinessPolicies Api should show created stickiness policy + +        self.debug("Creating LB rule for account: %s" % self.account.name) +        lb_rule = self.create_LB_Rule(self.public_ip, +                            network=self.get_Network(self.account), +                            vmarray=[self.virtual_machine, self.virtual_machine_2]) + +        policies = self.configure_Stickiness_Policy(lb_rule, method="LbCookie") +        policy = policies.stickinesspolicy[0] + +        self.debug("Policy: %s" % str(policy)) +        self.debug("Validating the stickiness policy") +        self.validate_Stickiness_Policy(lb_rule, "LbCookie", self.public_ip.ipaddress.ipaddress) + +        self.debug("Enabling VPN on Public Ip: %s" % +                   self.public_ip.ipaddress.ipaddress) +        self.create_VPN(self.public_ip) + +        self.debug("Validating the stickiness policy after enabling VPN") +        self.validate_Stickiness_Policy(lb_rule, "LbCookie", self.public_ip.ipaddress.ipaddress) +        return + +    @attr(tags=["advanced", "advancedns"]) +    def test_11_invalid_params(self): +        """Test verify functionality of synchronous and asynchronous validations""" + +        # Validate the following +        # verify the validation while creating or attaching stickiness policy +        # by doing the following scenarios +        # * by passing the invalid parameter +        # * Invalid method name +        # * required parameter not present +        # * passing invalid values to valid parameters. 
+ + self.debug("Creating LB rule for account: %s" % self.account.name) + lb_rule = self.create_LB_Rule(self.public_ip, + network=self.get_Network(self.account), + vmarray=[self.virtual_machine, self.virtual_machine_2]) + + self.debug("Creating stickiness policy with invalid method") + with self.assertRaises(Exception): + self.configure_Stickiness_Policy(lb_rule, method="InvalidMethod") + + self.debug("Creating stickiness policy with invalid params") + with self.assertRaises(Exception): + self.configure_Stickiness_Policy(lb_rule, method="LbCookie", + params={"Test": 10}) + + self.debug("Passing invalid parameter") + with self.assertRaises(Exception): + cmd = createLBStickinessPolicy.createLBStickinessPolicyCmd() + cmd.lbruleid = lb_rule.id + cmd.method = "LbCookie" + cmd.name = "LbCookie" + self.apiclient.createLBStickinessPolicy(cmd) + + self.debug("Creating stickiness policy not passing required param") + with self.assertRaises(Exception): + cmd = createLBStickinessPolicy.createLBStickinessPolicyCmd() + cmd.lbruleid = lb_rule.id + cmd.name = "LbCookie" + self.apiclient.createLBStickinessPolicy(cmd) + + return diff --git a/test/integration/component/test_netscaler_nw_off.py b/test/integration/component/test_netscaler_nw_off.py index cb49dbe258d..b94d47ea164 100644 --- a/test/integration/component/test_netscaler_nw_off.py +++ b/test/integration/component/test_netscaler_nw_off.py @@ -408,13 +408,12 @@ class TestAddMultipleNSDiffZone(cloudstackTestCase): for zone in zones: if zone.networktype == 'Advanced': zone_list.append(zone) - self.assertGreater( len(zone_list), 1, "Atleast 2 advanced mode zones should be present for this test" ) - + zoneid=zone_list[0].id physical_networks = PhysicalNetwork.list( self.apiclient, zoneid=zone_list[0].id @@ -424,43 +423,12 @@ class TestAddMultipleNSDiffZone(cloudstackTestCase): True, "There should be atleast one physical network for advanced zone" ) - physical_network = physical_networks[0] self.debug("Adding netscaler device: %s" 
% self.services["netscaler_1"]["ipaddress"]) - netscaler_1 = NetScaler.add( - self.apiclient, - self.services["netscaler_1"], - physicalnetworkid=physical_network.id - ) + netscaler_1 = add_netscaler(self.apiclient, zoneid, self.services["netscaler_1"]) self.cleanup.append(netscaler_1) - self.debug("Checking if Netscaler network service provider is enabled?") - - nw_service_providers = NetworkServiceProvider.list( - self.apiclient, - name='Netscaler', - physicalnetworkid=physical_network.id - ) - self.assertEqual( - isinstance(nw_service_providers, list), - True, - "Network service providers list should not be empty" - ) - netscaler_provider = nw_service_providers[0] - if netscaler_provider.state != 'Enabled': - self.debug("Netscaler provider is not enabled. Enabling it..") - response = NetworkServiceProvider.update( - self.apiclient, - id=netscaler_provider.id, - state='Enabled' - ) - self.assertEqual( - response.state, - "Enabled", - "Network service provider should be in enabled state" - ) - else: - self.debug("Netscaler service provider is already enabled.") + physical_network = physical_networks[0] ns_list = NetScaler.list( self.apiclient, lbdeviceid=netscaler_1.lbdeviceid @@ -492,6 +460,7 @@ class TestAddMultipleNSDiffZone(cloudstackTestCase): self.apiclient, zoneid=zone_list[1].id ) + zoneid=zone_list[1].id self.assertEqual( isinstance(physical_networks, list), True, @@ -501,11 +470,7 @@ class TestAddMultipleNSDiffZone(cloudstackTestCase): self.debug("Adding netscaler device: %s" % self.services["netscaler_2"]["ipaddress"]) - netscaler_2 = NetScaler.add( - self.apiclient, - self.services["netscaler_2"], - physicalnetworkid=physical_network.id - ) + netscaler_2 = add_netscaler(self.apiclient, zoneid, self.services["netscaler_2"]) self.cleanup.append(netscaler_2) ns_list = NetScaler.list( self.apiclient, @@ -2394,11 +2359,10 @@ class TestNOWithNetscaler(cloudstackTestCase): cls.services["virtual_machine"]["zoneid"] = cls.zone.id 
cls.services["virtual_machine"]["template"] = cls.template.id + cls._cleanup = [] try: cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.services["netscaler_1"]) - cls._cleanup = [ - cls.netscaler - ] + cls._cleanup.append(cls.netscaler) cls.service_offering = ServiceOffering.create( cls.api_client, cls.services["service_offering"] @@ -2438,7 +2402,7 @@ class TestNOWithNetscaler(cloudstackTestCase): return @attr(tags = ["advancedns"]) - def test_01_network_off_without_conserve_mode(self): + def test_01_netoff_without_conserve_mode(self): """Test Nw off with Conserve mode off, VR-All services, LB-netscaler """ @@ -2707,7 +2671,7 @@ class TestNOWithNetscaler(cloudstackTestCase): return @attr(tags = ["advancedns"]) - def test_02_network_off_with_conserve_mode_netscaler(self): + def test_02_net_off_conserve_mode_ns(self): """Test NW off with Conserve mode ON, LB-Netscaler and VR-All services """ diff --git a/test/integration/component/test_non_contiguous_vlan.py b/test/integration/component/test_non_contiguous_vlan.py new file mode 100644 index 00000000000..5ef1ec738d2 --- /dev/null +++ b/test/integration/component/test_non_contiguous_vlan.py @@ -0,0 +1,446 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +""" P1 tests for Non contiguous VLAN ranges + + Test Plan: https://cwiki.apache.org/confluence/download/attachments/30760993/Non-Contiguous_VLAN_Ranges_TestPlan.xlsx + + Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-2238 + + Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Support+non-contiguous+VLAN+ranges +""" + +#Import local modules +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.integration.lib.base import Account +from marvin.integration.lib.base import PhysicalNetwork +from marvin.integration.lib.common import * +from nose.plugins.attrib import attr + +class Services(): + def __init__(self): + self.services = { + + "vlan": { + "partial_range": ["",""], + "full_range": "", + }, + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended in create account to + # ensure unique username generated each time + "password": "password", + }, + "virtual_machine": { + "displayname": "testserver", + "username": "root", # VM creds for SSH + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, # in MHz + "memory": 128, # In MBs + }, + + "ostype": 'CentOS 5.6 (64-bit)', + } + + +@attr(tags = ["simulator", "advanced"]) +class TestNonContiguousVLANRanges(cloudstackTestCase): + """ + Test to add non contiguous vlan ranges into existing physical network + """ + @classmethod + def setUpClass(cls): + cls.api_client = super(TestNonContiguousVLANRanges, cls).getClsTestClient().getApiClient() + cls.services = Services().services + # Get Zone, pod, domain + cls.zone = get_zone(cls.api_client, cls.services) + cls.pod = get_pod(cls.api_client, 
cls.zone.id, cls.services) + cls.domain = get_domain(cls.api_client, cls.services) + + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + + cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + + cls.services["virtual_machine"]["template"] = cls.template.id + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + + cls._cleanup = [cls.service_offering] + + return + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.vlan = self.services["vlan"] + self.apiClient = self.testClient.getApiClient() + + self.setNonContiguousVlanIds(self.apiclient, self.zone.id) + + self.cleanup = [] + + def tearDown(self): + """ + Teardown to update a physical network and shrink its vlan + Cleanup all used resource + """ + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan=self.existingvlan) + + try: + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + def setNonContiguousVlanIds(self, apiclient, zoneid): + """ + Form the non contiguous ranges based on currently assigned range in physical network + """ + + NonContigVlanIdsAcquired = False + + list_physical_networks_response = PhysicalNetwork.list( + apiclient, + zoneid=zoneid + ) + assert isinstance(list_physical_networks_response, list) + assert len(list_physical_networks_response) > 0, "No physical networks found in zone %s" % zoneid + + for physical_network in list_physical_networks_response: + + self.physicalnetwork = physical_network + self.physicalnetworkid = physical_network.id + self.existingvlan = physical_network.vlan + + vlans = xsplit(self.existingvlan, ['-', ',']) + + assert len(vlans) > 0 + assert 
int(vlans[0]) < int(vlans[-1]), "VLAN range %s was improperly split" % self.existingvlan + + # Keep some gap between existing vlan and the new vlans which we are going to add + # So that they are non contiguous + + non_contig_end_vlan_id = int(vlans[-1]) + 6 + non_contig_start_vlan_id = int(vlans[0]) - 6 + + # Form ranges which are consecutive to existing ranges but not immediately contiguous + # There should be gap in between existing range and new non contiguous ranage + + # If you can't add range after existing range, because it's crossing 4095, then + # select VLAN ids before the existing range such that they are greater than 0, and + # then add this non contiguoud range + + if non_contig_end_vlan_id < 4095: + + self.vlan["partial_range"][0] = str(non_contig_end_vlan_id - 4) + '-' + str(non_contig_end_vlan_id - 3) + self.vlan["partial_range"][1] = str(non_contig_end_vlan_id - 1) + '-' + str(non_contig_end_vlan_id) + self.vlan["full_range"] = str(non_contig_end_vlan_id - 4) + '-' + str(non_contig_end_vlan_id) + NonContigVlanIdsAcquired = True + + elif non_contig_start_vlan_id > 0: + + self.vlan["partial_range"][0] = str(non_contig_start_vlan_id) + '-' + str(non_contig_start_vlan_id + 1) + self.vlan["partial_range"][1] = str(non_contig_start_vlan_id + 3) + '-' + str(non_contig_start_vlan_id + 4) + self.vlan["full_range"] = str(non_contig_start_vlan_id) + '-' + str(non_contig_start_vlan_id + 4) + NonContigVlanIdsAcquired = True + + else: + NonContigVlanIdsAcquired = False + + # If failed to get relevant vlan ids, continue to next physical network + # else break from loop as we have hot the non contiguous vlan ids for the test purpose + + if not NonContigVlanIdsAcquired: + continue + else: + break + + # If even through looping from all existing physical networks, failed to get relevant non + # contiguous vlan ids, then fail the test case + + if not NonContigVlanIdsAcquired: + self.fail("Failed to set non contiguous vlan ids to test. 
Free some ids from \ + from existing physical networks at extreme ends") + + return + + def validatePhysicalNetworkVlan(self, physicalNetworkId, vlan): + """Validate whether the physical network has the updated vlan + + params: + + @physicalNetworkId: The id of physical network which needs to be validated + @vlan: vlan with which physical network was updated. This should match with the vlan of listed + physical network + + Raise Exception if not matched + """ + + self.debug("Listing physical networks with id: %s" % physicalNetworkId) + + physicalnetworks = PhysicalNetwork.list(self.apiclient, id=physicalNetworkId) + + self.assertTrue(isinstance(physicalnetworks, list), "PhysicalNetwork.list should return a \ + valid list object") + + self.assertTrue(len(physicalnetworks) > 0, "physical networks list should not be empty") + + self.debug("Checking if physical network vlan matches with the passed vlan") + + vlans = xsplit(vlan,[',']) + + for virtualLan in vlans: + self.assert_(physicalnetworks[0].vlan.find(virtualLan) != -1, "vlan range %s \ + is not present in physical network: %s" % (virtualLan, physicalNetworkId)) + + return + + @attr(tags = ["simulator", "advanced"]) + def test_01_add_non_contiguous_ranges(self): + """ + Test adding different non contiguous vlan ranges + """ + # 1. Add new non contiguous vlan-range in addition to existing range + # 2. Add another non contiguous range + # 3. 
Both the ranges should get added successfully + + vlan1 = self.existingvlan + "," + self.vlan["partial_range"][0] + updatePhysicalNetworkResponse = self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan1) + + self.assert_(updatePhysicalNetworkResponse is not None, + msg="couldn't add non contiguous range in the physical network with vlan %s"%vlan1) + + self.debug("Verifying the VLAN of the updated physical network: %s, It should match with \ + the passed vlan: %s" % (self.physicalnetworkid,vlan1)) + + self.validatePhysicalNetworkVlan(self.physicalnetworkid, vlan1) + + vlan2 = vlan1 + "," + self.vlan["partial_range"][1] + updatePhysicalNetworkResponse2 = self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan2) + + self.assert_(updatePhysicalNetworkResponse2 is not None, + msg="couldn't add non contiguous range in the physical network with vlan %s"%vlan2) + + self.debug("Verifying the VLAN of the updated physical network: %s, It should match with \ + the passed vlan: %s" % (self.physicalnetworkid,vlan2)) + + self.validatePhysicalNetworkVlan(self.physicalnetworkid, vlan2) + + return + + @attr(tags = ["simulator", "advanced"]) + def test_02_add_existing_vlan_range(self): + """ + Test adding same non contiguous range twice + """ + # 1. Add non contiguous range to existing range + # 2. Add the same range again + # 3. 
It should get added successfully + + vlan1 = self.existingvlan+","+self.vlan["partial_range"][0] + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan1) + + self.debug("Updating physical network with same vlan range" ) + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan1) + + self.debug("Verifying the VLAN of the updated physical network: %s, It should match with \ + the passed vlan: %s" % (self.physicalnetworkid,vlan1)) + + self.validatePhysicalNetworkVlan(self.physicalnetworkid, vlan1) + + return + + @attr(tags = ["simulator", "advanced"]) + def test_03_extend_contiguous_range(self): + """ + Test adding non contiguous range and extend it + """ + + # 1. Add new non contiguous range + # 2. Add new range which extends previously added range + # 3. Newly added range should get extended successfully + + vlan1 = self.existingvlan + "," + self.vlan["partial_range"][0] + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan1) + + vlan2 = vlan1 + "," + self.vlan["full_range"] + updatePhysicalNetworkResponse = self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan2) + + self.assert_(updatePhysicalNetworkResponse is not None, + msg="couldn't extend the physical network with vlan %s"%vlan2) + + extendedvlan = self.existingvlan + "," + self.vlan["full_range"] + + self.debug("Verifying the VLAN of the updated physical network: %s, It should match with \ + the extended vlan: %s" % (self.physicalnetworkid, extendedvlan)) + + self.validatePhysicalNetworkVlan(self.physicalnetworkid, extendedvlan) + + return + + @attr(tags = ["simulator", "advanced"]) + def test_04_remove_unused_range(self): + """ + Test removing unused vlan range + """ + # 1. Add new non contiguous range to existing vlan range + # 2. Remove unused vlan range + # 3. 
Unused vlan range should gte removed successfully + + vlan1 = self.existingvlan+","+self.vlan["partial_range"][0] + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan1) + + vlan2 = vlan1+","+self.vlan["partial_range"][1] + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan2) + + self.debug("Removing vlan : %s" % self.vlan["partial_range"][1]) + + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan1) + + physicalnetworks = PhysicalNetwork.list(self.apiclient, id=self.physicalnetworkid) + + self.assertTrue(isinstance(physicalnetworks, list), "PhysicalNetwork.list should return a \ + valid list object") + + self.assertTrue(len(physicalnetworks) > 0, "physical networks list should not be empty") + + vlanranges= physicalnetworks[0].vlan + + self.assert_(vlanranges.find(self.vlan["partial_range"][1]) == -1, "vlan range is not removed") + + return + + @attr(tags = ["simulator", "advanced"]) + def test_05_remove_used_range(self): + """ + Test removing used vlan range + """ + # 1. If vlan id from existing range is in use, try to delete this range and add different range, + # this operation should fail + # 2. If any of existing vlan id is not in use, delete this range and add new vlan range + # 3. Use a vlan id from this new range by deploying an instance which + # will create a network with vlan id from this range + # 4. Now try to remove this vlan range + # 5. 
Vlan range should not get removed, should throw error + + vlans = xsplit(self.existingvlan, ['-', ',']) + vlanstartid = int(vlans[0]) + vlanendid = int(vlans[1]) + + networks = list_networks(self.apiclient) + existingvlaninuse = False + + + # Check if any of the vlan id from existing range is in use + if isinstance(networks,list) and len(networks) > 0: + + self.debug("networks: %s" % networks) + + vlansinuse = [network for network in networks if network.vlan and (vlanstartid <= int(network.vlan) <= vlanendid)] + + self.debug("Total no. of vlans in use : %s" % len(vlansinuse)) + + if len(vlansinuse) > 0: + existingvlaninuse = True + else: + existingvlaninuse = False + + vlan1 = self.vlan["partial_range"][0] + + # If existing vlan id is in use, then try to delete this range, the operation should fail + # This serves the test case purpose, hence test case has completed successfully + if existingvlaninuse: + self.debug("Trying to remove existing vlan in use, This should fail") + with self.assertRaises(Exception) as e: + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan1) + + self.debug("operation failed with exception: %s" % e.exception) + + # If any of the existing vlan id is not in use, then add new range and deploy an instance which + # will create a network using vlan id from this new range, hence now the new range is in use + # Now try to delete this new range and add another range, operation should fail + # This serves the test case purpose, hence test case has completed successfully + else: + + self.debug("No vlan in use, hence adding a new vlan and using it by deploying an instance") + + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = vlan1) + + self.debug("Verifying the VLAN of the updated physical network: %s, It should match with \ + the passed vlan: %s" % (self.physicalnetworkid,vlan1)) + + self.validatePhysicalNetworkVlan(self.physicalnetworkid, vlan1) + + account = Account.create( + 
self.apiclient, + self.services["account"], + domainid=self.domain.id + ) + + self.debug("Deploying instance in the account: %s" % + account.name) + + self.virtual_machine = VirtualMachine.create( + self.apiclient, + self.services["virtual_machine"], + accountid=account.name, + domainid=account.domainid, + serviceofferingid=self.service_offering.id, + mode=self.zone.networktype + ) + + self.debug("Deployed instance in account: %s" % + account.name) + + + + self.debug("Trying to remove vlan range : %s , This should fail" % self.vlan["partial_range"][0]) + + with self.assertRaises(Exception) as e: + self.physicalnetwork.update(self.apiClient, id = self.physicalnetworkid, vlan = self.existingvlan) + + self.debug("operation failed with exception: %s" % e.exception) + + account.delete(self.apiclient) + + return diff --git a/test/integration/component/test_persistent_networks.py b/test/integration/component/test_persistent_networks.py new file mode 100644 index 00000000000..f61ccaae360 --- /dev/null +++ b/test/integration/component/test_persistent_networks.py @@ -0,0 +1,290 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+""" Tests for Persistent Networks without running VMs feature +""" +import marvin +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin.cloudstackException import cloudstackAPIException +from marvin.integration.lib.utils import * +from marvin.integration.lib.base import * +from marvin.integration.lib.common import * +import netaddr + +from nose.plugins.attrib import attr + +class Services(object): + """Test Persistent Networks without running VMs + """ + def __init__(self): + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended for unique + # username + "password": "password", + }, + "service_offering": { + "name": "Tiny Instance ", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 200, # in MHz + "memory": 256, # In MBs + }, + "shared_persistent_network_offering": { + "name": 'Network offering for Shared Persistent Network', + "displaytext": 'Network offering-DA services', + "guestiptype": 'Shared', + "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat', + "traffictype": 'GUEST', + "availability": 'Optional', + "ispersistent": 'True', + "serviceProviderList": { + "Dhcp": 'VirtualRouter', + "Dns": 'VirtualRouter', + "SourceNat": 'VirtualRouter', + "PortForwarding": 'VirtualRouter', + "Vpn": 'VirtualRouter', + "Firewall": 'VirtualRouter', + "Lb": 'VirtualRouter', + "UserData": 'VirtualRouter', + "StaticNat": 'VirtualRouter', + }, + }, + "isolated_persistent_network_offering": { + "name": 'Network offering for Isolated Persistent Network', + "displaytext": 'Network offering-DA services', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat', + "traffictype": 'GUEST', + "availability": 'Optional', + "ispersistent": 'True', + "serviceProviderList": { + "Dhcp": 'VirtualRouter', + "Dns": 'VirtualRouter', 
+ "SourceNat": 'VirtualRouter', + "PortForwarding": 'VirtualRouter', + "Vpn": 'VirtualRouter', + "Firewall": 'VirtualRouter', + "Lb": 'VirtualRouter', + "UserData": 'VirtualRouter', + "StaticNat": 'VirtualRouter', + }, + }, + "isolated_network_offering": { + "name": 'Network offering for Isolated Persistent Network', + "displaytext": 'Network offering-DA services', + "guestiptype": 'Isolated', + "supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Firewall,Lb,UserData,StaticNat', + "traffictype": 'GUEST', + "availability": 'Optional', + "serviceProviderList": { + "Dhcp": 'VirtualRouter', + "Dns": 'VirtualRouter', + "SourceNat": 'VirtualRouter', + "PortForwarding": 'VirtualRouter', + "Vpn": 'VirtualRouter', + "Firewall": 'VirtualRouter', + "Lb": 'VirtualRouter', + "UserData": 'VirtualRouter', + "StaticNat": 'VirtualRouter', + }, + }, + "isolated_network": { + "name": "Isolated Network", + "displaytext": "Isolated Network", + }, + "virtual_machine": { + "displayname": "Test VM", + }, + "ostype": 'CentOS 5.3 (64-bit)', + # Cent OS 5.3 (64 bit) + "sleep": 90, + "timeout": 10, + "mode": 'advanced' + } + + + +class TestPersistentNetworks(cloudstackTestCase): + """Test Persistent Networks without running VMs + """ + + @classmethod + def setUpClass(cls): + cls.api_client = super(TestPersistentNetworks, cls).getClsTestClient().getApiClient() + cls.services = Services().services + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + cls.services["domainid"] = cls.domain.id + cls.services["zoneid"] = cls.zone.id + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=cls.domain.id + ) + cls.services["account"] = cls.account.name + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + cls.services["virtual_machine"]["template"] = cls.template.id + 
cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + cls.isolated_persistent_network_offering = cls.create_network_offering("isolated_persistent_network_offering") + cls.isolated_network = cls.create_isolated_network(cls.isolated_persistent_network_offering.id) + cls.isolated_network_offering = cls.create_network_offering("isolated_network_offering") + + + # network will be deleted as part of account cleanup + cls._cleanup = [ + cls.account, cls.service_offering, cls.isolated_persistent_network_offering, cls.isolated_network_offering + ] + return + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @classmethod + def create_network_offering(cls, network_offering_type): + network_offering = NetworkOffering.create( + cls.api_client, + cls.services[network_offering_type], + conservemode=False + ) + # Update network offering state from disabled to enabled. 
+ network_offering_update_response = NetworkOffering.update( + network_offering, + cls.api_client, + id=network_offering.id, + state="enabled" + ) + return network_offering + + @classmethod + def create_isolated_network(cls, network_offering_id): + isolated_network = Network.create( + cls.api_client, + cls.services["isolated_network"], + networkofferingid=network_offering_id, + accountid=cls.account.name, + domainid=cls.domain.id, + zoneid=cls.zone.id + ) + cls.debug("persistent isolated network is created: " + isolated_network.id) + return isolated_network + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [ ] + return + + def tearDown(self): + try: + # Clean up, terminate the resources created + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def create_virtual_machine(self, network_id=None): + virtual_machine = VirtualMachine.create(self.apiclient, + self.services["virtual_machine"], + networkids=network_id, + serviceofferingid=self.service_offering.id, + accountid=self.account.name, + domainid=self.domain.id + ) + self.debug("Virtual Machine is created: " + virtual_machine.id) + return virtual_machine + + @attr(tags=["advanced"]) + def test_network_state_after_destroying_vms(self): + # steps + # 1. create virtual machine in network + # 2. destroy created virtual machine + # + # validation + # 1. Persistent network state should be implemented before VM creation and have some vlan assigned + # 2. virtual machine should be created successfully + # 3. 
Network state should be implemented even after destroying all vms in network + self.assertEquals(self.isolated_network.state, u"Implemented", "network state of persistent is not implemented") + self.assertIsNotNone(self.isolated_network.vlan, "vlan must not be null for persistent network") + + try: + virtual_machine = self.create_virtual_machine(network_id=self.isolated_network.id) + virtual_machine.delete(self.apiclient) + except Exception as e: + self.skipTest("vm creation/deletion fails") + + # wait for time such that, network is cleaned up + # assuming that it will change its state to allocated after this much period + wait_for_cleanup(self.api_client, ["network.gc.interval", "network.gc.wait"]) + + + networks = Network.list(self.apiclient, id=self.isolated_network.id) + self.assertEqual( + isinstance(networks, list), + True, + "list Networks should return valid response" + ) + + + self.assertEquals(networks[0].state, u"Implemented", "network state of persistent network after all vms are destroyed is not implemented") + + @attr(tags=["advanced"]) + def test_shared_network_offering_with_persistent(self): + # steps + # 1. create shared network offering with persistent field enabled + # + # validation + # 1. network offering should throw an exception + try: + shared_persistent_network_offering = self.create_network_offering("shared_persistent_network_offering") + shared_persistent_network_offering.delete(self.apiclient) + self.fail("For shared network ispersistent must be False") + except Exception as e: + pass + + @attr(tags=["advanced"]) + def test_upgrade_network_offering_to_persistent(self): + # steps + # 1. create isolated network with network offering which has ispersistent field disabled + # 2. upgrade isolated network offering to network offering which has ispersistent field enabled + # + # validation + # 1. update of network should happen successfully + # 2. 
network state should be implemented and have some vlan assigned + isolated_network = self.create_isolated_network(self.isolated_network_offering.id) + isolated_network_response = isolated_network.update(self.apiclient, networkofferingid=self.isolated_persistent_network_offering.id) + self.assertEquals(self.isolated_network.state, u"Implemented", "network state of isolated network upgraded to persistent is not implemented") + self.assertIsNotNone(self.isolated_network.vlan, "vlan must not be null isolated network upgraded to for persistent network") diff --git a/test/integration/component/test_portable_ip.py b/test/integration/component/test_portable_ip.py index 55de60d76ce..917e7f2d1a9 100644 --- a/test/integration/component/test_portable_ip.py +++ b/test/integration/component/test_portable_ip.py @@ -71,6 +71,14 @@ class Services: "name": "Test Network - Portable IP", "displaytext": "Test Network - Portable IP", }, + "network1": { + "name": "Test Network 1 - Portable IP", + "displaytext": "Test Network 1 - Portable IP", + }, + "network2": { + "name": "Test Network 2 - Portable IP", + "displaytext": "Test Network 2 - Portable IP", + }, "disk_offering": { "displaytext": "Small Disk", "name": "Small Disk", @@ -94,6 +102,30 @@ class Services: "publicport": 22, "protocol": 'TCP', }, + "vm1": + # Create a small virtual machine instance with disk offering + { + "displayname": "vm1", + "username": "root", # VM creds for SSH + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "vm2": + # Create a small virtual machine instance with disk offering + { + "displayname": "vm2", + "username": "root", # VM creds for SSH + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, "ostype": 'CentOS 5.3 (64-bit)', } @@ -726,7 +758,7 @@ class TestAssociatePublicIp(cloudstackTestCase): try: - self.debug("Deploying 
Virtual Machine") + self.debug("DeployingVirtual Machine") self.virtual_machine = VirtualMachine.create( self.apiclient, self.services["small"], @@ -1307,3 +1339,214 @@ class TestDeleteAccount(cloudstackTestCase): id=portableip.ipaddress.id) return + + +class TestPortableIpTransferAcrossNetworks(cloudstackTestCase): + """Test Transfer Portable IP Across Networks + """ + + + @classmethod + def setUpClass(cls): + cls.api_client = super(TestPortableIpTransferAcrossNetworks, cls).getClsTestClient().getApiClient() + cls.services = Services().services + # Get Zone, Domain and templates + cls.region = get_region(cls.api_client, cls.services) + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.pod = get_pod(cls.api_client, cls.zone.id, cls.services) + cls.services['mode'] = cls.zone.networktype + cls.services["domainid"] = cls.domain.id + cls.services["zoneid"] = cls.zone.id + cls.services["regionid"] = cls.region.id + + template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + # Set Zones and disk offerings + cls.services["vm1"]["zoneid"] = cls.zone.id + cls.services["vm1"]["template"] = template.id + cls.services["vm2"]["zoneid"] = cls.zone.id + cls.services["vm2"]["template"] = template.id + + # Set Zones and Network offerings + cls.account = Account.create( + cls.api_client, + cls.services["account"], + domainid=cls.domain.id, + admin=True + ) + + cls.network_offering = NetworkOffering.create( + cls.api_client, + cls.services["network_offering"], + conservemode=False + ) + + # Enable Network offering + cls.network_offering.update(cls.api_client, state='Enabled') + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + + cls.debug("creating networks and virtual machines in each network for portable ip transfer tests: ") + cls.network1 = Network.create( + cls.api_client, + cls.services["network1"], + accountid=cls.account.name, + 
domainid=cls.account.domainid, + networkofferingid=cls.network_offering.id, + zoneid=cls.zone.id + ) + + cls.virtual_machine1 = VirtualMachine.create( + cls.api_client, + cls.services["vm1"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + networkids = [cls.network1.id], + ) + cls.network2 = Network.create( + cls.api_client, + cls.services["network2"], + accountid=cls.account.name, + domainid=cls.account.domainid, + networkofferingid=cls.network_offering.id, + zoneid=cls.zone.id + ) + cls.virtual_machine2 = VirtualMachine.create( + cls.api_client, + cls.services["vm2"], + accountid=cls.account.name, + domainid=cls.account.domainid, + serviceofferingid=cls.service_offering.id, + networkids = [cls.network2.id], + ) + cls._cleanup = [cls.account, cls.network_offering] + + return + + @classmethod + def tearDownClass(cls): + try: + #Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + + #create new portable ip range + self.portable_ip_range_services = get_portable_ip_range_services(self.config) + + if self.portable_ip_range_services is None: + self.skipTest('Failed to read config values related to portable ip range') + + self.portable_ip_range_services["regionid"] = self.region.id + + self.debug("Creating new portable IP range with startip:%s and endip:%s" % + (str(self.portable_ip_range_services["startip"]), + str(self.portable_ip_range_services["endip"]))) + + #create new portable ip range + self.portable_ip_range = PortablePublicIpRange.create(self.apiclient, + self.portable_ip_range_services) + + self.debug("Created new portable IP range with startip:%s and endip:%s and id:%s" % + (self.portable_ip_range.startip, + self.portable_ip_range.endip, + 
self.portable_ip_range.id)) + + self.cleanup = [self.portable_ip_range, ] + return + + def tearDown(self): + try: + #Clean up, terminate the resources created + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @attr(tags=["advanced","swamy"]) + def test_list_portable_ip_range_non_root_admin(self): + """Test list portable ip ranges with non admin root account + """ + # 1. Create new network 1 and associate portable IP 1 + # 2. Have at least 1 VM in network1 + # 3. Create a new network 2 and at least 1 VM in network 2 + # 2. enable static NAT on portable IP 1 with a VM in network 2 + # 3. SSH to the VM in network 2 + + portableip = PublicIPAddress.create( + self.apiclient, + accountid=self.account.name, + zoneid=self.zone.id, + domainid=self.account.domainid, + networkid=self.network1.id, + isportable=True + ) + self.debug("created public ip address (portable): %s" % portableip.ipaddress.ipaddress) + #Create NAT rule + self.debug("Creating NAT rule on the portable public ip") + # Enable Static NAT for VM + StaticNATRule.enable( + self.apiclient, + portableip.ipaddress.id, + self.virtual_machine2.id, + networkid=self.network2.id + ) + # Open up firewall port for SSH + self.debug("Opening firewall on the portable public ip") + fw_rule = FireWallRule.create( + self.apiclient, + ipaddressid=portableip.ipaddress.id, + protocol=self.services["natrule"]["protocol"], + cidrlist=[self.services["natrule"]["cidr"]], + startport=self.services["natrule"]["publicport"], + endport=self.services["natrule"]["publicport"] + ) + static_nat_list = PublicIPAddress.list( + self.apiclient, + associatednetworkid=self.network2.id, + listall=True, + isstaticnat=True, + ipaddress=portableip.ipaddress.ipaddress, + ) + self.assertEqual( + isinstance(static_nat_list, list), + True, + "List Public IP should return a valid static NAT info that was created on portable ip" + ) + self.assertTrue( + 
static_nat_list[0].ipaddress == portableip.ipaddress.ipaddress and static_nat_list[0].virtualmachineid==self.virtual_machine2.id, + "There is some issue in transferring portable ip {} across networks".format(portableip.ipaddress.ipaddress) + ) + try: + + self.debug("Trying to SSH to ip: %s" % portableip.ipaddress.ipaddress) + + remoteSSHClient( + portableip.ipaddress.ipaddress, + self.services['natrule']["publicport"], + self.virtual_machine2.username, + self.virtual_machine2.password + ) + except Exception as e: + self.fail("Exception while SSHing : %s" % e) + + self.debug("disassociating portable ip: %s" % portableip.ipaddress.ipaddress) + portableip.delete(self.apiclient) + + + + + + diff --git a/test/integration/component/test_redundant_router_cleanups.py b/test/integration/component/test_redundant_router_cleanups.py index 303ca8b8da6..e30c1020243 100644 --- a/test/integration/component/test_redundant_router_cleanups.py +++ b/test/integration/component/test_redundant_router_cleanups.py @@ -653,7 +653,7 @@ class TestRedundantRouterNetworkCleanups(cloudstackTestCase): self.debug("Sleeping for network gc wait + interval time") # Sleep to ensure that all resources are deleted - time.sleep((delay + exp) * 2) + time.sleep((delay + exp) * 3) routers = Router.list( self.apiclient, diff --git a/test/integration/component/test_reset_ssh_keypair.py b/test/integration/component/test_reset_ssh_keypair.py index 8b499d017af..4d0c45e9e17 100644 --- a/test/integration/component/test_reset_ssh_keypair.py +++ b/test/integration/component/test_reset_ssh_keypair.py @@ -59,7 +59,6 @@ class Services: "cpunumber": 1, "cpuspeed": 100, "memory": 128, - "storagetype": "local" }, "egress": { "name": 'web', @@ -83,6 +82,23 @@ class Services: "mode": 'advanced', } +def wait_vm_start(apiclient, account, timeout, sleep): + while timeout: + vms = VirtualMachine.list( + apiclient, + account=account.name, + domainid=account.domainid, + listall=True + ) + if vms and vms[0].state == "Running": + 
return timeout + + time.sleep(sleep) + timeout = timeout - 1 + + return timeout + + class TestResetSSHKeypair(cloudstackTestCase): @classmethod @@ -340,7 +356,6 @@ class TestResetSSHKeypair(cloudstackTestCase): except Exception as e: self.fail("Failed to reset SSH key: %s, %s" % (virtual_machine.name, e)) - return self.debug("Starting the virtual machine after resetting the keypair") try: virtual_machine.start(self.apiclient) @@ -348,17 +363,13 @@ class TestResetSSHKeypair(cloudstackTestCase): self.fail("Failed to start virtual machine: %s, %s" % (virtual_machine.name, e)) - while True: - vms = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - listall=True - ) - if vms[0].state == "Running": - break - self.debug("Vm not in Running state sleep 60s") - time.sleep(60) + timeout = wait_vm_start(self.apiclient, self.account, self.services["timeout"], + self.services["sleep"]) + + if timeout == 0: + self.fail("The virtual machine %s failed to start even after %s minutes" + % (virtual_machine.name, self.services["timeout"])) + self.debug("SSH key path: %s" % str(keyPairFilePath)) try: virtual_machine.get_ssh_client(keyPairFileLocation=str(keyPairFilePath)) @@ -462,17 +473,12 @@ class TestResetSSHKeypair(cloudstackTestCase): self.fail("Failed to start virtual machine: %s, %s" % (virtual_machine.name, e)) - while True: - vms = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - listall=True - ) - if vms[0].state == "Running": - break - self.debug("Vm not in Running state sleep 60s") - time.sleep(60) + timeout = wait_vm_start(self.apiclient, self.account, self.services["timeout"], + self.services["sleep"]) + + if timeout == 0: + self.fail("The virtual machine %s failed to start even after %s minutes" + % (virtual_machine.name, self.services["timeout"])) self.debug("SSHing with new keypair") try: @@ -576,17 +582,13 @@ class TestResetSSHKeypair(cloudstackTestCase): except 
Exception as e: self.fail("Failed to start virtual machine: %s, %s" % (virtual_machine.name, e)) - while True: - vms = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - listall=True - ) - if vms[0].state == "Running": - break - self.debug("Vm not in Running state sleep 60s") - time.sleep(60) + + timeout = wait_vm_start(self.apiclient, self.account, self.services["timeout"], + self.services["sleep"]) + + if timeout == 0: + self.fail("The virtual machine %s failed to start even after %s minutes" + % (virtual_machine.name, self.services["timeout"])) self.debug("SSHing with new keypair") try: @@ -691,17 +693,13 @@ class TestResetSSHKeypair(cloudstackTestCase): except Exception as e: self.fail("Failed to start virtual machine: %s, %s" % (virtual_machine.name, e)) - while True: - vms = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - listall=True - ) - if vms[0].state == "Running": - break - self.debug("Vm not in Running state sleep 60s") - time.sleep(60) + + timeout = wait_vm_start(self.apiclient, self.account, self.services["timeout"], + self.services["sleep"]) + + if timeout == 0: + self.fail("The virtual machine %s failed to start even after %s minutes" + % (virtual_machine.name, self.services["timeout"])) self.debug("SSHing with new keypair") try: @@ -1207,18 +1205,14 @@ class TestResetSSHKeyUserRights(cloudstackTestCase): virtual_machine.start(self.apiclient) except Exception as e: self.fail("Failed to start virtual machine: %s, %s" % - (virtual_machine.name, e)) - while True: - vms = VirtualMachine.list( - self.apiclient, - account=self.user_account.name, - domainid=self.user_account.domainid, - listall=True - ) - if vms[0].state == "Running": - break - self.debug("Vm not in Running state sleep 60s") - time.sleep(60) + (virtual_machine.name, e)) + + timeout = wait_vm_start(self.apiclient, self.account, self.services["timeout"], + self.services["sleep"]) + + 
if timeout == 0: + self.fail("The virtual machine %s failed to start even after %s minutes" + % (vms[0].name, self.services["timeout"])) self.debug("SSHing with new keypair") try: @@ -1351,17 +1345,13 @@ class TestResetSSHKeyUserRights(cloudstackTestCase): except Exception as e: self.fail("Failed to start virtual machine: %s, %s" % (virtual_machine.name, e)) - while True: - vms = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - listall=True - ) - if vms[0].state == "Running": - break - self.debug("Vm not in Running state sleep 60s") - time.sleep(60) + + timeout = wait_vm_start(self.apiclient, self.account, self.services["timeout"], + self.services["sleep"]) + + if timeout == 0: + self.fail("The virtual machine %s failed to start even after %s minutes" + % (virtual_machine.name, self.services["timeout"])) self.debug("SSHing with new keypair") try: @@ -1495,17 +1485,13 @@ class TestResetSSHKeyUserRights(cloudstackTestCase): except Exception as e: self.fail("Failed to start virtual machine: %s, %s" % (virtual_machine.name, e)) - while True: - vms = VirtualMachine.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - listall=True - ) - if vms[0].state == "Running": - break - self.debug("Vm not in Running state sleep 60s") - time.sleep(60) + + timeout = wait_vm_start(self.apiclient, self.account, self.services["timeout"], + self.services["sleep"]) + + if timeout == 0: + self.fail("The virtual machine %s failed to start even after %s minutes" + % (virtual_machine.name, self.services["timeout"])) self.debug("SSHing with new keypair") try: diff --git a/test/integration/component/test_resource_limits.py b/test/integration/component/test_resource_limits.py index 833723cb3ea..377aa7463b1 100644 --- a/test/integration/component/test_resource_limits.py +++ b/test/integration/component/test_resource_limits.py @@ -1185,7 +1185,7 @@ class TestResourceLimitsDomain(cloudstackTestCase): 
self.apiclient, 2, # Volume domainid=self.account.domainid, - max=2 + max=1 ) self.debug("Deploying VM for account: %s" % self.account.name) diff --git a/test/integration/component/test_routers.py b/test/integration/component/test_routers.py index 396c54ee7bd..b41cc6fc711 100644 --- a/test/integration/component/test_routers.py +++ b/test/integration/component/test_routers.py @@ -1249,25 +1249,39 @@ class TestRouterStopCreateFW(cloudstackTestCase): "Check for list hosts response return valid data" ) host = hosts[0] + host.user, host.passwd = get_host_credentials(self.config, host.ipaddress) + # For DNS and DHCP check 'dnsmasq' process status - try: - host.user, host.passwd = get_host_credentials(self.config, host.ipaddress) - result = get_process_status( - host.ipaddress, - 22, - host.user, - host.passwd, - router.linklocalip, - 'iptables -t nat -L' + if self.apiclient.hypervisor.lower() == 'vmware': + result = get_process_status( + self.apiclient.connection.mgtSvr, + 22, + self.apiclient.connection.user, + self.apiclient.connection.passwd, + router.linklocalip, + 'iptables -t nat -L', + hypervisor=self.apiclient.hypervisor + ) + else: + try: + result = get_process_status( + host.ipaddress, + 22, + host.user, + host.passwd, + router.linklocalip, + 'iptables -t nat -L' + ) + except KeyError: + self.skipTest("Provide a marvin config file with host credentials to run %s" % self._testMethodName) + + self.debug("iptables -t nat -L: %s" % result) + self.debug("Public IP: %s" % public_ip.ipaddress) + res = str(result) + self.assertEqual( + res.count(str(public_ip.ipaddress)), + 1, + "Check public IP address" ) - self.debug("iptables -t nat -L: %s" % result) - self.debug("Public IP: %s" % public_ip.ipaddress) - res = str(result) - self.assertEqual( - res.count(str(public_ip.ipaddress)), - 1, - "Check public IP address" - ) - except KeyError: - self.skipTest("Provide a marvin config file with host credentials to run %s" % self._testMethodName) + return diff --git 
a/test/integration/component/test_shared_networks.py b/test/integration/component/test_shared_networks.py index 88bb018d404..732e1447b50 100644 --- a/test/integration/component/test_shared_networks.py +++ b/test/integration/component/test_shared_networks.py @@ -82,10 +82,10 @@ class Services: "network1": { "name": "MySharedNetwork - Test1", "displaytext": "MySharedNetwork1", - "gateway" :"172.16.15.1", + "gateway" :"172.16.16.1", "netmask" :"255.255.255.0", - "startip" :"172.16.15.21", - "endip" :"172.16.15.41", + "startip" :"172.16.16.21", + "endip" :"172.16.16.41", "acltype" : "Domain", "scope":"all", }, @@ -2189,7 +2189,7 @@ class TestSharedNetworks(cloudstackTestCase): self.services["network1"]["acltype"] = "domain" self.services["network1"]["networkofferingid"] = self.shared_network_offering.id self.services["network1"]["physicalnetworkid"] = physical_network.id - self.services["network1"]["vlan"] = self.getFreeVlan(self.api_client, self.zone.id) + self.services["network1"]["vlan"] = self.getFreeVlan(self.api_client, self.zone.id)[1] #vlan id is second return value of function self.network1 = Network.create( self.api_client, diff --git a/test/integration/component/test_snapshot_gc.py b/test/integration/component/test_snapshot_gc.py index aec976103a3..1e1cc5dd801 100644 --- a/test/integration/component/test_snapshot_gc.py +++ b/test/integration/component/test_snapshot_gc.py @@ -148,7 +148,6 @@ class TestAccountSnapshotClean(cloudstackTestCase): ) cls.services["account"] = cls.account.name - cls._cleanup.append(cls.account) if cls.zone.localstorageenabled: cls.services["service_offering"]["storagetype"] = "local" @@ -156,7 +155,7 @@ class TestAccountSnapshotClean(cloudstackTestCase): cls.api_client, cls.services["service_offering"] ) - cls._cleanup.append(cls.service_offering) + cls.virtual_machine = VirtualMachine.create( cls.api_client, cls.services["server"], @@ -165,7 +164,7 @@ class TestAccountSnapshotClean(cloudstackTestCase): 
domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id ) - cls._cleanup.append(cls.virtual_machine) + # Get the Root disk of VM volumes = list_volumes( cls.api_client, @@ -177,13 +176,10 @@ class TestAccountSnapshotClean(cloudstackTestCase): # Create a snapshot from the ROOTDISK cls.snapshot = Snapshot.create(cls.api_client, volumes[0].id) - cls._cleanup.append(cls.snapshot) except Exception, e: cls.tearDownClass() unittest.SkipTest("setupClass fails for %s" % cls.__name__) raise e - else: - cls._cleanup.remove(cls.account) return @classmethod @@ -296,15 +292,12 @@ class TestAccountSnapshotClean(cloudstackTestCase): # Wait for account cleanup interval wait_for_cleanup(self.apiclient, configs=["account.cleanup.interval"]) - accounts = list_accounts( + + with self.assertRaises(Exception): + accounts = list_accounts( self.apiclient, id=self.account.id ) - self.assertEqual( - accounts, - None, - "List accounts should return empty list after account deletion" - ) self.assertFalse(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, self.snapshot.id), "Snapshot was still found on NFS after account gc") diff --git a/test/integration/component/test_snapshots_improvement.py b/test/integration/component/test_snapshots_improvement.py new file mode 100644 index 00000000000..190db556c1c --- /dev/null +++ b/test/integration/component/test_snapshots_improvement.py @@ -0,0 +1,693 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" P1 tests for Snapshots Improvements +""" +# Import Local Modules +from nose.plugins.attrib import attr +from marvin.cloudstackTestCase import cloudstackTestCase, unittest +from marvin.integration.lib.utils import (random_gen) +from marvin.integration.lib.base import ( + Account, + ServiceOffering, + VirtualMachine, + Snapshot, + Template, + Volume, + DiskOffering + ) +from marvin.integration.lib.common import (get_domain, + get_zone, + get_template, + cleanup_resources, + list_snapshots + ) +from marvin.cloudstackAPI import (createSnapshot, + createVolume, + createTemplate, + listOsTypes, + stopVirtualMachine + ) +from marvin.integration.lib.utils import is_snapshot_on_nfs + + +class Services: + def __init__(self): + self.services = { + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 200, # in MHz + "memory": 256, # In MBs + }, + "service_offering2": { + "name": "Med Instance", + "displaytext": "Med Instance", + "cpunumber": 1, + "cpuspeed": 1000, # In MHz + "memory": 1024, # In MBs + }, + "disk_offering": { + "displaytext": "Small Disk", + "name": "Small Disk", + "disksize": 1, + "storagetype": "shared", + }, + "disk_offering2": { + "displaytext": "Med Disk", + "name": "Med Disk", + "disksize": 5, + "storagetype": "shared", + }, + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + # Random characters are appended in create account to + # ensure unique username generated each time + "password": "password", + }, + 
"virtual_machine": { + "displayname": "TestVM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "template": { + "displaytext": "Public Template", + "name": "Public template", + "ostype": 'CentOS 5.3 (64-bit)', + "isfeatured": True, + "ispublic": True, + "isextractable": True, + "templatefilter": 'self', + }, + "volume": { + "diskname": "TestDiskServ", + "size": 1, # GBs + }, + "diskdevice": "/dev/xvda", + "rootdisk": "/dev/xvda", + + "mount_dir": "/mnt/tmp", + "sub_dir": "test", + "sub_lvl_dir1": "test1", + "sub_lvl_dir2": "test2", + "random_data": "random.data", + + "ostype": 'CentOS 5.3 (64-bit)', + "NumberOfThreads": 1, + "sleep": 60, + "timeout": 10, + "mode": 'advanced', + # Networking mode: Advanced, Basic + } + +class TestSnapshotOnRootVolume(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.api_client = super(TestSnapshotOnRootVolume, + cls).getClsTestClient().getApiClient() + cls.services = Services().services + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + cls.template = get_template( + cls.api_client, + cls.zone.id, + cls.services["ostype"]) + cls.account = Account.create(cls.api_client, + cls.services["account"], + domainid=cls.domain.id) + # pdb.set_trace() + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"]) + cls.disk_offering = DiskOffering.create( + cls.api_client, + cls.services["disk_offering"], + domainid=cls.domain.id) + cls.service_offering2 = ServiceOffering.create( + cls.api_client, + cls.services["service_offering2"]) + cls.disk_offering2 = DiskOffering.create( + cls.api_client, + cls.services["disk_offering2"], + domainid=cls.domain.id) + + cls._cleanup = [cls.account, + cls.service_offering, + cls.disk_offering, + cls.service_offering2, + cls.disk_offering2] + + @classmethod + def 
tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.cleanup = [] + return + + def tearDown(self): + try: + # Clean up, terminate the created instance, volumes and snapshots + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + @attr(tags=["advanced", "basic"]) + def test_01_snapshot_on_rootVolume(self): + """Test create VM with default cent os template and create snapshot + on root disk of the vm + """ + # Validate the following + # 1. Deploy a Linux VM using default CentOS template, use small service + # offering, disk offering + # 2. Create snapshot on the root disk of this newly cteated vm + # 3. listSnapshots should list the snapshot that was created. + # 4. verify that secondary storage NFS share contains the reqd + # volume under /secondary/snapshots/$accountid/$volumeid/$snapshot_uuid + # 5. 
verify backup_snap_id was non null in the `snapshots` table + + # Create virtual machine with small systerm offering and disk offering + new_virtual_machine = VirtualMachine.create( + self.apiclient, + self.services["virtual_machine"], + templateid=self.template.id, + zoneid=self.zone.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + diskofferingid=self.disk_offering.id, + ) + self.debug("Virtual machine got created with id: %s" % + new_virtual_machine.id) + list_virtual_machine_response = VirtualMachine.list( + self.apiclient, + id=new_virtual_machine.id) + self.assertEqual(isinstance(list_virtual_machine_response, list), + True, + "Check listVirtualMachines returns a valid list") + + self.assertNotEqual(len(list_virtual_machine_response), + 0, + "Check listVirtualMachines response") + self.cleanup.append(new_virtual_machine) + + # Getting root volume id of the vm created above + list_volume_response = Volume.list( + self.apiclient, + virtualmachineid=list_virtual_machine_response[0].id, + type="ROOT", + account=self.account.name, + domainid=self.account.domainid) + + self.assertEqual(isinstance(list_volume_response, list), + True, + "Check listVolumes returns a valid list") + self.assertNotEqual(len(list_volume_response), + 0, + "Check listVolumes response") + self.debug( + "Snapshot will be created on the volume with voluem id: %s" % + list_volume_response[0].id) + + # Perform snapshot on the root volume + root_volume_snapshot = Snapshot.create( + self.apiclient, + volume_id=list_volume_response[0].id) + self.debug("Created snapshot: %s for vm: %s" % ( + root_volume_snapshot.id, + list_virtual_machine_response[0].id)) + list_snapshot_response = Snapshot.list( + self.apiclient, + id=root_volume_snapshot.id, + account=self.account.name, + domainid=self.account.domainid) + self.assertEqual(isinstance(list_snapshot_response, list), + True, + "Check listSnapshots returns a valid list") + + 
self.assertNotEqual(len(list_snapshot_response), + 0, + "Check listSnapshots response") + # Verify Snapshot state + self.assertEqual( + list_snapshot_response[0].state in [ + 'BackedUp', + 'CreatedOnPrimary' + ], + True, + "Snapshot state is not as expected. It is %s" % + list_snapshot_response[0].state + ) + + self.assertEqual( + list_snapshot_response[0].volumeid, + list_volume_response[0].id, + "Snapshot volume id is not matching with the vm's volume id") + self.cleanup.append(root_volume_snapshot) + + # Below code is to verify snapshots in the backend and in db. + # Verify backup_snap_id field in the snapshots table for the snapshot created, it should not be null + + self.debug("select id, removed, backup_snap_id from snapshots where uuid = '%s';" % root_volume_snapshot.id) + qryresult = self.dbclient.execute("select id, removed, backup_snap_id from snapshots where uuid = '%s';" % root_volume_snapshot.id) + self.assertNotEqual(len(qryresult), 0, "Check sql query to return snapshots list") + snapshot_qry_response = qryresult[0] + snapshot_id = snapshot_qry_response[0] + is_removed = snapshot_qry_response[1] + backup_snap_id = snapshot_qry_response[2] + self.assertNotEqual(is_removed, "NULL", "Snapshot is removed from CS, please check the logs") + msg = "Backup snapshot id is set to null for the backedup snapshot :%s" % snapshot_id + self.assertNotEqual(backup_snap_id, "NULL", msg ) + + # Check if the snapshot is present on the secondary storage + self.assertTrue(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, root_volume_snapshot.id)) + + return + +class TestCreateSnapshot(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.api_client = super( + TestCreateSnapshot, + cls).getClsTestClient().getApiClient() + cls.services = Services().services + # Get Zone, Domain and templates + cls.domain = get_domain(cls.api_client, cls.services) + cls.zone = get_zone(cls.api_client, cls.services) + + cls.template = get_template( + 
cls.api_client, + cls.zone.id, + cls.services["ostype"] + ) + + cls.services["virtual_machine"]["zoneid"] = cls.zone.id + + # Create VMs, NAT Rules etc + cls.service_offering = ServiceOffering.create( + cls.api_client, + cls.services["service_offering"] + ) + cls._cleanup = [ + cls.service_offering, + ] + return + + @classmethod + def tearDownClass(cls): + try: + # Cleanup resources used + cleanup_resources(cls.api_client, cls._cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + + self.account = Account.create( + self.apiclient, + self.services["account"], + domainid=self.domain.id + ) + self.cleanup = [self.account, ] + return + + def tearDown(self): + try: + # Clean up, terminate the created instance, volumes and snapshots + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + return + + def create_VM(self, host_id=None): + try: + self.debug('Creating VM for account=%s' % + self.account.name) + vm = VirtualMachine.create( + self.apiclient, + self.services["virtual_machine"], + templateid=self.template.id, + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + hostid=host_id, + mode=self.services["mode"] + ) + self.debug('Created VM=%s in account=%s' % + (vm.id, self.account.name)) + return vm + except Exception as e: + self.fail('Unable to deploy VM in a account=%s - %s' % + (self.account.name, e)) + + def stop_VM(self, virtual_machine): + """ Return Stop Virtual Machine command""" + + cmd = stopVirtualMachine.stopVirtualMachineCmd() + cmd.id = virtual_machine.id + return cmd + + def create_Snapshot_On_Root_Disk(self, virtual_machine): + try: + volumes = Volume.list( + self.apiclient, + virtualmachineid=virtual_machine.id, + type='ROOT', + 
listall=True + ) + self.assertEqual( + isinstance(volumes, list), + True, + "Check list response returns a valid list" + ) + volume = volumes[0] + + cmd = createSnapshot.createSnapshotCmd() + cmd.volumeid = volume.id + cmd.account = self.account.name + cmd.domainid = self.account.domainid + return cmd + except Exception as e: + self.fail('Unable to create new job for snapshot: %s' % e) + + def create_Template_from_Snapshot(self, snapshot): + try: + self.debug("Creating template from snapshot: %s" % snapshot.name) + + cmd = createTemplate.createTemplateCmd() + cmd.displaytext = self.services["template"]["displaytext"] + cmd.name = "-".join([self.services["template"]["name"], + random_gen()]) + + ncmd = listOsTypes.listOsTypesCmd() + ncmd.description = self.services["template"]["ostype"] + ostypes = self.apiclient.listOsTypes(ncmd) + + if not isinstance(ostypes, list): + raise Exception( + "Unable to find Ostype id with desc: %s" % + self.services["template"]["ostype"]) + cmd.ostypeid = ostypes[0].id + cmd.snapshotid = snapshot.id + + return cmd + except Exception as e: + self.fail("Failed to create template from snapshot: %s - %s" % + (snapshot.name, e)) + + def create_Volume_from_Snapshot(self, snapshot): + try: + self.debug("Creating volume from snapshot: %s" % snapshot.name) + + cmd = createVolume.createVolumeCmd() + cmd.name = "-".join([ + self.services["volume"]["diskname"], + random_gen()]) + cmd.snapshotid = snapshot.id + cmd.zoneid = self.zone.id + cmd.size = self.services["volume"]["size"] + cmd.account = self.account.name + cmd.domainid = self.account.domainid + return cmd + except Exception as e: + self.fail("Failed to create volume from snapshot: %s - %s" % + (snapshot.name, e)) + + def create_Snapshot_VM(self): + """Creates a virtual machine and take a snapshot on root disk + + 1. Create a virtual machine + 2. SSH into virtual machine + 3. Create dummy folders on the ROOT disk of the virtual machine + 4. 
Take a snapshot of ROOT disk""" + + jobs = [] + self.debug("Deploying VM for account: %s" % self.account.name) + for i in range(self.services["NumberOfThreads"]): + vm = self.create_VM() + + self.debug("Create snapshot on ROOT disk") + jobs.append(self.create_Snapshot_On_Root_Disk(vm)) + + # Submit snapshot job at one go + self.testClient.submitCmdsAndWait(jobs) + return + + def create_Snaphot_Stop_VM(self): + """Creates a snapshot on ROOT disk while vm is in stopping state + + 1. Create a virtual machine + 2. SSH into virtual machine + 3. Create dummy folders on the ROOT disk of the virtual machine + 4. Create snapshot on ROOT disk + 5. Stop virtual machine while snapshots are taken on ROOT disk""" + + + jobs = [] + self.debug("Deploying VM for account: %s" % self.account.name) + for i in range(self.services["NumberOfThreads"]): + vm = self.create_VM() + + self.debug("Create thread to stop virtual machine: %s" % vm.name) + jobs.append(self.stop_VM(vm)) + + self.debug("Create snapshot on ROOT disk") + jobs.append(self.create_Snapshot_On_Root_Disk(vm)) + + self.debug("Running concurrent migration jobs in account: %s" % + self.account.name) + # Submit snapshot job at one go + self.testClient.submitCmdsAndWait(jobs) + + return + + def get_Snapshots_For_Account(self, account, domainid): + try: + snapshots = list_snapshots( + self.apiclient, + account=account, + domainid=domainid, + listall=True, + key='type', + value='manual' + ) + self.debug("List Snapshots result : %s" % snapshots) + self.assertEqual( + isinstance(snapshots, list), + True, + "List snapshots shall return a valid list" + ) + return snapshots + except Exception as e: + self.fail("Failed to fetch snapshots for account: %s - %s" % + (account, e)) + + def verify_Snapshots(self): + try: + self.debug("Listing snapshots for accout : %s" % self.account.name) + snapshots = self.get_Snapshots_For_Account( + self.account.name, + self.account.domainid) + self.assertEqual( + len(snapshots), + 
int(self.services["NumberOfThreads"]), + "No of snapshots should equal to no of threads spawned" + ) + except Exception as e: + self.fail("Failed to verify snapshots created: %s" % e) + + @attr(speed="slow") + @attr(tags=["advanced", "advancedns"]) + @attr(configuration='concurrent.snapshots.threshold.perhost') + def test_01_concurrent_snapshots_live_migrate(self): + """Test perform concurrent snapshots and migrate the vm from one host + to another + + 1.Configure the concurrent.snapshots.threshold.perhost=3 + 2.Deploy a Linux VM using default CentOS template, use small + service offering, disk offering + 3.Perform snapshot on the root disk of this newly created VMs""" + + # Validate the following + # a. Check all snapshots jobs are running concurrently on backgrounds + # b. listSnapshots should list this newly created snapshot. + + self.debug("Create virtual machine and snapshot on ROOT disk volume") + self.create_Snapshot_VM() + + self.debug("Verify whether snapshots were created properly or not?") + self.verify_Snapshots() + return + + @attr(speed="slow") + @attr(tags=["advanced", "advancedns"]) + @attr(configuration='concurrent.snapshots.threshold.perhost') + def test_02_stop_vm_concurrent_snapshots(self): + """Test stop running VM while performing concurrent snapshot on volume + + 1.Configure the concurrent.snapshots.threshold.perhost=3 + 2.Deploy a Linux VM using default CentOS template, use small + service offering, disk offering + 3.Perform snapshot on root disk of this newly created VM + 4.stop the running Vms while snapshot on volume in progress + """ + + # Validate the following + # a. check all snapshots jobs are running concurrently on back grounds + # b. listSnapshots should list this newly created snapshot. 
+ + self.debug("Create virtual machine and snapshot on ROOT disk volume") + self.create_Snaphot_Stop_VM() + + self.debug("Verify whether snapshots were created properly or not?") + self.verify_Snapshots() + return + + @attr(speed="slow") + @attr(tags=["advanced", "advancedns"]) + @attr(configuration='concurrent.snapshots.threshold.perhost') + def test_03_concurrent_snapshots_create_template(self): + """Test while parent concurrent snapshot job in progress,create + template from completed snapshot + + 1.Configure the concurrent.snapshots.threshold.perhost=3 + 2.Deploy a Linux VM using default CentOS template, use small + service offering, disk offering + 3.Perform snapshot on root disk of this newly created VMs(10 vms) + 4.while parent concurrent snapshot job in progress,create template + from completed snapshot""" + + # Validate the following + # a.Able to create Template from snapshots + # b.check all snapshots jobs are running concurrently on back grounds + # c.listSnapshots should list this newly created snapshot. 
+ + self.debug("Create virtual machine and snapshot on ROOT disk") + self.create_Snapshot_VM() + + self.debug("Verify whether snapshots were created properly or not?") + self.verify_Snapshots() + + self.debug("Fetch the list of snapshots belong to account: %s" % + self.account.name) + snapshots = self.get_Snapshots_For_Account( + self.account.name, + self.account.domainid) + jobs = [] + for snapshot in snapshots: + self.debug("Create a template from snapshot: %s" % snapshot.name) + jobs.append(self.create_Template_from_Snapshot(snapshot)) + + # Verify IO usage by submitting the concurrent jobs + self.testClient.submitCmdsAndWait(jobs) + + self.debug("Verifying if templates are created properly or not?") + templates = Template.list( + self.apiclient, + templatefilter=self.services["template"]["templatefilter"], + account=self.account.name, + domainid=self.account.domainid, + listall=True) + self.assertNotEqual(templates, + None, + "Check if result exists in list item call") + for template in templates: + self.assertEqual(template.isready, + True, + "Check new template state in list templates call") + + self.debug("Test completed successfully.") + return + + @attr(speed="slow") + @attr(tags=["advanced", "advancedns"]) + @attr(configuration='concurrent.snapshots.threshold.perhost') + def test_04_concurrent_snapshots_create_volume(self): + """Test while parent concurrent snapshot job in progress,create volume + from completed snapshot + + 1.Configure the concurrent.snapshots.threshold.perhost=3 + 2.Deploy a Linux VM using default CentOS template, use small + service offering, disk offering. + 3.Perform snapshot on root disk of this newly created VM + 4.while parent concurrent snapshot job in progress,create volume + from completed snapshot""" + + # Validate the following + # a.Able to create Volume from snapshots + # b.check all snapshots jobs are running concurrently on back grounds + # c.listSnapshots should list this newly created snapshot. 
+ + self.debug("Create virtual machine and snapshot on ROOT disk thread") + self.create_Snapshot_VM() + + self.debug("Verify whether snapshots were created properly or not?") + self.verify_Snapshots() + + self.debug("Fetch the list of snapshots belong to account: %s" % + self.account.name) + snapshots = self.get_Snapshots_For_Account( + self.account.name, + self.account.domainid) + jobs = [] + for snapshot in snapshots: + self.debug("Create a volume from snapshot: %s" % snapshot.name) + jobs.append(self.create_Volume_from_Snapshot(snapshot)) + + # Verify IO usage by submitting the concurrent jobs + self.testClient.submitCmdsAndWait(jobs) + + self.debug("Verifying if volume created properly or not?") + volumes = Volume.list(self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + listall=True, + type='ROOT') + + self.assertNotEqual(volumes, + None, + "Check if result exists in list item call") + for volume in volumes: + self.debug("Volume: %s, state: %s" % (volume.name, volume.state)) + self.assertEqual(volume.state, + "Ready", + "Check new volume state in list volumes call") + + self.debug("Test completed successfully.") + return diff --git a/test/integration/component/test_stopped_vm.py b/test/integration/component/test_stopped_vm.py index 7903f0e8cbf..3be915166ee 100644 --- a/test/integration/component/test_stopped_vm.py +++ b/test/integration/component/test_stopped_vm.py @@ -84,9 +84,9 @@ class Services: "mode": 'HTTP_DOWNLOAD', # Downloading existing ISO }, "template": { - "url": "http://download.cloud.com/releases/2.0.0/UbuntuServer-10-04-64bit.vhd.bz2", - "hypervisor": 'XenServer', - "format": 'VHD', + "url": "", + "hypervisor": '', + "format": '', "isfeatured": True, "ispublic": True, "isextractable": True, @@ -647,7 +647,7 @@ class TestDeployVM(cloudstackTestCase): self.debug("Successfully created ISO with ID: %s" % iso.id) try: iso.download(self.apiclient) - self.cleanup.append(iso) + except Exception as e: self.fail("Exception 
while downloading ISO %s: %s"\ % (iso.id, e)) @@ -1092,9 +1092,9 @@ class TestDeployHaEnabledVM(cloudstackTestCase): domainid=self.account.domainid ) try: - # Dowanload the ISO + # Download the ISO self.iso.download(self.apiclient) - self.cleanup.append(self.iso) + except Exception as e: raise Exception("Exception while downloading ISO %s: %s"\ % (self.iso.id, e)) @@ -1522,6 +1522,13 @@ class TestDeployVMFromTemplate(cloudstackTestCase): self.services["account"], domainid=self.domain.id ) + + builtin_info = get_builtin_template_info(self.apiclient, self.zone.id) + self.services["template"]["url"] = builtin_info[0] + self.services["template"]["hypervisor"] = builtin_info[1] + self.services["template"]["format"] = builtin_info[2] + + # Register new template self.template = Template.register( self.apiclient, self.services["template"], @@ -1529,6 +1536,11 @@ class TestDeployVMFromTemplate(cloudstackTestCase): account=self.account.name, domainid=self.account.domainid ) + self.debug( + "Registered a template of format: %s with ID: %s" % ( + self.services["template"]["format"], + self.template.id + )) try: self.template.download(self.apiclient) except Exception as e: diff --git a/test/integration/component/test_vpc_network.py b/test/integration/component/test_vpc_network.py index 970a6254c85..b9b4f0049f6 100644 --- a/test/integration/component/test_vpc_network.py +++ b/test/integration/component/test_vpc_network.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. 
-""" Component tests for VPC network functionality +""" Component tests for VPC network functionality - with and without Netscaler (Netscaler tests will be skipped if Netscaler configuration fails) """ #Import Local Modules import marvin @@ -27,7 +27,8 @@ from marvin.integration.lib.base import * from marvin.integration.lib.common import * from marvin.remoteSSHClient import remoteSSHClient import datetime - +# For more info on ddt refer to http://ddt.readthedocs.org/en/latest/api.html#module-ddt +from ddt import ddt, data class Services: """Test VPC network services @@ -74,6 +75,7 @@ class Services: "SourceNat": {"SupportedSourceNatTypes": "peraccount"}, }, }, + # Offering that uses Netscaler as provider for LB inside VPC, dedicated = false "network_off_netscaler": { "name": 'Network offering-netscaler', "displaytext": 'Network offering-netscaler', @@ -96,6 +98,37 @@ class Services: "SourceNat": {"SupportedSourceNatTypes": "peraccount"}, }, }, + # Offering that uses Netscaler as provider for LB in VPC, dedicated = True + # This offering is required for the tests that use Netscaler as external LB provider in VPC + "network_offering_vpcns": { + "name": 'VPC Network offering', + "displaytext": 'VPC Network off', + "guestiptype": 'Isolated', + "supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL', + "traffictype": 'GUEST', + "availability": 'Optional', + "useVpc": 'on', + "serviceProviderList": { + "Vpn": 'VpcVirtualRouter', + "Dhcp": 'VpcVirtualRouter', + "Dns": 'VpcVirtualRouter', + "SourceNat": 'VpcVirtualRouter', + "PortForwarding": 'VpcVirtualRouter', + "Lb": 'Netscaler', + "UserData": 'VpcVirtualRouter', + "StaticNat": 'VpcVirtualRouter', + "NetworkACL": 'VpcVirtualRouter' + }, + "serviceCapabilityList": { + "SourceNat": { + "SupportedSourceNatTypes": "peraccount" + }, + "lb": { + "SupportedLbIsolation": "dedicated" + }, + }, + }, + "network_off_shared": { "name": 'Shared Network offering', "displaytext": 'Shared Network 
offering', @@ -116,6 +149,19 @@ class Services: "displaytext": "TestVPC", "cidr": '10.0.0.1/24' }, + # Netscaler should be added as a dedicated device for it to work as external LB provider in VPC + "netscaler": { + "ipaddress": '10.102.192.50', + "username": 'nsroot', + "password": 'nsroot', + "networkdevicetype": 'NetscalerVPXLoadBalancer', + "publicinterface": '1/3', + "privateinterface": '1/4', + "numretries": 2, + "lbdevicededicated": True, + "lbdevicecapacity": 50, + "port": 22, + }, "network": { "name": "Test Network", "displaytext": "Test Network", @@ -172,7 +218,7 @@ class Services: "timeout": 10, } - +@ddt class TestVPCNetwork(cloudstackTestCase): @classmethod @@ -182,6 +228,9 @@ class TestVPCNetwork(cloudstackTestCase): cls ).getClsTestClient().getApiClient() cls.services = Services().services + # Added an attribute to track if Netscaler addition was successful. + # Value is checked in tests and if not configured, Netscaler tests will be skipped + cls.ns_configured = False # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client, cls.services) cls.zone = get_zone(cls.api_client, cls.services) @@ -199,12 +248,15 @@ class TestVPCNetwork(cloudstackTestCase): cls.services["service_offering"] ) cls._cleanup.append(cls.service_offering) - cls.vpc_off = VpcOffering.create( - cls.api_client, - cls.services["vpc_offering"] - ) - cls._cleanup.append(cls.vpc_off) - cls.vpc_off.update(cls.api_client, state='Enabled') + # Configure Netscaler device + # If configuration succeeds, set ns_configured to True so that Netscaler tests are executed + try: + cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.services["netscaler"]) + cls._cleanup.append(cls.netscaler) + cls.debug("Netscaler configured") + cls.ns_configured = True + except Exception as e: + cls.debug("Warning: Couldn't configure Netscaler: %s" % e) return @classmethod @@ -287,38 +339,42 @@ class TestVPCNetwork(cloudstackTestCase): ) self.debug("VPC network validated - %s" % 
network.name) return - + + @data("network_offering", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_01_create_network(self): + def test_01_create_network(self, value): """ Test create network in VPC """ # Validate the following - # 1. Create VPC Offering by specifying all supported Services - # (Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # 2. Create a VPC using the above VPC offering. - # 3. Create a network offering with guest type=Isolated" that has + # 1. Create a VPC using Default Offering + # 2. Create a network offering with guest type=Isolated" that has # all of supported Services(Vpn,dhcpdns,UserData, SourceNat,Static - # NAT,LB and PF,LB,NetworkAcl ) provided by VPCVR and conserver + # NAT,LB and PF,LB,NetworkAcl ) provided by VPCVR and conserve # mode is ON - # 4. Create a VPC using the above VPC offering. - # 5. Create a network using the network offering created in step2 as + # 3. Create a network tier using the network offering created in step2 as # part of this VPC. + # 4. Validate Network is created + # 5. 
Repeat test for offering which has Netscaler as external LB provider - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcns" and self.ns_configured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -332,7 +388,7 @@ class TestVPCNetwork(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -380,38 +436,42 @@ class TestVPCNetwork(cloudstackTestCase): ) return + @data("network_offering", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_02_create_network_fail(self): + def test_02_create_network_fail(self, value): """ Test create network in VPC mismatched services (Should fail) """ - + # Validate the following - # 1. Create VPC Offering by specifying all supported Services - # (Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # 2. Create a VPC using the above VPC offering. - # 3. 
Create a network offering with guest type=Isolated" that has - # one of supported Services(Vpn,dhcpdns,UserData, SourceNat,Static - # NAT,LB and PF,LB,NetworkAcl ) provided by VPCVR and conserver - # mode is ON - # 4. Create a VPC using the above VPC offering. - # 5. Create a network using the network offering created in step2 as + # 1. Create a VPC using Default VPC Offering + # 2. Create a network offering with guest type=Isolated" that has + # one of supported Services(Vpn,dhcpdns,UserData, Static + # NAT,LB and PF,LB,NetworkAcl ) provided by VPCVR, SourceNat by VR + # and conserve mode is ON + # 3. Create a network using the network offering created in step2 as # part of this VPC. - # 6. Network creation should fail + # 4. Network creation should fail since SourceNat offered by VR instead of VPCVR + # 5. Repeat test for offering which has Netscaler as external LB provider - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcns" and self.ns_configured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) + self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -423,13 +483,12 @@ class TestVPCNetwork(cloudstackTestCase): ) self.validate_vpc_network(vpc) - self.services["network_offering"]["supportedservices"] = 'SourceNat' 
- self.services["network_offering"]["serviceProviderList"] = { + self.services[value]["serviceProviderList"] = { "SourceNat": 'VirtualRouter', } self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -452,127 +511,41 @@ class TestVPCNetwork(cloudstackTestCase): ) return - @attr(tags=["netscaler", "intervlan"]) - def test_03_create_network_netscaler(self): - """ Test create network using netscaler for LB - """ - - # Validate the following - # 1. Create VPC Offering by specifying all supported Services - # (Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # 2. Create a VPC using the above VPC offering - # 3. Create a network offering with guest type="Isolated that has - # LB services provided by Netscaler and all other services - # provided by VPCVR and conserver mode is "ON" - # 4. Create a VPC using the above VPC offering. - # 5. Create a network using the network offering created in step2 as - # part of this VPC - - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) - self.services["vpc"]["cidr"] = '10.1.1.1/16' - vpc = VPC.create( - self.apiclient, - self.services["vpc"], - vpcofferingid=vpc_off.id, - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid - ) - self.validate_vpc_network(vpc) - - self.network_offering = NetworkOffering.create( - self.apiclient, - self.services["network_off_netscaler"], - conservemode=False - ) - # Enable Network offering - self.network_offering.update(self.apiclient, state='Enabled') - self.cleanup.append(self.network_offering) - - # Creating 
network using the network offering created - self.debug("Creating network with network offering: %s" % - self.network_offering.id) - network = Network.create( - self.apiclient, - self.services["network"], - accountid=self.account.name, - domainid=self.account.domainid, - networkofferingid=self.network_offering.id, - zoneid=self.zone.id, - gateway='10.1.1.1', - vpcid=vpc.id - ) - self.debug("Created network with ID: %s" % network.id) - self.debug( - "Verifying list network response to check if network created?") - networks = Network.list( - self.apiclient, - id=network.id, - listall=True - ) - self.assertEqual( - isinstance(networks, list), - True, - "List networks should return a valid response" - ) - nw = networks[0] - - self.assertEqual( - nw.networkofferingid, - self.network_offering.id, - "Network should be created from network offering - %s" % - self.network_offering.id - ) - self.assertEqual( - nw.vpcid, - vpc.id, - "Network should be created in VPC: %s" % vpc.name - ) - return - + @data("network_offering", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_04_create_multiple_networks_with_lb(self): + def test_04_create_multiple_networks_with_lb(self, value): """ Test create multiple networks with LB service (Should fail) """ # Validate the following - # 1. Create VPC Offering by specifying all supported Services - # (Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # 2. Create a VPC using the above VPC offering - # 3. Create a network offering with guest type=Isolated that has LB - # services Enabled and conserver mode is "ON". - # 4. Create a network using the network offering created in step3 as + # 1. Create a VPC using Default Offering + # 2. Create a network offering with guest type=Isolated that has LB + # services Enabled and conserve mode is "ON". + # 3. Create a network using the network offering created in step2 as # part of this VPC. - # 5. 
Create another network using the network offering created in + # 4. Create another network using the network offering created in # step3 as part of this VPC + # 5. Create Network should fail + # 6. Repeat test for offering which has Netscaler as external LB provider + if (value == "network_offering_vpcns" and self.ns_configured == False): + self.skipTest('Netscaler not configured: skipping test') - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -586,7 +559,7 @@ class TestVPCNetwork(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -654,30 +627,20 @@ class TestVPCNetwork(cloudstackTestCase): """ # Validate the following - # 1. Create VPC Offering by specifying all supported Services - # (Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # 2. Create a VPC using the above VPC offering - # 3. Create a network offering with guest type=Isolated that has LB - # services Enabled and conserver mode is "ON". - # 4. Create a network using the network offering created in step3 as - # part of this VPC. - # 5. 
Create another network using the network offering created in - # step3 as part of this VPC + # 1.Create a VPC using Default Offering (Without Netscaler) + # 2. Create a network offering with guest type=Isolated that has LB + # service provided by netscaler and conserve mode is "ON". + # 3. Create a network using this network offering as part of this VPC. + # 4. Create Network should fail since it doesn't match the VPC offering - self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -689,12 +652,29 @@ class TestVPCNetwork(cloudstackTestCase): ) self.validate_vpc_network(vpc) + self.network_offering = NetworkOffering.create( + self.apiclient, + self.services["network_offering_vpcns"], + conservemode=False + ) + # Enable Network offering + self.network_offering.update(self.apiclient, state='Enabled') + self.cleanup.append(self.network_offering) + + # Creating network using the network offering created + self.debug("Creating network with network offering: %s" % + self.network_offering.id) with self.assertRaises(Exception): - NetworkOffering.create( - self.apiclient, - self.services["network_off_netscaler"], - conservemode=False - ) + Network.create( + self.apiclient, + self.services["network"], + accountid=self.account.name, + domainid=self.account.domainid, + networkofferingid=self.network_offering.id, + zoneid=self.zone.id, + gateway='10.1.1.1', + vpcid=vpc.id + ) 
self.debug("Network creation failed") return @@ -779,16 +759,13 @@ class TestVPCNetwork(cloudstackTestCase): """ # Validate the following - # 1. Create VPC Offering by specifying supported Services - - # Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # with out including LB services. + # 1. Create VPC Offering without LB service # 2. Create a VPC using the above VPC offering # 3. Create a network offering with guest type=Isolated that has all # supported Services(Vpn,dhcpdns,UserData, SourceNat,Static NAT,LB - # and PF,LB,NetworkAcl ) provided by VPCVR and conserver mode is OFF - # 4. Create a VPC using the above VPC offering - # 5. Create a network using the network offering created in step2 as - # part of this VPC. + # and PF,LB,NetworkAcl ) provided by VPCVR and conserve mode is OFF + # 4. Create Network with the above offering + # 5. Create network fails since VPC offering doesn't support LB self.debug("Creating a VPC offering without LB service") self.services["vpc_offering"]["supportedservices"] = 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,UserData,StaticNat' @@ -907,36 +884,39 @@ class TestVPCNetwork(cloudstackTestCase): ) self.debug("Network creation failed as VPC doesn't have LB service") return - + + @data("network_off_shared", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_09_create_network_shared_nwoff(self): + def test_09_create_network_shared_nwoff(self, value): """ Test create network with shared network offering """ # Validate the following - # 1. Create VPC Offering by specifying supported Services - - # Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # with out including LB services + # 1. Create VPC Offering using Default Offering # 2. Create a VPC using the above VPC offering # 3. Create a network offering with guest type=shared - # 4. Create a VPC using the above VPC offering - # 5. Create a network using the network offering created in step2 - # as part of this VPC + # 4. 
Create a network using the network offering created in step3 as part of this VPC + # 5. Create network fails since it using shared offering + # 6. Repeat test for offering which has Netscaler as external LB provider - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcns" and self.ns_configured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + if (value == "network_off_shared"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -977,59 +957,32 @@ class TestVPCNetwork(cloudstackTestCase): self.debug("Network creation failed") return + @data("network_offering", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_10_create_network_with_conserve_mode(self): + def test_10_create_network_with_conserve_mode(self, value): """ Test create network with conserve mode ON """ # Validate the following - # 1. Create VPC Offering by specifying all supported Services - # (Vpn,dhcpdns,UserData, SourceNat,Static NAT and PF,LB,NetworkAcl) - # 2. Create a VPC using the above VPC offering - # 3. Create a network offering with guest type=Isolated that has all + # 1. 
Create a network offering with guest type=Isolated that has all # supported Services(Vpn,dhcpdns,UserData, SourceNat,Static NAT,LB - # and PF,LB,NetworkAcl ) provided by VPCVR and conserver mode is ON - # 4. Create a VPC using the above VPC offering - # 5. Create a network using the network offering created in step2 as - # part of this VPC - - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) - self.services["vpc"]["cidr"] = '10.1.1.1/16' - vpc = VPC.create( - self.apiclient, - self.services["vpc"], - vpcofferingid=vpc_off.id, - zoneid=self.zone.id, - account=self.account.name, - domainid=self.account.domainid - ) - self.validate_vpc_network(vpc) + # and PF,LB,NetworkAcl ) provided by VPCVR and conserve mode is ON + # 2. Create offering fails since Conserve mode ON isn't allowed within VPC + # 3. Repeat test for offering which has Netscaler as external LB provider self.debug("Creating network offering with conserve mode = ON") with self.assertRaises(Exception): NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=True ) self.debug( "Network creation failed as VPC support nw with conserve mode OFF") return +@ddt class TestVPCNetworkRanges(cloudstackTestCase): @classmethod @@ -1039,6 +992,9 @@ class TestVPCNetworkRanges(cloudstackTestCase): cls ).getClsTestClient().getApiClient() cls.services = Services().services + # Added an attribute to track if Netscaler addition was successful. 
+ # Value is checked in tests and if not configured, Netscaler tests will be skipped + cls.ns_configured = False # Get Zone, Domain and templates cls.domain = get_domain(cls.api_client, cls.services) cls.zone = get_zone(cls.api_client, cls.services) @@ -1056,12 +1012,14 @@ class TestVPCNetworkRanges(cloudstackTestCase): cls.services["service_offering"] ) cls._cleanup.append(cls.service_offering) - cls.vpc_off = VpcOffering.create( - cls.api_client, - cls.services["vpc_offering"] - ) - cls.vpc_off.update(cls.api_client, state='Enabled') - cls._cleanup.append(cls.vpc_off) + # Configure Netscaler device + # If configuration succeeds, set ns_configured to True so that Netscaler tests are executed + try: + cls.netscaler = add_netscaler(cls.api_client, cls.zone.id, cls.services["netscaler"]) + cls._cleanup.append(cls.netscaler) + cls.ns_configured = True + except Exception as e: + cls.debug("Warning: Couldn't configure Netscaler: %s" % e) return @classmethod @@ -1091,7 +1049,6 @@ class TestVPCNetworkRanges(cloudstackTestCase): cleanup_resources(self.apiclient, self.cleanup) except Exception as e: self.debug("Warning: Exception during cleanup : %s" % e) - #raise Exception("Warning: Exception during cleanup : %s" % e) return def validate_vpc_offering(self, vpc_offering): @@ -1144,8 +1101,9 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.debug("VPC network validated - %s" % network.name) return + @data("network_offering", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_01_create_network_outside_range(self): + def test_01_create_network_outside_range(self, value): """ Test create network outside cidr range of VPC """ @@ -1153,20 +1111,26 @@ class TestVPCNetworkRanges(cloudstackTestCase): # 1. Create a VPC with cidr - 10.1.1.1/16 # 2. Add network1 with cidr - 10.2.1.1/24 to this VPC # 3. Network creation should fail. + # 4. 
Repeat test for offering which has Netscaler as external LB provider - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcns" and self.ns_configured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network with cidr: 10.1.1.1/16") self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -1182,7 +1146,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -1215,6 +1179,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): # 1. Create a VPC with cidr - 10.1.1.1/16 # 2. Add network1 with cidr - 10.2.1.1/24 to this VPC # 3. Network creation should fail. + # 4. 
Repeat test for offering which has Netscaler as external LB provider self.debug("Creating a VPC offering") vpc_off = VpcOffering.create( @@ -1268,8 +1233,9 @@ class TestVPCNetworkRanges(cloudstackTestCase): "Network creation failed as network cidr range is outside of vpc") return + @data("network_offering", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_03_create_network_inside_range(self): + def test_03_create_network_inside_range(self, value): """ Test create network inside cidr range of VPC """ @@ -1277,18 +1243,25 @@ class TestVPCNetworkRanges(cloudstackTestCase): # 1. Create a VPC with cidr - 10.1.1.1/16 # 2. Add network1 with cidr - 10.1.1.1/8 to this VPC # 3. Network creation should fail. + # 4. Repeat test for offering which has Netscaler as external LB provider - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcns" and self.ns_configured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) self.debug("creating a VPC network with cidr: 10.1.1.1/16") self.services["vpc"]["cidr"] = '10.1.1.1/16' @@ -1306,7 +1279,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -1333,8 +1306,9 @@ class 
TestVPCNetworkRanges(cloudstackTestCase): "Network creation failed as network cidr range is inside of vpc") return + @data("network_offering", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_04_create_network_overlapping_range(self): + def test_04_create_network_overlapping_range(self, value): """ Test create network overlapping cidr range of VPC """ @@ -1344,20 +1318,26 @@ class TestVPCNetworkRanges(cloudstackTestCase): # 3. Add network2 with cidr - 10.1.1.1/24 to this VPC # 4. Add network3 with cidr - 10.1.1.1/26 to this VPC # 5. Network creation in step 3 & 4 should fail. + # 6. Repeat test for offering which has Netscaler as external LB provider + self.services = Services().services + if (value == "network_offering_vpcns" and self.ns_configured == False): + self.skipTest('Netscaler not configured: skipping test') - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) self.debug("creating a VPC network with cidr: 10.1.1.1/16") self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( @@ -1374,7 +1354,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -1457,27 +1437,36 @@ class TestVPCNetworkRanges(cloudstackTestCase): "Network creation failed as network range overlaps each 
other") return + @data("network_offering", "network_offering_vpcns") @attr(tags=["advanced", "intervlan"]) - def test_05_create_network_diff_account(self): + def test_05_create_network_diff_account(self, value): """ Test create network from different account in VPC """ # Validate the following # 1. Create a VPC with cidr - 10.1.1.1/16 # 2. Add network1 with cidr - 10.1.1.1/24 to this VPC - # 3. Network creation should fail. + # 3. Create another account + # 4. Create network using this account - Network creation should fail + # 5. Repeat test for offering which has Netscaler as external LB provider - self.debug("Creating a VPC offering") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) + if (value == "network_offering_vpcns" and self.ns_configured == False): + self.skipTest('Netscaler not configured: skipping test') - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) - - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') + if (value == "network_offering"): + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + else: + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering with Netscaler', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) self.debug("creating a VPC network with cidr: 10.1.1.1/16") self.services["vpc"]["cidr"] = '10.1.1.1/16' @@ -1495,7 +1484,7 @@ class TestVPCNetworkRanges(cloudstackTestCase): self.network_offering = NetworkOffering.create( self.apiclient, - self.services["network_offering"], + self.services[value], conservemode=False ) # Enable Network offering @@ -1532,7 +1521,6 @@ class TestVPCNetworkRanges(cloudstackTestCase): "Network creation failed as VPC belongs to different account") return - class TestVPCNetworkUpgrade(cloudstackTestCase): @classmethod @@ -1559,12 +1547,7 @@ class 
TestVPCNetworkUpgrade(cloudstackTestCase): cls.services["service_offering"] ) cls._cleanup.append(cls.service_offering) - cls.vpc_off = VpcOffering.create( - cls.api_client, - cls.services["vpc_offering"] - ) - cls.vpc_off.update(cls.api_client, state='Enabled') - cls._cleanup.append(cls.vpc_off) + return @classmethod @@ -1594,7 +1577,6 @@ class TestVPCNetworkUpgrade(cloudstackTestCase): cleanup_resources(self.apiclient, self.cleanup) except Exception as e: self.debug("Warning: Exception during cleanup : %s" % e) - #raise Exception("Warning: Exception during cleanup : %s" % e) return def validate_vpc_offering(self, vpc_offering): @@ -1646,7 +1628,7 @@ class TestVPCNetworkUpgrade(cloudstackTestCase): ) self.debug("VPC network validated - %s" % network.name) return - + @attr(tags=["advanced", "intervlan"]) def test_01_network_services_upgrade(self): """ Test update Network that is part of a VPC to a network offering that has more services @@ -1666,19 +1648,15 @@ class TestVPCNetworkUpgrade(cloudstackTestCase): # 8. Update network1 to NO2. 
self.debug("Creating a VPC offering..") - vpc_off = VpcOffering.create( - self.apiclient, - self.services["vpc_offering"] - ) - self.cleanup.append(vpc_off) - self.validate_vpc_offering(vpc_off) + vpc_off_list=VpcOffering.list( + self.apiclient, + name='Default VPC offering', + listall=True + ) + vpc_off=vpc_off_list[0] + self.debug("Creating a VPC with offering: %s" % vpc_off.id) - self.debug("Enabling the VPC offering created") - vpc_off.update(self.apiclient, state='Enabled') - - self.debug("creating a VPC network in the account: %s" % - self.account.name) self.services["vpc"]["cidr"] = '10.1.1.1/16' vpc = VPC.create( self.apiclient, @@ -2112,7 +2090,6 @@ class TestVPCNetworkUpgrade(cloudstackTestCase): ) return - class TestVPCNetworkGc(cloudstackTestCase): @classmethod @@ -2354,26 +2331,16 @@ class TestVPCNetworkGc(cloudstackTestCase): self.debug("Waiting for network garbage collection thread to run") # Wait for the network garbage collection thread to run wait_for_cleanup(self.apiclient, - ["network.gc.interval", "network.gc.wait"]*2) - self.debug("Check if the VPC router is in stopped state?") - routers = Router.list( - self.apiclient, - account=self.account.name, - domainid=self.account.domainid, - listall=True - ) - self.assertEqual( - isinstance(routers, list), - True, - "List routers shall return a valid response" - ) - router = routers[0] - # TODO: Add some more assertions - self.assertEqual( - router.state, - "Stopped", - "Router state should be stopped after network gc" - ) + ["network.gc.interval", "network.gc.wait"]) + + #Bug???: Network Acls are not cleared + netacls = NetworkACL.list(self.apiclient, networkid=self.network_1.id) + self.debug("List of NetACLS %s" % netacls) + self.assertEqual(netacls, None, "Netacls were not cleared after network GC thread is run") + + lbrules = LoadBalancerRule.list(self.apiclient, networkid=self.network_1.id) + self.debug("List of LB Rules %s" % lbrules) + self.assertEqual(lbrules, None, "LBrules were not cleared 
after network GC thread is run") return @attr(tags=["advanced", "intervlan"]) diff --git a/test/integration/component/test_vpc_vm_life_cycle.py b/test/integration/component/test_vpc_vm_life_cycle.py index 9844c1f8922..425c2848f96 100644 --- a/test/integration/component/test_vpc_vm_life_cycle.py +++ b/test/integration/component/test_vpc_vm_life_cycle.py @@ -189,7 +189,6 @@ class Services: "mode": 'advanced' } - class TestVMLifeCycleVPC(cloudstackTestCase): @classmethod @@ -927,13 +926,6 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): domainid=cls.domain.id ) - cls.vpc_off = VpcOffering.create( - cls.api_client, - cls.services["vpc_offering"] - ) - - cls.vpc_off.update(cls.api_client, state='Enabled') - cls.services["vpc"]["cidr"] = '10.1.1.1/16' cls.vpc = VPC.create( cls.api_client, @@ -984,6 +976,10 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): cls.services["network"]["physicalnetworkid"] = physical_network.id cls.services["network"]["vlan"] = shared_vlan + # Start Ip and End Ip should be specified for shared network + cls.services["network"]["startip"] = '10.1.2.20' + cls.services["network"]["endip"] = '10.1.2.30' + # Creating network using the network offering created cls.network_2 = Network.create( cls.api_client, @@ -994,7 +990,7 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): zoneid=cls.zone.id, gateway='10.1.2.1', ) - # Spawn an instance in that network + cls.vm_1 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], @@ -1004,7 +1000,7 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): networkids=[str(cls.network_1.id), str(cls.network_2.id)] ) - # Spawn an instance in that network + cls.vm_2 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], @@ -1014,6 +1010,8 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): networkids=[str(cls.network_1.id), str(cls.network_2.id)] ) + + cls.vm_3 = VirtualMachine.create( cls.api_client, cls.services["virtual_machine"], @@ -1023,6 +1021,7 @@ 
class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): networkids=[str(cls.network_1.id), str(cls.network_2.id)] ) + cls.public_ip_1 = PublicIPAddress.create( cls.api_client, accountid=cls.account.name, @@ -1040,7 +1039,10 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): vpcid=cls.vpc.id, domainid=cls.account.domainid ) - cls.lb_rule.assign(cls.api_client, [cls.vm_1, cls.vm_2, cls.vm_3]) + + # Only the vms in the same network can be added to load balancing rule + # hence we can't add vm_2 with vm_1 + cls.lb_rule.assign(cls.api_client, [cls.vm_1]) cls.public_ip_2 = PublicIPAddress.create( cls.api_client, @@ -1084,16 +1086,20 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): ) cls._cleanup = [ cls.account, - cls.service_offering, + cls.network_2, cls.nw_off, cls.shared_nw_off, - cls.vpc_off + cls.vpc_off, + cls.service_offering, ] return @classmethod def tearDownClass(cls): try: + cls.vpc_off.update(cls.api_client, state='Disabled') + cls.shared_nw_off.update(cls.api_client, state='Disabled') + cls.nw_off.update(cls.api_client, state='Disabled') cleanup_resources(cls.api_client, cls._cleanup) except Exception as e: raise Exception("Warning: Exception during cleanup : %s" % e) @@ -1381,6 +1387,9 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): except Exception as e: self.fail("Failed to destroy the virtual instances, %s" % e) + #Wait for expunge interval to cleanup VM + wait_for_cleanup(self.apiclient, ["expunge.delay", "expunge.interval"]) + self.debug("Check if the instance is in stopped state?") vms = VirtualMachine.list( self.apiclient, @@ -1388,15 +1397,9 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): listall=True ) self.assertEqual( - isinstance(vms, list), - True, - "List virtual machines should return a valid list" - ) - vm = vms[0] - self.assertEqual( - vm.state, - "Expunging", - "Virtual machine should be in expunging state" + vms, + None, + "List virtual machines should not return anything" ) self.debug("Validating if network 
rules are coonfigured properly?") @@ -1652,7 +1655,7 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): ["expunge.interval", "expunge.delay"] ) - # Check if the network rules still exists after Vm expunged + # Check if the network rules still exists after Vm expunged self.debug("Checking if NAT rules existed ") with self.assertRaises(Exception): nat_rules = NATRule.list( @@ -1668,7 +1671,6 @@ class TestVMLifeCycleSharedNwVPC(cloudstackTestCase): ) return - class TestVMLifeCycleBothIsolated(cloudstackTestCase): @classmethod @@ -2004,7 +2006,6 @@ class TestVMLifeCycleBothIsolated(cloudstackTestCase): ) return - class TestVMLifeCycleStoppedVPCVR(cloudstackTestCase): @classmethod @@ -2690,7 +2691,7 @@ class TestVMLifeCycleStoppedVPCVR(cloudstackTestCase): ["expunge.interval", "expunge.delay"] ) - # Check if the network rules still exists after Vm expunged + # Check if the network rules still exists after Vm expunged self.debug("Checking if NAT rules existed ") with self.assertRaises(Exception): nat_rules = NATRule.list( diff --git a/test/integration/smoke/test_network.py b/test/integration/smoke/test_network.py index 042ac84ae53..f2045959697 100644 --- a/test/integration/smoke/test_network.py +++ b/test/integration/smoke/test_network.py @@ -464,7 +464,9 @@ class TestPortForwarding(cloudstackTestCase): src_nat_ip_addr.ipaddress, self.virtual_machine.ssh_port, self.virtual_machine.username, - self.virtual_machine.password + self.virtual_machine.password, + retries=2, + delay=0 ) return @@ -580,7 +582,9 @@ class TestPortForwarding(cloudstackTestCase): ip_address.ipaddress.ipaddress, self.virtual_machine.ssh_port, self.virtual_machine.username, - self.virtual_machine.password + self.virtual_machine.password, + retries=2, + delay=0 ) return @@ -883,7 +887,9 @@ class TestReleaseIP(cloudstackTestCase): self.ip_addr.ipaddress, self.services["natrule"]["publicport"], self.virtual_machine.username, - self.virtual_machine.password + self.virtual_machine.password, + 
retries=2, + delay=0 ) return diff --git a/test/integration/smoke/test_routers.py b/test/integration/smoke/test_routers.py index 02686664ded..0f32e274338 100644 --- a/test/integration/smoke/test_routers.py +++ b/test/integration/smoke/test_routers.py @@ -201,16 +201,17 @@ class TestRouterServices(cloudstackTestCase): router.linklocalip, "service dnsmasq status" ) - res = str(result) - self.debug("Dnsmasq process status: %s" % res) - self.assertEqual( - res.count("running"), - 1, - "Check dnsmasq service is running or not" - ) except KeyError: self.skipTest("Marvin configuration has no host credentials to check router services") + res = str(result) + self.debug("Dnsmasq process status: %s" % res) + + self.assertEqual( + res.count("running"), + 1, + "Check dnsmasq service is running or not" + ) return diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py index 06777e4eb60..6ee7c6271c9 100644 --- a/test/integration/smoke/test_snapshots.py +++ b/test/integration/smoke/test_snapshots.py @@ -269,77 +269,5 @@ class TestSnapshotRootDisk(cloudstackTestCase): "Check if backup_snap_id is not null" ) - # Get the Secondary Storage details from list Hosts - hosts = list_hosts( - self.apiclient, - type='SecondaryStorage', - zoneid=self.zone.id - ) - self.assertEqual( - isinstance(hosts, list), - True, - "Check list response returns a valid list" - ) - uuids = [] - for host in hosts: - # hosts[0].name = "nfs://192.168.100.21/export/test" - parse_url = (host.name).split('/') - # parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test'] - - # Split IP address and export path from name - sec_storage_ip = parse_url[2] - # Sec Storage IP: 192.168.100.21 - - export_path = '/'.join(parse_url[3:]) - # Export path: export/test - - try: - # Login to VM to check snapshot present on sec disk - ssh_client = self.virtual_machine_with_disk.get_ssh_client() - - cmds = [ - "mkdir -p %s" % self.services["paths"]["mount_dir"], - "mount %s/%s %s" % ( - 
sec_storage_ip, - export_path, - self.services["paths"]["mount_dir"] - ), - "ls %s/snapshots/%s/%s" % ( - self.services["paths"]["mount_dir"], - account_id, - volume_id - ), - ] - - for c in cmds: - self.debug(c) - result = ssh_client.execute(c) - self.debug(result) - - except Exception: - self.fail("SSH failed for Virtual machine: %s" % - self.virtual_machine_with_disk.ipaddress) - - uuids.append(result) - # Unmount the Sec Storage - cmds = [ - "umount %s" % (self.services["mount_dir"]), - ] - try: - for c in cmds: - self.debug(c) - result = ssh_client.execute(c) - self.debug(result) - - except Exception as e: - self.fail("SSH failed for Virtual machine: %s" % - self.virtual_machine_with_disk.ipaddress) - - res = str(uuids) - # Check snapshot UUID in secondary storage and database - self.assertEqual( - res.count(snapshot_uuid), - 1, - "Check snapshot UUID in secondary storage and database" - ) - return \ No newline at end of file + self.assertTrue(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, snapshot.id)) + return diff --git a/test/integration/smoke/test_vpc_vpn.py b/test/integration/smoke/test_vpc_vpn.py new file mode 100644 index 00000000000..c360884ce4c --- /dev/null +++ b/test/integration/smoke/test_vpc_vpn.py @@ -0,0 +1,192 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +""" Tests for VPN in VPC +""" +#Import Local Modules +from marvin.cloudstackTestCase import * +from marvin.cloudstackAPI import * +from marvin.integration.lib.utils import * +from marvin.integration.lib.base import * +from marvin.integration.lib.common import * +from nose.plugins.attrib import attr + +class Services: + def __init__(self): + self.services = { + "account": { + "email": "test@test.com", + "firstname": "Test", + "lastname": "User", + "username": "test", + "password": "password", + }, + "virtual_machine": { + "displayname": "Test VM", + "username": "root", + "password": "password", + "ssh_port": 22, + "hypervisor": 'XenServer', + "privateport": 22, + "publicport": 22, + "protocol": 'TCP', + }, + "ostype": 'CentOS 5.3 (64-bit)', + "service_offering": { + "name": "Tiny Instance", + "displaytext": "Tiny Instance", + "cpunumber": 1, + "cpuspeed": 100, + "memory": 256, + }, + "network_offering": { + "name": "Network offering for internal vpc", + "displaytext": "Network offering for internal vpc", + "guestiptype": "Isolated", + "traffictype": "Guest", + "supportedservices": "Vpn,Dhcp,Dns,Lb,UserData,SourceNat,StaticNat,PortForwarding,NetworkACL", + "serviceProviderList": { + "Dhcp": "VpcVirtualRouter", + "Dns": "VpcVirtualRouter", + "Vpn": "VpcVirtualRouter", + "UserData": "VpcVirtualRouter", + "Lb": "InternalLbVM", + "SourceNat": "VpcVirtualRouter", + "StaticNat": "VpcVirtualRouter", + "PortForwarding": "VpcVirtualRouter", + "NetworkACL": "VpcVirtualRouter", + }, + "serviceCapabilityList": { + "SourceNat": {"SupportedSourceNatTypes": "peraccount"}, + "Lb": {"lbSchemes": "internal", "SupportedLbIsolation": "dedicated"} + } + }, + "vpn_user": { + "username": "test", + "password": "password", + } + } + + +class TestVpcVpn(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + cls.apiclient = super(TestVpcVpn, cls).getClsTestClient().getApiClient() + 
cls.services = Services().services + cls.zone = get_zone(cls.apiclient, cls.services) + cls.domain = get_domain(cls.apiclient) + cls.service_offering = ServiceOffering.create( + cls.apiclient, + cls.services["service_offering"] + ) + cls.account = Account.create(cls.apiclient, services=cls.services["account"]) + cls.template = get_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"] + ) + cls.cleanup = [cls.account] + + @attr(tags=["advanced"]) + def test_vpc_vpn(self): + """Test VPN in VPC""" + + # 0) Get the default network offering for VPC + networkOffering = NetworkOffering.list(self.apiclient, name="DefaultIsolatedNetworkOfferingForVpcNetworks") + self.assert_(networkOffering is not None and len(networkOffering) > 0, "No VPC based network offering") + + # 1) Create VPC + vpcOffering = VpcOffering.list(self.apiclient,isdefault=True) + self.assert_(vpcOffering is not None and len(vpcOffering)>0, "No VPC offerings found") + self.services["vpc"] = {} + self.services["vpc"]["name"] = "vpc-vpn" + self.services["vpc"]["displaytext"] = "vpc-vpn" + self.services["vpc"]["cidr"] = "10.1.1.0/24" + vpc = VPC.create( + apiclient=self.apiclient, + services=self.services["vpc"], + networkDomain="vpc.vpn", + vpcofferingid=vpcOffering[0].id, + zoneid=self.zone.id, + account=self.account.name, + domainid=self.domain.id + ) + self.assert_(vpc is not None, "VPC creation failed") + + # 2) Create network in VPC + self.services["vpcnetwork"] = {} + self.services["vpcnetwork"]["name"] = "vpcntwk" + self.services["vpcnetwork"]["displaytext"] = "vpcntwk" + ntwk = Network.create( + apiclient=self.apiclient, + services=self.services["vpcnetwork"], + accountid=self.account.name, + domainid=self.domain.id, + networkofferingid=networkOffering[0].id, + zoneid=self.zone.id, + vpcid=vpc.id, + gateway="10.1.1.1", + netmask="255.255.255.192" + ) + self.assertIsNotNone(ntwk, "Network failed to create") + self.debug("Network %s created in VPC %s" %(ntwk.id, vpc.id)) + + # 3) Deploy a 
vm + self.services["virtual_machine"]["networkids"] = ntwk.id + vm = VirtualMachine.create(self.apiclient, services=self.services["virtual_machine"], + templateid=self.template.id, + zoneid=self.zone.id, + accountid=self.account.name, + domainid= self.domain.id, + serviceofferingid=self.service_offering.id, + ) + self.assert_(vm is not None, "VM failed to deploy") + self.assert_(vm.state == 'Running', "VM is not running") + self.debug("VM %s deployed in VPC %s" %(vm.id, vpc.id)) + + # 4) Enable VPN for VPC + + src_nat_list = PublicIPAddress.list( + self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + listall=True, + issourcenat=True, + vpcid=vpc.id + ) + ip = src_nat_list[0] + vpn = Vpn.create(self.apiclient, + publicipid=ip.id, + account=self.account.name, + domainid=self.account.domainid) + + # 5) Add VPN user for VPC + vpnUser = VpnUser.create(self.apiclient, + account=self.account.name, + domainid=self.account.domainid, + username=self.services["vpn_user"]["username"], + password=self.services["vpn_user"]["password"]) + + # 6) Disable VPN for VPC + vpn.delete(self.apiclient) + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiclient, cls.cleanup) + except Exception, e: + raise Exception("Cleanup failed with %s" % e) diff --git a/test/pom.xml b/test/pom.xml index eb6970571e1..d04a04541c8 100644 --- a/test/pom.xml +++ b/test/pom.xml @@ -34,8 +34,7 @@ log4j log4j - ${cs.log4j.version} - + junit junit @@ -44,12 +43,10 @@ com.trilead trilead-ssh2 - ${cs.trilead.version} - + log4j apache-log4j-extras - ${cs.log4j.extras.version} log4j diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index d9b87d42c25..0067155ac18 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -148,6 +148,7 @@ known_categories = { 'createSecondaryStagingStore': 'Image Store', 'deleteSecondaryStagingStore': 'Image Store', 'listSecondaryStagingStores': 'Image Store', + 'prepareSecondaryStorageForMigration' : 'Image 
Store', 'InternalLoadBalancer': 'Internal LB', 'DeploymentPlanners': 'Configuration', 'PortableIp': 'Portable IP', diff --git a/tools/appliance/build.sh b/tools/appliance/build.sh index f01cff6d281..61ee7936e67 100644 --- a/tools/appliance/build.sh +++ b/tools/appliance/build.sh @@ -33,9 +33,9 @@ rootdir=$PWD bundle # Clean and start building the appliance -veewee vbox destroy $appliance -veewee vbox build $appliance --nogui --auto -veewee vbox halt $appliance +bundle exec veewee vbox destroy $appliance +bundle exec veewee vbox build $appliance --nogui --auto +bundle exec veewee vbox halt $appliance while [[ `vboxmanage list runningvms | grep $appliance | wc -l` -ne 0 ]]; do diff --git a/tools/appliance/definitions/devcloud/base.sh b/tools/appliance/definitions/devcloud/base.sh index 122b3893c92..7fec0fc342d 100644 --- a/tools/appliance/definitions/devcloud/base.sh +++ b/tools/appliance/definitions/devcloud/base.sh @@ -6,7 +6,7 @@ apt-get -y update apt-get -y install curl unzip apt-get clean -echo 'cloud ALL=NOPASSWD:ALL' > /etc/sudoers.d/cloud +echo 'cloud ALL=NOPASSWD:/bin/chmod, /bin/cp, /bin/mkdir, /bin/mount, /bin/umount' > /etc/sudoers.d/cloud # Tweak sshd to prevent DNS resolution (speed up logins) echo 'UseDNS no' >> /etc/ssh/sshd_config diff --git a/tools/appliance/definitions/systemvmtemplate64/base.sh b/tools/appliance/definitions/systemvm64template/base.sh similarity index 85% rename from tools/appliance/definitions/systemvmtemplate64/base.sh rename to tools/appliance/definitions/systemvm64template/base.sh index d6faea04b41..46c5db6dcd1 100644 --- a/tools/appliance/definitions/systemvmtemplate64/base.sh +++ b/tools/appliance/definitions/systemvm64template/base.sh @@ -5,7 +5,7 @@ apt-get -y update apt-get -y install curl unzip # Set up sudo -echo 'vagrant ALL=NOPASSWD:ALL' > /etc/sudoers.d/vagrant +echo 'vagrant ALL=NOPASSWD:/bin/chmod, /bin/cp, /bin/mkdir, /bin/mount, /bin/umount' > /etc/sudoers.d/vagrant # Tweak sshd to prevent DNS resolution (speed up 
logins) echo 'UseDNS no' >> /etc/ssh/sshd_config diff --git a/tools/appliance/definitions/systemvmtemplate64/cleanup.sh b/tools/appliance/definitions/systemvm64template/cleanup.sh similarity index 100% rename from tools/appliance/definitions/systemvmtemplate64/cleanup.sh rename to tools/appliance/definitions/systemvm64template/cleanup.sh diff --git a/tools/appliance/definitions/systemvmtemplate64/definition.rb b/tools/appliance/definitions/systemvm64template/definition.rb similarity index 95% rename from tools/appliance/definitions/systemvmtemplate64/definition.rb rename to tools/appliance/definitions/systemvm64template/definition.rb index 30f2849978d..454145d3bd0 100644 --- a/tools/appliance/definitions/systemvmtemplate64/definition.rb +++ b/tools/appliance/definitions/systemvm64template/definition.rb @@ -1,7 +1,7 @@ Veewee::Definition.declare({ :cpu_count => '1', :memory_size=> '256', - :disk_size => '2000', :disk_format => 'VDI', :hostiocache => 'off', + :disk_size => '2500', :disk_format => 'VDI', :hostiocache => 'off', :os_type_id => 'Debian_64', :iso_file => "debian-7.0.0-amd64-netinst.iso", :iso_src => "http://cdimage.debian.org/mirror/cdimage/archive/7.0.0/amd64/iso-cd/debian-7.0.0-amd64-netinst.iso", diff --git a/tools/appliance/definitions/systemvmtemplate64/postinstall.sh b/tools/appliance/definitions/systemvm64template/postinstall.sh similarity index 95% rename from tools/appliance/definitions/systemvmtemplate64/postinstall.sh rename to tools/appliance/definitions/systemvm64template/postinstall.sh index 53ae2bb0e5a..3755b525aab 100644 --- a/tools/appliance/definitions/systemvmtemplate64/postinstall.sh +++ b/tools/appliance/definitions/systemvm64template/postinstall.sh @@ -105,7 +105,7 @@ setup_accounts() { echo "root:$ROOTPW" | chpasswd echo "cloud:`openssl rand -base64 32`" | chpasswd sed -i -e '/Defaults\s\+env_reset/a Defaults\texempt_group=admin' /etc/sudoers - sed -i -e 's/%admin ALL=(ALL) ALL/%admin ALL=NOPASSWD:ALL/g' /etc/sudoers + sed -i -e 
's/%admin ALL=(ALL) ALL/%admin ALL=NOPASSWD:/bin/chmod, /bin/cp, /bin/mkdir, /bin/mount, /bin/umount/g' /etc/sudoers # Disable password based authentication via ssh, this will take effect on next reboot sed -i -e 's/^.*PasswordAuthentication .*$/PasswordAuthentication no/g' /etc/ssh/sshd_config # Secure ~/.ssh @@ -167,6 +167,7 @@ EOF fix_vhdutil() { wget --no-check-certificate http://download.cloud.com.s3.amazonaws.com/tools/vhd-util -O /bin/vhd-util + chmod a+x /bin/vhd-util } do_fixes() { @@ -203,12 +204,12 @@ configure_services() { cd /opt wget --no-check-certificate $snapshot_url -O cloudstack.tar.gz tar -zxvf cloudstack.tar.gz - cp -rv $snapshot_dir/patches/systemvm/debian/config/* / - cp -rv $snapshot_dir/patches/systemvm/debian/vpn/* / + cp -rv $snapshot_dir/systemvm/patches/debian/config/* / + cp -rv $snapshot_dir/systemvm/patches/debian/vpn/* / mkdir -p /usr/share/cloud/ - cd $snapshot_dir/patches/systemvm/debian/config + cd $snapshot_dir/systemvm/patches/debian/config tar -cvf /usr/share/cloud/cloud-scripts.tar * - cd $snapshot_dir/patches/systemvm/debian/vpn + cd $snapshot_dir/systemvm/patches/debian/vpn tar -rvf /usr/share/cloud/cloud-scripts.tar * cd /opt rm -fr $snapshot_dir cloudstack.tar.gz diff --git a/tools/appliance/definitions/systemvmtemplate64/preseed.cfg b/tools/appliance/definitions/systemvm64template/preseed.cfg similarity index 100% rename from tools/appliance/definitions/systemvmtemplate64/preseed.cfg rename to tools/appliance/definitions/systemvm64template/preseed.cfg diff --git a/tools/appliance/definitions/systemvmtemplate64/zerodisk.sh b/tools/appliance/definitions/systemvm64template/zerodisk.sh similarity index 89% rename from tools/appliance/definitions/systemvmtemplate64/zerodisk.sh rename to tools/appliance/definitions/systemvm64template/zerodisk.sh index 25bd8c4af2d..b00f7ae7ccc 100644 --- a/tools/appliance/definitions/systemvmtemplate64/zerodisk.sh +++ b/tools/appliance/definitions/systemvm64template/zerodisk.sh @@ -6,7 +6,7 @@ 
rm -fv .veewee_version .veewee_params .vbox_version echo "Cleaning up" # Zero out the free space to save space in the final image: -for path in / /boot /usr /var /opt /tmp +for path in / /boot /usr /var /opt /tmp /home do dd if=/dev/zero of=$path/zero bs=1M sync diff --git a/tools/appliance/definitions/systemvmtemplate/base.sh b/tools/appliance/definitions/systemvmtemplate/base.sh index 4d6092a5995..5aaa0eddbb6 100644 --- a/tools/appliance/definitions/systemvmtemplate/base.sh +++ b/tools/appliance/definitions/systemvmtemplate/base.sh @@ -7,7 +7,7 @@ apt-get -y install curl unzip apt-get clean # Set up sudo, TODO: Check security concerns -echo 'cloud ALL=NOPASSWD:ALL' > /etc/sudoers.d/cloud +echo 'cloud ALL=NOPASSWD:/bin/chmod, /bin/cp, /bin/mkdir, /bin/mount, /bin/umount' > /etc/sudoers.d/cloud # Tweak sshd to prevent DNS resolution (speed up logins) echo 'UseDNS no' >> /etc/ssh/sshd_config diff --git a/tools/appliance/definitions/systemvmtemplate/definition.rb b/tools/appliance/definitions/systemvmtemplate/definition.rb index 54c85aa60ef..33f25b2c31e 100644 --- a/tools/appliance/definitions/systemvmtemplate/definition.rb +++ b/tools/appliance/definitions/systemvmtemplate/definition.rb @@ -1,7 +1,7 @@ Veewee::Definition.declare({ :cpu_count => '1', :memory_size=> '256', - :disk_size => '2000', :disk_format => 'VDI', :hostiocache => 'off', + :disk_size => '2500', :disk_format => 'VDI', :hostiocache => 'off', :os_type_id => 'Debian', :iso_file => "debian-7.0.0-i386-netinst.iso", :iso_src => "http://cdimage.debian.org/mirror/cdimage/archive/7.0.0/i386/iso-cd/debian-7.0.0-i386-netinst.iso", diff --git a/tools/appliance/definitions/systemvmtemplate/postinstall.sh b/tools/appliance/definitions/systemvmtemplate/postinstall.sh index 997d8548501..1309d47a9f5 100644 --- a/tools/appliance/definitions/systemvmtemplate/postinstall.sh +++ b/tools/appliance/definitions/systemvmtemplate/postinstall.sh @@ -104,7 +104,7 @@ setup_accounts() { echo "root:$ROOTPW" | chpasswd echo 
"cloud:`openssl rand -base64 32`" | chpasswd sed -i -e '/Defaults\s\+env_reset/a Defaults\texempt_group=admin' /etc/sudoers - sed -i -e 's/%admin ALL=(ALL) ALL/%admin ALL=NOPASSWD:ALL/g' /etc/sudoers + sed -i -e 's/%admin ALL=(ALL) ALL/%admin ALL=NOPASSWD:/bin/chmod, /bin/cp, /bin/mkdir, /bin/mount, /bin/umount/g' /etc/sudoers # Disable password based authentication via ssh, this will take effect on next reboot sed -i -e 's/^.*PasswordAuthentication .*$/PasswordAuthentication no/g' /etc/ssh/sshd_config # Secure ~/.ssh @@ -203,12 +203,12 @@ configure_services() { cd /opt wget --no-check-certificate $snapshot_url -O cloudstack.tar.gz tar -zxvf cloudstack.tar.gz - cp -rv $snapshot_dir/patches/systemvm/debian/config/* / - cp -rv $snapshot_dir/patches/systemvm/debian/vpn/* / + cp -rv $snapshot_dir/systemvm/patches/debian/config/* / + cp -rv $snapshot_dir/systemvm/patches/debian/vpn/* / mkdir -p /usr/share/cloud/ - cd $snapshot_dir/patches/systemvm/debian/config + cd $snapshot_dir/systemvm/patches/debian/config tar -cvf /usr/share/cloud/cloud-scripts.tar * - cd $snapshot_dir/patches/systemvm/debian/vpn + cd $snapshot_dir/systemvm/patches/debian/vpn tar -rvf /usr/share/cloud/cloud-scripts.tar * cd /opt rm -fr $snapshot_dir cloudstack.tar.gz diff --git a/tools/build/build_asf.sh b/tools/build/build_asf.sh index c2a817a8ffd..6170cd50df4 100755 --- a/tools/build/build_asf.sh +++ b/tools/build/build_asf.sh @@ -90,7 +90,7 @@ export currentversion=`mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:eval echo "found $currentversion" echo 'setting version numbers' -mvn versions:set -DnewVersion=$version -P vmware -P developer -P systemvm -P simulator -P baremetal -P ucs -Dnonoss +mvn versions:set -DnewVersion=$version -P vmware -P developer -P systemvm -P simulator -P baremetal -P ucs -Dnoredist mv deps/XenServerJava/pom.xml.versionsBackup deps/XenServerJava/pom.xml perl -pi -e "s/-SNAPSHOT//" deps/XenServerJava/pom.xml perl -pi -e "s/-SNAPSHOT//" tools/apidoc/pom.xml diff 
--git a/tools/build/setnextversion.sh b/tools/build/setnextversion.sh index 7da3765704a..a41676db6db 100755 --- a/tools/build/setnextversion.sh +++ b/tools/build/setnextversion.sh @@ -62,7 +62,7 @@ export currentversion=`mvn org.apache.maven.plugins:maven-help-plugin:2.1.1:eval echo "found $currentversion" echo 'setting version numbers' -mvn versions:set -DnewVersion=$version -P vmware -P developer -P systemvm -P simulator -P baremetal -P ucs -Dnonoss +mvn versions:set -DnewVersion=$version -P vmware -P developer -P systemvm -P simulator -P baremetal -P ucs -Dnoredist mv deps/XenServerJava/pom.xml.versionsBackup deps/XenServerJava/pom.xml perl -pi -e "s/$currentversion/$version/" deps/XenServerJava/pom.xml perl -pi -e "s/$currentversion/$version/" tools/apidoc/pom.xml diff --git a/tools/devcloud-kvm/pom.xml b/tools/devcloud-kvm/pom.xml index 6c1b543d4ac..c95cf42e988 100644 --- a/tools/devcloud-kvm/pom.xml +++ b/tools/devcloud-kvm/pom.xml @@ -24,7 +24,6 @@ mysql mysql-connector-java - 5.1.21 runtime diff --git a/tools/devcloud/devcloud-advanced.cfg b/tools/devcloud/devcloud-advanced.cfg index 75c3a4f7147..fb25d03cf38 100644 --- a/tools/devcloud/devcloud-advanced.cfg +++ b/tools/devcloud/devcloud-advanced.cfg @@ -104,7 +104,9 @@ "internaldns1": "192.168.56.10", "secondaryStorages": [ { - "url": "nfs://192.168.56.10:/opt/storage/secondary" + "url": "nfs://192.168.56.10:/opt/storage/secondary", + "provider": "NFS", + "details": [ ] } ] } diff --git a/tools/devcloud/devcloud-advancedsg.cfg b/tools/devcloud/devcloud-advancedsg.cfg index 6c26b15f5da..c625e79c53f 100644 --- a/tools/devcloud/devcloud-advancedsg.cfg +++ b/tools/devcloud/devcloud-advancedsg.cfg @@ -88,7 +88,9 @@ "internaldns1": "192.168.56.10", "secondaryStorages": [ { - "url": "nfs://192.168.56.10/opt/storage/secondary" + "url": "nfs://192.168.56.10:/opt/storage/secondary", + "provider": "NFS", + "details": [ ] } ] } diff --git a/tools/devcloud/pom.xml b/tools/devcloud/pom.xml index 51b40f09db6..aa1a7dbcac4 
100644 --- a/tools/devcloud/pom.xml +++ b/tools/devcloud/pom.xml @@ -24,7 +24,6 @@ mysql mysql-connector-java - 5.1.21 runtime diff --git a/tools/devcloud/src/deps/boxes/basebox-build/postinstall.sh b/tools/devcloud/src/deps/boxes/basebox-build/postinstall.sh index 217d23024aa..f2b15602e8c 100644 --- a/tools/devcloud/src/deps/boxes/basebox-build/postinstall.sh +++ b/tools/devcloud/src/deps/boxes/basebox-build/postinstall.sh @@ -28,7 +28,7 @@ groupadd -r admin usermod -a -G admin devcloud echo "root:password" | chpasswd sed -i -e '/Defaults\s\+env_reset/a Defaults\texempt_group=admin' /etc/sudoers -sed -i -e 's/%admin ALL=(ALL) ALL/%admin ALL=NOPASSWD:ALL/g' /etc/sudoers +sed -i -e 's/%admin ALL=(ALL) ALL/%admin ALL=NOPASSWD:/bin/chmod, /bin/cp, /bin/mkdir, /bin/mount, /bin/umount/g' /etc/sudoers mkdir /home/devcloud/.ssh chmod 700 /home/devcloud/.ssh diff --git a/tools/eclipse/ApacheCloudStack.xml b/tools/eclipse/ApacheCloudStack.xml new file mode 100644 index 00000000000..5a0a01da6e6 --- /dev/null +++ b/tools/eclipse/ApacheCloudStack.xml @@ -0,0 +1,309 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tools/marvin/marvin/asyncJobMgr.py b/tools/marvin/marvin/asyncJobMgr.py index 25818a62de5..0d7939ce845 100644 --- a/tools/marvin/marvin/asyncJobMgr.py +++ b/tools/marvin/marvin/asyncJobMgr.py @@ -81,14 +81,14 @@ class workThread(threading.Thread): if cmd.isAsync == "false": jobstatus.startTime = datetime.datetime.now() - result 
= self.connection.make_request(cmd) + result = self.connection.marvin_request(cmd) jobstatus.result = result jobstatus.endTime = datetime.datetime.now() jobstatus.duration =\ time.mktime(jobstatus.endTime.timetuple()) - time.mktime( jobstatus.startTime.timetuple()) else: - result = self.connection.make_request(cmd, None, True) + result = self.connection.marvin_request(cmd) if result is None: jobstatus.status = False else: diff --git a/tools/marvin/marvin/cloudstackConnection.py b/tools/marvin/marvin/cloudstackConnection.py index 8129396813a..2c027c36879 100644 --- a/tools/marvin/marvin/cloudstackConnection.py +++ b/tools/marvin/marvin/cloudstackConnection.py @@ -32,37 +32,40 @@ from requests import RequestException class cloudConnection(object): + """ Connections to make API calls to the cloudstack management server """ - def __init__(self, mgtSvr, port=8096, user=None, passwd=None, - apiKey=None, securityKey=None, - asyncTimeout=3600, logging=None, scheme='http', - path='client/api'): + def __init__(self, mgmtDet, asyncTimeout=3600, logging=None, + scheme='http', path='client/api'): self.loglevel() # Turn off requests logs - self.apiKey = apiKey - self.securityKey = securityKey - self.mgtSvr = mgtSvr - self.port = port - self.user = user - self.passwd = passwd + self.apiKey = mgmtDet.apiKey + self.securityKey = mgmtDet.securityKey + self.mgtSvr = mgmtDet.mgtSvrIp + self.port = mgmtDet.port + self.user = mgmtDet.user + self.passwd = mgmtDet.passwd + self.certCAPath = mgmtDet.certCAPath + self.certPath = mgmtDet.certPath self.logging = logging self.path = path self.retries = 5 + self.protocol = scheme self.asyncTimeout = asyncTimeout self.auth = True - if port == 8096 or \ + if self.port == 8096 or \ (self.apiKey is None and self.securityKey is None): self.auth = False - if scheme not in ['http', 'https']: - raise RequestException("Protocol must be HTTP") - self.protocol = scheme + if mgmtDet.useHttps == "True": + self.protocol = "https" self.baseurl = 
"%s://%s:%d/%s"\ % (self.protocol, self.mgtSvr, self.port, self.path) def __copy__(self): - return cloudConnection(self.mgtSvr, self.port, self.user, self.passwd, - self.apiKey, self.securityKey, - self.asyncTimeout, self.logging, self.protocol, + return cloudConnection(self.mgtSvr, self.port, self.user, + self.passwd, self.apiKey, + self.securityKey, + self.asyncTimeout, self.logging, + self.protocol, self.path) def loglevel(self, lvl=logging.WARNING): @@ -84,7 +87,7 @@ class cloudConnection(object): timeout = self.asyncTimeout while timeout > 0: - asyncResonse = self.marvin_request(cmd, response_type=response) + asyncResonse = self.marvinRequest(cmd, response_type=response) if asyncResonse.jobstatus == 2: raise cloudstackException.cloudstackAPIException( @@ -143,27 +146,78 @@ class cloudConnection(object): payload["signature"] = signature try: - if method == 'POST': - response = requests.post(self.baseurl, params=payload) + #https_flag : Signifies whether to verify connection over \ + #http or https, \ + #initialized to False, will be set to true if user provided https + #connection + https_flag = False + cert_path = () + if self.protocol == "https": + https_flag = True + if self.certCAPath != "NA" and self.certPath != "NA": + cert_path = (self.certCAPath, self.certPath) + + #Verify whether protocol is "http", then call the request over http + if self.protocol == "http": + if method == 'POST': + response = requests.post(self.baseurl, params=payload, + verify=https_flag) + else: + response = requests.get(self.baseurl, params=payload, + verify=https_flag) else: - response = requests.get(self.baseurl, params=payload) + ''' + If protocol is https, then create the connection url with \ + user provided certificates \ + provided as part of cert + ''' + try: + if method == 'POST': + response = requests.post(self.baseurl, + params=payload, + cert=cert_path, + verify=https_flag) + else: + response = requests.get(self.baseurl, params=payload, + cert=cert_path, + 
verify=https_flag) + except Exception, e: + ''' + If an exception occurs with user provided CA certs, \ + then try with default certs, \ + we dont need to mention here the cert path + ''' + self.logging.debug("Creating CS connection over https \ + didnt worked with user provided certs \ + , so trying with no certs %s" % e) + if method == 'POST': + response = requests.post(self.baseurl, + params=payload, + verify=https_flag) + else: + response = requests.get(self.baseurl, + params=payload, + verify=https_flag) except ConnectionError, c: - self.logging.debug("Connection refused. Reason: %s : %s" % - (self.baseurl, c)) + self.logging.debug("Connection refused. Reason: %s : %s" + % (self.baseurl, c)) raise c except HTTPError, h: - self.logging.debug("Server returned error code: %s" % h) + self.logging.debug("Http Error.Server returned error code: %s" % h) raise h except Timeout, t: self.logging.debug("Connection timed out with %s" % t) raise t except RequestException, r: - self.logging.debug("Error returned by server %s" % r) + self.logging.debug("RequestException from server %s" % r) raise r + except Exception, e: + self.logging.debug("Error returned by server %s" % r) + raise e else: return response - def sanitize_command(self, cmd): + def sanitizeCommand(self, cmd): """ Removes None values, Validates all required params are present @param cmd: Cmd object eg: createPhysicalNetwork @@ -201,9 +255,9 @@ class cloudConnection(object): for k, v in val.iteritems(): requests["%s[%d].%s" % (param, i, k)] = v i = i + 1 - return cmdname, isAsync, requests + return cmdname.strip(), isAsync, requests - def marvin_request(self, cmd, response_type=None, method='GET', data=''): + def marvinRequest(self, cmd, response_type=None, method='GET', data=''): """ Requester for marvin command objects @param cmd: marvin's command from cloudstackAPI @@ -211,11 +265,13 @@ class cloudConnection(object): @param method: HTTP GET/POST, defaults to GET @return: """ - cmdname, isAsync, payload = 
self.sanitize_command(cmd) + cmdname, isAsync, payload = self.sanitizeCommand(cmd) self.logging.debug("sending %s request: %s %s" % (method, cmdname, str(payload))) - response = self.request( - cmdname, self.auth, payload=payload, method=method) + response = self.request(cmdname, + self.auth, + payload=payload, + method=method) self.logging.debug("Request: %s Response: %s" % (response.url, response.text)) try: diff --git a/tools/marvin/marvin/cloudstackTestClient.py b/tools/marvin/marvin/cloudstackTestClient.py index 36f7f8d8369..be93f3581cc 100644 --- a/tools/marvin/marvin/cloudstackTestClient.py +++ b/tools/marvin/marvin/cloudstackTestClient.py @@ -19,24 +19,36 @@ import cloudstackConnection import asyncJobMgr import dbConnection from cloudstackAPI import * -import random -import string -import hashlib + +''' +@Desc : CloudStackTestClient is encapsulated class for getting various \ + clients viz., apiclient,dbconnection etc +@Input : mgmtDetails : Management Server Details + dbSvrDetails: Database Server details of Management \ + Server. Retrieved from configuration file. 
+ asyncTimeout : + defaultWorkerThreads : + logging : +''' class cloudstackTestClient(object): - def __init__(self, mgtSvr=None, port=8096, user=None, passwd=None, - apiKey=None, securityKey=None, asyncTimeout=3600, - defaultWorkerThreads=10, logging=None): + def __init__(self, mgmtDetails, + dbSvrDetails, asyncTimeout=3600, + defaultWorkerThreads=10, + logging=None): self.connection = \ - cloudstackConnection.cloudConnection(mgtSvr, port, user, passwd, - apiKey, securityKey, - asyncTimeout, logging) + cloudstackConnection.cloudConnection(mgmtDetails, + asyncTimeout, + logging) self.apiClient =\ cloudstackAPIClient.CloudStackAPIClient(self.connection) self.dbConnection = None + if dbSvrDetails is not None: + self.createDbConnection(dbSvrDetails.dbSvr, dbSvrDetails.port, + dbSvrDetails.user, + dbSvrDetails.passwd, dbSvrDetails.db) self.asyncJobMgr = None - self.ssh = None self.id = None self.defaultWorkerThreads = defaultWorkerThreads @@ -48,10 +60,10 @@ class cloudstackTestClient(object): def identifier(self, id): self.id = id - def dbConfigure(self, host="localhost", port=3306, user='cloud', - passwd='cloud', db='cloud'): - self.dbConnection = dbConnection.dbConnection(host, port, user, passwd, - db) + def createDbConnection(self, host="localhost", port=3306, user='cloud', + passwd='cloud', db='cloud'): + self.dbConnection = dbConnection.dbConnection(host, port, user, + passwd, db) def isAdminContext(self): """ @@ -71,13 +83,6 @@ class cloudstackTestClient(object): except: return 0 # user - def random_gen(self, size=6, chars=string.ascii_uppercase + string.digits): - """Generate Random Strings of variable length""" - randomstr = ''.join(random.choice(chars) for x in range(size)) - if self.identifier: - return ''.join([self.identifier, '-', randomstr]) - return randomstr - def createUserApiClient(self, UserName, DomainName, acctType=0): if not self.isAdminContext(): return self.apiClient @@ -149,17 +154,6 @@ class cloudstackTestClient(object): def 
getDbConnection(self): return self.dbConnection - def executeSql(self, sql=None): - if sql is None or self.dbConnection is None: - return None - - return self.dbConnection.execute() - - def executeSqlFromFile(self, sqlFile=None): - if sqlFile is None or self.dbConnection is None: - return None - return self.dbConnection.executeSqlFromFile(sqlFile) - def getApiClient(self): self.apiClient.id = self.identifier return self.apiClient diff --git a/tools/marvin/marvin/codegenerator.py b/tools/marvin/marvin/codegenerator.py index 96729f6bbfe..e0f056f66a0 100644 --- a/tools/marvin/marvin/codegenerator.py +++ b/tools/marvin/marvin/codegenerator.py @@ -222,7 +222,7 @@ class codeGenerator(object): body += self.space + self.space body += 'response = %sResponse()\n' % cmdName body += self.space + self.space - body += 'response = self.connection.marvin_request(command,' + body += 'response = self.connection.marvinRequest(command,' body += ' response_type=response, method=method)\n' body += self.space + self.space + 'return response\n' body += self.newline diff --git a/tools/marvin/marvin/codes.py b/tools/marvin/marvin/codes.py new file mode 100644 index 00000000000..3da90d66534 --- /dev/null +++ b/tools/marvin/marvin/codes.py @@ -0,0 +1,39 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +""" +@Desc : This module defines all codes, constants maintained globally \ + and used across marvin and its test features.The main purpose \ + is to maintain readability, maintain one common place for \ + all codes used or reused across test features. It enhances \ + maintainability and readability. Users just import statement \ + to receive all the codes mentioned here. EX: Here, we define \ + a code viz., ENABLED with value "Enabled",then using \ + this code in a sample feature say test_a.py as below. \ + + from codes import * + if obj.getvalue() == ENABLED + +@DateAdded: 20th October 2013 +""" + +ENABLED = "Enabled" +NETWORK_OFFERING = "network_offering" +ROOT = "ROOT" +INVALID_INPUT = "INVALID INPUT" +EMPTY_LIST = "EMPTY_LIST" +FAIL = 0 +PASS = 1 diff --git a/tools/marvin/marvin/configGenerator.py b/tools/marvin/marvin/configGenerator.py index a966ae089e4..0cfad30569c 100644 --- a/tools/marvin/marvin/configGenerator.py +++ b/tools/marvin/marvin/configGenerator.py @@ -27,6 +27,9 @@ class managementServer(object): self.port = 8096 self.apiKey = None self.securityKey = None + self.useHttps = None + self.certCAPath = None + self.certPath = None class dbServer(object): @@ -827,10 +830,10 @@ def generate_setup_config(config, file=None): fp.close() -def get_setup_config(file): +def getSetupConfig(file): if not os.path.exists(file): raise IOError("config file %s not found. 
\ -please specify a valid config file" % file) + please specify a valid config file" % file) config = cloudstackConfiguration() configLines = [] with open(file, 'r') as fp: @@ -859,7 +862,7 @@ by default is ./datacenterCfg") (options, args) = parser.parse_args() if options.inputfile: - config = get_setup_config(options.inputfile) + config = getSetupConfig(options.inputfile) if options.advanced: config = describe_setup_in_advanced_mode() elif options.advancedsg: diff --git a/tools/marvin/marvin/deployAndRun.py b/tools/marvin/marvin/deployAndRun.py index 78c161789a7..8a758a10e36 100644 --- a/tools/marvin/marvin/deployAndRun.py +++ b/tools/marvin/marvin/deployAndRun.py @@ -17,6 +17,7 @@ import deployDataCenter import TestCaseExecuteEngine +import sys from argparse import ArgumentParser if __name__ == "__main__": @@ -37,6 +38,8 @@ if __name__ == "__main__": parser.add_argument("-l", "--load", dest="load", action="store_true", help="only load config, do not deploy,\ it will only run testcase") + parser.add_argument("-n", "--num", dest="number", + help="how many times you want run the test case") options = parser.parse_args() @@ -52,23 +55,42 @@ if __name__ == "__main__": deploy.loadCfg() else: deploy.deploy() + iterates = 1 + if options.number is not None: + if options.number == "loop": + iterates = sys.maxint + else: + try: + iterates = int(options.number) + except: + iterates = 1 if options.testCaseFolder is None: if options.module is None: parser.print_usage() exit(1) else: - engine = \ - TestCaseExecuteEngine.TestCaseExecuteEngine(deploy.testClient, - deploy.getCfg(), - testCaseLogFile, - testResultLogFile) - engine.loadTestsFromFile(options.module) - engine.run() + n = 0 + while(n < iterates): + engine = \ + TestCaseExecuteEngine.TestCaseExecuteEngine( + deploy.testClient, + deploy.getCfg( + ), + testCaseLogFile, + testResultLogFile) + engine.loadTestsFromFile(options.module) + engine.run() + n = n + 1 else: - engine = 
TestCaseExecuteEngine.TestCaseExecuteEngine(deploy.testClient, - deploy.getCfg(), - testCaseLogFile, - testResultLogFile) - engine.loadTestsFromDir(options.testCaseFolder) - engine.run() + n = 0 + while(n < iterates): + engine = TestCaseExecuteEngine.TestCaseExecuteEngine( + deploy.testClient, + deploy.getCfg( + ), + testCaseLogFile, + testResultLogFile) + engine.loadTestsFromDir(options.testCaseFolder) + engine.run() + n = n + 1 diff --git a/tools/marvin/marvin/deployDataCenter.py b/tools/marvin/marvin/deployDataCenter.py index 8cc9cd4fa6f..3f7eebbc21b 100644 --- a/tools/marvin/marvin/deployDataCenter.py +++ b/tools/marvin/marvin/deployDataCenter.py @@ -32,8 +32,12 @@ class deployDataCenters(object): if not path.exists(cfgFile) \ and not path.exists(path.abspath(cfgFile)): raise IOError("config file %s not found. please \ -specify a valid config file" % cfgFile) + specify a valid config file" % cfgFile) self.configFile = cfgFile + ''' + parsed configuration information + ''' + self.config = None def addHosts(self, hosts, zoneId, podId, clusterId, hypervisor): if hosts is None: @@ -88,11 +92,11 @@ specify a valid config file" % cfgFile) if cluster.hypervisor.lower() != "vmware": self.addHosts(cluster.hosts, zoneId, podId, clusterId, cluster.hypervisor) - self.wait_for_host(zoneId, clusterId) + self.waitForHost(zoneId, clusterId) self.createPrimaryStorages(cluster.primaryStorages, zoneId, podId, clusterId) - def wait_for_host(self, zoneId, clusterId): + def waitForHost(self, zoneId, clusterId): """ Wait for the hosts in the zoneid, clusterid to be up @@ -123,7 +127,7 @@ specify a valid config file" % cfgFile) primarycmd.clusterid = clusterId self.apiClient.createStoragePool(primarycmd) - def createpods(self, pods, zoneId, networkId=None): + def createPods(self, pods, zoneId, networkId=None): if pods is None: return for pod in pods: @@ -208,7 +212,7 @@ specify a valid config file" % cfgFile) }) self.apiClient.createSecondaryStagingStore(cachecmd) - def 
createnetworks(self, networks, zoneId): + def createNetworks(self, networks, zoneId): if networks is None: return for network in networks: @@ -417,8 +421,8 @@ specify a valid config file" % cfgFile) guestntwrk.networkofferingid = \ listnetworkofferingresponse[0].id - networkid = self.createnetworks([guestntwrk], zoneId) - self.createpods(zone.pods, zoneId, networkid) + networkid = self.createNetworks([guestntwrk], zoneId) + self.createPods(zone.pods, zoneId, networkid) if self.isEipElbZone(zone): self.createVlanIpRanges(zone.networktype, zone.ipranges, zoneId, forvirtualnetwork=True) @@ -426,7 +430,7 @@ specify a valid config file" % cfgFile) isPureAdvancedZone = (zone.networktype == "Advanced" and zone.securitygroupenabled != "true") if isPureAdvancedZone: - self.createpods(zone.pods, zoneId) + self.createPods(zone.pods, zoneId) self.createVlanIpRanges(zone.networktype, zone.ipranges, zoneId) elif (zone.networktype == "Advanced" @@ -459,7 +463,7 @@ specify a valid config file" % cfgFile) networkcmdresponse = self.apiClient.createNetwork(networkcmd) networkId = networkcmdresponse.id - self.createpods(zone.pods, zoneId, networkId) + self.createPods(zone.pods, zoneId, networkId) '''Note: Swift needs cache storage first''' self.createCacheStorages(zone.cacheStorages, zoneId) @@ -510,13 +514,15 @@ specify a valid config file" % cfgFile) def loadCfg(self): try: - self.config = configGenerator.get_setup_config(self.configFile) + self.config = configGenerator.getSetupConfig(self.configFile) except: raise cloudstackException.InvalidParameterException( "Failed to load config %s" % self.configFile) - mgt = self.config.mgtSvr[0] - + ''' Retrieving Management Server Connection Details ''' + mgtDetails = self.config.mgtSvr[0] + ''' Retrieving Database Connection Details''' + dbSvrDetails = self.config.dbSvr loggers = self.config.logger testClientLogFile = None self.testCaseLogFile = None @@ -534,36 +540,33 @@ specify a valid config file" % cfgFile) if testClientLogFile is not 
None: testClientLogger = logging.getLogger("testclient.testengine.run") fh = logging.FileHandler(testClientLogFile) - fh.setFormatter(logging. - Formatter("%(asctime)s - %(levelname)s - %(name)s\ - - %(message)s")) + fh.setFormatter(logging.Formatter( + "%(asctime)s - %(levelname)s - %(name)s\ - %(message)s") + ) testClientLogger.addHandler(fh) testClientLogger.setLevel(logging.INFO) self.testClientLogger = testClientLogger self.testClient = \ cloudstackTestClient.\ - cloudstackTestClient(mgt.mgtSvrIp, mgt.port, mgt.user, mgt.passwd, - mgt.apiKey, mgt.securityKey, + cloudstackTestClient(mgtDetails, + dbSvrDetails, logging=self.testClientLogger) - if mgt.apiKey is None: - apiKey, securityKey = self.registerApiKey() - self.testClient = cloudstackTestClient.cloudstackTestClient( - mgt.mgtSvrIp, 8080, - mgt.user, mgt.passwd, - apiKey, securityKey, - logging=self.testClientLogger) - """config database""" - dbSvr = self.config.dbSvr - if dbSvr is not None: - self.testClient.dbConfigure(dbSvr.dbSvr, dbSvr.port, dbSvr.user, - dbSvr.passwd, dbSvr.db) + if mgtDetails.apiKey is None: + mgtDetails.apiKey, mgtDetails.securityKey = self.registerApiKey() + mgtDetails.port = 8080 + self.testClient = \ + cloudstackTestClient.cloudstackTestClient( + mgtDetails, + dbSvrDetails, + logging= + self.testClientLogger) self.apiClient = self.testClient.getApiClient() """set hypervisor""" - if mgt.hypervisor: - self.apiClient.hypervisor = mgt.hypervisor + if mgtDetails.hypervisor: + self.apiClient.hypervisor = mgtDetails.hypervisor else: self.apiClient.hypervisor = "XenServer" # Defaults to Xenserver @@ -578,15 +581,13 @@ specify a valid config file" % cfgFile) self.apiClient.updateConfiguration(updateCfg) def copyAttributesToCommand(self, source, command): - map(lambda attr: setattr(command, attr, getattr(source, attr, None)), - filter(lambda attr: not attr.startswith("__") and - attr not in ["required", "isAsync"], dir(command))) + filter(lambda attr: not attr.startswith("__") and attr not 
in + ["required", "isAsync"], dir(command))) def configureS3(self, s3): if s3 is None: return - command = addS3.addS3Cmd() self.copyAttributesToCommand(s3, command) self.apiClient.addS3(command) @@ -598,16 +599,13 @@ specify a valid config file" % cfgFile) self.configureS3(self.config.s3) if __name__ == "__main__": - parser = OptionParser() - parser.add_option("-i", "--input", action="store", default="./datacenterCfg", dest="input", help="the path \ where the json config file generated, by default is \ ./datacenterCfg") (options, args) = parser.parse_args() - deploy = deployDataCenters(options.input) deploy.deploy() diff --git a/tools/marvin/marvin/integration/lib/base.py b/tools/marvin/marvin/integration/lib/base.py index df8140685e6..4f151378cc9 100755 --- a/tools/marvin/marvin/integration/lib/base.py +++ b/tools/marvin/marvin/integration/lib/base.py @@ -1514,7 +1514,8 @@ class NetworkOffering: cmd.specifyVlan = services["specifyVlan"] if "specifyIpRanges" in services: cmd.specifyIpRanges = services["specifyIpRanges"] - + if "ispersistent" in services: + cmd.ispersistent = services["ispersistent"] if "egress_policy" in services: cmd.egressdefaultpolicy = services["egress_policy"] @@ -1803,7 +1804,7 @@ class Host: return def enableMaintenance(self, apiclient): - """enables maintainance mode Host""" + """enables maintenance mode Host""" cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = self.id @@ -1811,14 +1812,14 @@ class Host: @classmethod def enableMaintenance(cls, apiclient, id): - """enables maintainance mode Host""" + """enables maintenance mode Host""" cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = id return apiclient.prepareHostForMaintenance(cmd) def cancelMaintenance(self, apiclient): - """Cancels maintainance mode Host""" + """Cancels maintenance mode Host""" cmd = cancelHostMaintenance.cancelHostMaintenanceCmd() cmd.id = self.id @@ -1826,7 +1827,7 @@ class Host: @classmethod def cancelMaintenance(cls, 
apiclient, id): - """Cancels maintainance mode Host""" + """Cancels maintenance mode Host""" cmd = cancelHostMaintenance.cancelHostMaintenanceCmd() cmd.id = id @@ -1895,7 +1896,7 @@ class StoragePool: return def enableMaintenance(self, apiclient): - """enables maintainance mode Storage pool""" + """enables maintenance mode Storage pool""" cmd = enableStorageMaintenance.enableStorageMaintenanceCmd() cmd.id = self.id diff --git a/tools/marvin/marvin/integration/lib/common.py b/tools/marvin/marvin/integration/lib/common.py index e8958850ce5..164ef2052dd 100644 --- a/tools/marvin/marvin/integration/lib/common.py +++ b/tools/marvin/marvin/integration/lib/common.py @@ -94,6 +94,22 @@ def add_netscaler(apiclient, zoneid, NSservice): return netscaler +def get_region(apiclient, services=None): + "Returns a default region" + + cmd = listRegions.listRegionsCmd() + if services: + if "regionid" in services: + cmd.id = services["regionid"] + + regions = apiclient.listRegions(cmd) + + if isinstance(regions, list): + assert len(regions) > 0 + return regions[0] + else: + raise Exception("Failed to find specified region.") + def get_domain(apiclient, services=None): "Returns a default domain" diff --git a/tools/marvin/marvin/integration/lib/utils.py b/tools/marvin/marvin/integration/lib/utils.py index a6abe0665eb..7d662af265c 100644 --- a/tools/marvin/marvin/integration/lib/utils.py +++ b/tools/marvin/marvin/integration/lib/utils.py @@ -5,9 +5,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. 
You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -30,6 +30,7 @@ import urlparse import datetime from marvin.cloudstackAPI import * from marvin.remoteSSHClient import remoteSSHClient +from marvin.codes import * def restart_mgmt_server(server): @@ -269,30 +270,32 @@ def is_snapshot_on_nfs(apiclient, dbconn, config, zoneid, snapshotid): ) assert isinstance(qresultset, list), "Invalid db query response for snapshot %s" % snapshotid - assert len(qresultset) != 0, "No such snapshot %s found in the cloudstack db" % snapshotid + + if len(qresultset) == 0: + #Snapshot does not exist + return False snapshotPath = qresultset[0][0] nfsurl = secondaryStore.url - # parse_url = ['nfs:', '', '192.168.100.21', 'export', 'test'] from urllib2 import urlparse parse_url = urlparse.urlsplit(nfsurl, scheme='nfs') host, path = parse_url.netloc, parse_url.path if not config.mgtSvr: raise Exception("Your marvin configuration does not contain mgmt server credentials") - host, user, passwd = config.mgtSvr[0].mgtSvrIp, config.mgtSvr[0].user, config.mgtSvr[0].passwd + mgtSvr, user, passwd = config.mgtSvr[0].mgtSvrIp, config.mgtSvr[0].user, config.mgtSvr[0].passwd try: ssh_client = remoteSSHClient( - host, + mgtSvr, 22, user, - passwd, + passwd ) cmds = [ "mkdir -p %s /mnt/tmp", - "mount -t %s %s:%s /mnt/tmp" % ( + "mount -t %s %s%s /mnt/tmp" % ( 'nfs', host, path, @@ -314,5 +317,39 @@ def is_snapshot_on_nfs(apiclient, dbconn, config, zoneid, snapshotid): ssh_client.execute(c) except Exception as e: raise Exception("SSH failed for management server: %s - %s" % - (config[0].mgtSvrIp, e)) + (config.mgtSvr[0].mgtSvrIp, e)) return 'snapshot exists' in result + + +def validateList(inp): + ''' + @name: validateList + @Description: 1. 
A utility function to validate + whether the input passed is a list + 2. The list is empty or not + 3. If it is list and not empty, return PASS and first element + 4. If not reason for FAIL + @Input: Input to be validated + @output: List, containing [ Result,FirstElement,Reason ] + Ist Argument('Result') : FAIL : If it is not a list + If it is list but empty + PASS : If it is list and not empty + IInd Argument('FirstElement'): If it is list and not empty, + then first element + in it, default to None + IIIrd Argument( 'Reason' ): Reason for failure ( FAIL ), + default to None. + INVALID_INPUT + EMPTY_LIST + ''' + ret = [FAIL, None, None] + if inp is None: + ret[2] = INVALID_INPUT + return ret + if not isinstance(inp, list): + ret[2] = INVALID_INPUT + return ret + if len(inp) == 0: + ret[2] = EMPTY_LIST + return ret + return [PASS, inp[0], None] diff --git a/tools/marvin/marvin/marvinPlugin.py b/tools/marvin/marvin/marvinPlugin.py index aded17cca55..0e52bab9d33 100644 --- a/tools/marvin/marvin/marvinPlugin.py +++ b/tools/marvin/marvin/marvinPlugin.py @@ -22,6 +22,7 @@ import nose.core from marvin.cloudstackTestCase import cloudstackTestCase from marvin import deployDataCenter from nose.plugins.base import Plugin +import time class MarvinPlugin(Plugin): @@ -32,6 +33,11 @@ class MarvinPlugin(Plugin): name = "marvin" def configure(self, options, config): + """enable the marvin plugin when the --with-marvin directive is given + to nose. The enableOpt value is set from the command line directive and + self.enabled (True|False) determines whether marvin's tests will run. 
+ By default non-default plugins like marvin will be disabled + """ if hasattr(options, self.enableOpt): if not getattr(options, self.enableOpt): self.enabled = False @@ -58,7 +64,7 @@ class MarvinPlugin(Plugin): else: self.result_stream = sys.stdout - deploy = deployDataCenter.deployDataCenters(options.config) + deploy = deployDataCenter.deployDataCenters(options.config_file) deploy.loadCfg() if options.load else deploy.deploy() self.setClient(deploy.testClient) self.setConfig(deploy.getCfg()) @@ -73,7 +79,7 @@ class MarvinPlugin(Plugin): """ parser.add_option("--marvin-config", action="store", default=env.get('MARVIN_CONFIG', './datacenter.cfg'), - dest="config", + dest="config_file", help="Marvin's configuration file where the " + "datacenter information is specified " + "[MARVIN_CONFIG]") @@ -121,15 +127,30 @@ class MarvinPlugin(Plugin): self.config = config def beforeTest(self, test): - testname = test.__str__().split()[0] - self.testclient.identifier = '-'.join([self.identifier, testname]) + self.testName = test.__str__().split()[0] + self.testclient.identifier = '-'.join([self.identifier, self.testName]) self.logger.name = test.__str__() + def startTest(self, test): + """ + Currently used to record start time for tests + """ + self.startTime = time.time() + + def stopTest(self, test): + """ + Currently used to record end time for tests + """ + endTime = time.time() + if self.startTime is not None: + totTime = int(endTime - self.startTime) + self.logger.debug( + "TestCaseName: %s; Time Taken: %s Seconds; \ + StartTime: %s; EndTime: %s" + % (self.testName, str(totTime), + str(time.ctime(self.startTime)), str(time.ctime(endTime)))) + def _injectClients(self, test): - self.debug_stream. \ - setFormatter(logging. 
- Formatter("%(asctime)s - %(levelname)s - %(name)s" + - " - %(message)s")) setattr(test, "debug", self.logger.debug) setattr(test, "info", self.logger.info) setattr(test, "warn", self.logger.warning) diff --git a/tools/marvin/marvin/sandbox/demo/simulator/testcase/libs/base.py b/tools/marvin/marvin/sandbox/demo/simulator/testcase/libs/base.py index 0b5da5c162f..7c8546c092c 100644 --- a/tools/marvin/marvin/sandbox/demo/simulator/testcase/libs/base.py +++ b/tools/marvin/marvin/sandbox/demo/simulator/testcase/libs/base.py @@ -1053,8 +1053,8 @@ class Host: return def enableMaintenance(self, apiclient): - """enables maintainance mode Host""" - + """enables maintenance mode Host""" + cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd() cmd.id = self.id return apiclient.prepareHostForMaintenance(cmd) @@ -1113,8 +1113,8 @@ class StoragePool: return def enableMaintenance(self, apiclient): - """enables maintainance mode Storage pool""" - + """enables maintenance mode Storage pool""" + cmd = enableStorageMaintenance.enableStorageMaintenanceCmd() cmd.id = self.id return apiclient.enableStorageMaintenance(cmd) diff --git a/tools/marvin/pom.xml b/tools/marvin/pom.xml index a23c6529be2..086924859d1 100644 --- a/tools/marvin/pom.xml +++ b/tools/marvin/pom.xml @@ -24,7 +24,6 @@ maven-antrun-plugin - 1.7 clean diff --git a/tools/pom.xml b/tools/pom.xml index 9633109d8b1..a7c5a2d48d5 100644 --- a/tools/pom.xml +++ b/tools/pom.xml @@ -22,7 +22,6 @@ 4.0.0 Apache CloudStack Developer Tools - org.apache.cloudstack cloud-tools pom diff --git a/ui/css/cloudstack3.css b/ui/css/cloudstack3.css index 6510d0ebc52..a75027a3acc 100644 --- a/ui/css/cloudstack3.css +++ b/ui/css/cloudstack3.css @@ -46,10 +46,11 @@ div.toolbar, /*+}*/ body { + min-width: 1024px; font-family: sans-serif; height: 769px !important; overflow: auto; - background: #FFFFFF; + background: #EDE8E8; } body.install-wizard { @@ -60,12 +61,23 @@ body.install-wizard { background: #FFFFFF url(../images/bg-login.png); } 
-#container { +#main-area { width: 1024px; - height: 783px; + height: 700px; + margin: auto; + border: 1px solid #D4D4D4; + /*+box-shadow:0px -5px 11px #B7B7B7;*/ + -moz-box-shadow: 0px -5px 11px #B7B7B7; + -webkit-box-shadow: 0px -5px 11px #B7B7B7; + -o-box-shadow: 0px -5px 11px #B7B7B7; + box-shadow: 0px -5px 11px #B7B7B7; + border: 1px solid #E8E8E8; +} + +#container { + /*[empty]width:;*/ + height: 700px; margin: auto; - border: 1px solid #E2E2E2; - border-top: none; position: relative; } @@ -73,6 +85,16 @@ body.install-wizard { display: none; } +a { + color: #0B84DC; + text-decoration: none; +} + +a:hover { + text-decoration: underline; + color: #000000; +} + /*Table*/ table { width: 740px; @@ -93,7 +115,7 @@ table thead { table thead th { border: 1px solid #C6C3C3; - color: #566677; + color: #525252; border-top: none; border-bottom: 1px solid #CFC9C9; text-align: left; @@ -102,7 +124,7 @@ table thead th { -webkit-text-shadow: 0px 1px 1px #FFFFFF; -o-text-shadow: 0px 1px 1px #FFFFFF; text-shadow: 0px 1px 1px #FFFFFF; - font-weight: normal; + font-weight: bold; } table thead th.sorted { @@ -124,9 +146,9 @@ table thead th.sorted.asc { table tbody td, table th { - padding: 9px 5px 4px 5px; + padding: 10px 5px 8px; border-right: 1px solid #BFBFBF; - color: #495A76; + color: #282828; clear: none; min-width: 88px; font-size: 11px; @@ -174,7 +196,7 @@ table thead th.quick-view { max-width: 58px !important; width: 58px !important; height: 14px !important; - text-indent: 7px; + text-indent: 2px; } table tbody td.quick-view { @@ -212,7 +234,7 @@ table tbody tr { } table tbody tr.even { - background: #DFE1E3; + background: #FFFFFF; } table tbody tr.odd { @@ -271,15 +293,6 @@ table th div.ui-resizable-handle { } /** Header, misc*/ -#header, -#navigation { - /*+text-shadow:0px -1px 1px #000000;*/ - -moz-text-shadow: 0px -1px 1px #000000; - -webkit-text-shadow: 0px -1px 1px #000000; - -o-text-shadow: 0px -1px 1px #000000; - text-shadow: 0px -1px 1px #000000; -} - #template 
{ display: none; } @@ -403,11 +416,11 @@ body.login { } .login .logo { - width: 243px; + width: 250px; height: 31px; float: left; margin: 72px 0 0 209px; - background: url(../images/logo-login.png) no-repeat -7px 0px; + background: url(../images/logo-login.png) no-repeat 0 0; } .login.nologo .logo { @@ -446,7 +459,7 @@ body.login { } #browser div.panel div.detail-view .toolbar { - width: 594px; + width: 100%; } div.list-view table tbody td span { @@ -1177,9 +1190,8 @@ div.notification.corner-alert:hover div.message span { div.panel div.list-view { overflow: auto; overflow-x: hidden; - height: 668px; + height: 632px; margin-top: 30px; - border-bottom: 1px solid #E7E7E7; } .detail-view div.list-view { @@ -1296,7 +1308,6 @@ div.list-view td.state.off span { .quick-view-tooltip { width: 470px; display: inline-block; - margin-left: -462px; padding-top: 50px; } @@ -1305,20 +1316,17 @@ div.list-view td.state.off span { position: absolute; top: 71px; left: 10px; - color: #5A6977; - /*+text-shadow:0px 1px #EAEAEA;*/ - -moz-text-shadow: 0px 1px #EAEAEA; - -webkit-text-shadow: 0px 1px #EAEAEA; - -o-text-shadow: 0px 1px #EAEAEA; - text-shadow: 0px 1px #EAEAEA; + color: #808080; + font-weight: 100; } .quick-view-tooltip > div.title .icon { position: relative; - top: -3px; + top: -2px; + left: -7px; background: url(../images/sprites.png) no-repeat -42px -67px; float: right; - padding: 5px 21px 0 1px; + padding: 0px 13px 0 0px; } .quick-view-tooltip .loading-overlay { @@ -1337,8 +1345,8 @@ div.list-view td.state.off span { .quick-view-tooltip .container { border: 1px solid #9EA2A5; - background: #CCCFD6; - width: 470px; + background: #FFFFFF; + width: 471px; min-height: 100px; height: auto; overflow: hidden; @@ -1356,10 +1364,10 @@ div.list-view td.state.off span { .quick-view-tooltip .detail-view .main-groups { width: 456px; - height: 132px; + height: 170px; position: absolute; - top: 55px; padding-top: 7px; + top: 55px; border: 1px solid #808080; border-left: none; 
border-right: none; @@ -1393,7 +1401,7 @@ div.list-view td.state.off span { .quick-view-tooltip .detail-view .details { display: inline-block; height: auto; - padding-bottom: 209px; + padding-bottom: 224px; } .quick-view-tooltip .detail-view .detail-group { @@ -1416,14 +1424,9 @@ div.list-view td.state.off span { } .quick-view-tooltip .detail-view .detail-group table td.name { - color: #3E5F7F !important; + color: #000000 !important; padding: 0px 29px 0px 5px !important; font-size: 13px; - /*+text-shadow:0px 1px #DBDBDB;*/ - -moz-text-shadow: 0px 1px #DBDBDB; - -webkit-text-shadow: 0px 1px #DBDBDB; - -o-text-shadow: 0px 1px #DBDBDB; - text-shadow: 0px 1px #DBDBDB; } .quick-view-tooltip .detail-view .detail-group table td.value { @@ -1444,7 +1447,7 @@ div.list-view td.state.off span { .quick-view-tooltip .detail-view .detail-group .main-groups table td.value span { height: 25px; - top: 0px; + top: 7px; } .quick-view-tooltip .detail-view .detail-group.actions { @@ -1506,15 +1509,17 @@ div.list-view td.state.off span { height: auto; background: none; vertical-align: top; + position: relative; + top: 27px; float: left; } .quick-view-tooltip .detail-view .detail-group.actions td.view-all { position: relative; left: 0px; + top: 26px; float: left; - height: 22px; - border-top: 1px solid #808080; + height: 26px; /*+box-shadow:inset 0px 1px #FFFFFF;*/ -moz-box-shadow: inset 0px 1px #FFFFFF; -webkit-box-shadow: inset 0px 1px #FFFFFF; @@ -1585,10 +1590,13 @@ div.list-view td.state.off span { min-width: 91px; text-align: center; font-size: 11px; - margin-right: 1px; + margin-right: 12px; color: #4E6070; text-decoration: none; - background: #DEE3E5; + /*+placement:shift 0px 2px;*/ + position: relative; + left: 0px; + top: 2px; } .ui-tabs li { @@ -1596,10 +1604,14 @@ div.list-view td.state.off span { } .ui-tabs ul li.ui-state-default a { - border-right: 1px solid #97AFC5; - border-top: 2px solid #D6DCDE; - background: url(../images/bg-details-tab-gradient.png); - padding-bottom: 
11px; + padding-bottom: 10px; + border: 1px solid #D9D9D9; + /*+border-radius:4px 4px 0 0;*/ + -moz-border-radius: 4px 4px 0 0; + -webkit-border-radius: 4px 4px 0 0; + -khtml-border-radius: 4px 4px 0 0; + border-radius: 4px 4px 0 0; + background: #F0F0F0; } .project-view .ui-tabs ul li.ui-state-default a { @@ -1620,7 +1632,8 @@ div.list-view td.state.off span { } .ui-tabs ul li.ui-state-hover a { - background: url(../images/gradients.png) -8px -1413px; + text-decoration: underline; + color: #000000; } .ui-tabs ul li.ui-state-active { @@ -1628,9 +1641,7 @@ div.list-view td.state.off span { } .ui-tabs ul li.ui-state-active a { - border-right: 1px solid #D6DCDE; background: #FFFFFF; - border-left: none; padding-bottom: 12px; } @@ -1659,26 +1670,17 @@ div.list-view td.state.off span { .ui-tabs li.ui-state-active.first a, .ui-tabs li.ui-state-default.first a { - border-left: 1px solid #E2DDDD; - /*+placement:shift 0px 0px;*/ - position: relative; - left: 0px; - top: 0px; - /*+border-radius:4px 0 0;*/ - -moz-border-radius: 4px 0 0; - -webkit-border-radius: 4px 0 0; - -khtml-border-radius: 4px 0 0; - border-radius: 4px 0 0; - border-radius: 4px 0 0 0; + /*+border-radius:4px 4px 0 0;*/ + -moz-border-radius: 4px 4px 0 0; + -webkit-border-radius: 4px 4px 0 0; + -khtml-border-radius: 4px 4px 0 0; + border-radius: 4px 4px 0 0; + border: 1px solid #E2DDDD; + border: 1px solid #E2DDDD; } .ui-tabs li.ui-state-active.last a, .ui-tabs li.ui-state-default.last a { - /*+border-radius:0 4px 0 0;*/ - -moz-border-radius: 0 4px 0 0; - -webkit-border-radius: 0 4px 0 0; - -khtml-border-radius: 0 4px 0 0; - border-radius: 0 4px 0 0; } .ui-tabs li.ui-state-active.first.last a, @@ -1695,7 +1697,7 @@ div.list-view td.state.off span { } .ui-tabs div.ui-tabs-panel { - border: 1px solid #E2DDDD; + border: 1px solid #D9D9D9; clear: both; height: 78%; width: 97%; @@ -1722,9 +1724,9 @@ div.list-view td.state.off span { .detail-group table { width: 96%; font-size: 12px; - border: 1px solid #CFC9C9; - 
background: #E4E6E7; - margin-top: 11px; + border-bottom: 1px solid #DFDFDF; + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#f7f7f7', endColorstr='#eaeaea',GradientType=0 ); + margin-top: 10px; } .detail-group table tr, @@ -1734,6 +1736,10 @@ div.list-view td.state.off span { cursor: default; } +.detail-group table tr.odd { + background: none; +} + .details.group-multiple table { border: none; border-top: none; @@ -1750,7 +1756,8 @@ div.list-view td.state.off span { .detail-group .main-groups table td.name { width: 113px; - color: #6393F1; + color: #6D6D6D; + font-weight: bold; padding: 14px 12px 13px 13px; border: none; text-indent: 0; @@ -1966,25 +1973,17 @@ div.detail-group td.view-all div.view-all { } div.detail-group td.view-all a { - background: url(../images/gradients.png) repeat-x 0px -529px; - font-size: 11px; + font-size: 13px; display: block; - height: 27px; text-decoration: none; - color: #4C5D6C; + color: #0373B7; /*+text-shadow:0px 1px 2px #FFFFFF;*/ -moz-text-shadow: 0px 1px 2px #FFFFFF; -webkit-text-shadow: 0px 1px 2px #FFFFFF; -o-text-shadow: 0px 1px 2px #FFFFFF; text-shadow: 0px 1px 2px #FFFFFF; float: left; - padding: 0 1px; - border-left: 1px solid #9B9EA2; - /*+border-radius:5px 0 0 5px;*/ - -moz-border-radius: 5px 0 0 5px; - -webkit-border-radius: 5px 0 0 5px; - -khtml-border-radius: 5px 0 0 5px; - border-radius: 5px 0 0 5px; + font-weight: 100; } div.detail-group td.view-all:hover a { @@ -1992,13 +1991,19 @@ div.detail-group td.view-all:hover a { } div.detail-group td.view-all a span { - /*+placement:shift -4px 7px;*/ + /*+placement:shift -4px -1px;*/ position: relative; left: -4px; - top: 7px; + top: -1px; +} + +div.detail-group td.view-all:hover a span { + text-decoration: underline; + color: #000000; } div.detail-group td.view-all div.view-all div.end { + display: none; float: left; width: 15px; height: 25px; @@ -2034,16 +2039,11 @@ div.details .main-groups label.error { .detail-view td.view-all.multiple { max-width: 
145px; - height: 17px; display: block; float: left; - margin: 8px 2px 8px 8px; - border: none !important; - /*+box-shadow:none;*/ - -moz-box-shadow: none !important; - -webkit-box-shadow: none !important; - -o-box-shadow: none !important; - box-shadow: none !important; + height: 28px; + margin-left: 0; + text-align: left; } /*** Actions*/ @@ -2061,7 +2061,7 @@ div.detail-group.actions tr { } div.detail-group.actions td { - height: 43px; + height: 50px; vertical-align: middle; } @@ -2100,14 +2100,12 @@ div.detail-group.actions td { } .detail-group table td.detail-actions { - width: 59%; height: 26px; } .detail-group table td.detail-actions.full-length { display: block; width: 99%; - border-bottom: 1px solid #AAAAAA; float: left; } @@ -2143,10 +2141,6 @@ div.detail-group.actions td { } .detail-group table td.detail-actions div.buttons { - /*+placement:shift 6px 0px;*/ - position: relative; - left: 6px; - top: 0px; } .detail-group table td.detail-actions a { @@ -2155,7 +2149,6 @@ div.detail-group.actions td { text-indent: -9999px; width: 30px; height: 25px; - background: url(../images/sprites.png) -417px -11px; margin: 0; } @@ -2184,7 +2177,6 @@ div.detail-group.actions td { .detail-group table td.detail-actions div.action.single a { width: 31px; height: 26px; - background: url(../images/buttons.png); background-position: -414px -625px; } @@ -2204,8 +2196,11 @@ div.detail-group.actions td { /*Header*/ #header { - height: 56px; - background: #DFDFDF url(../images/bg-header.png) repeat-x; + width: 100%; + height: 135px; + background: url(../images/overlay-pattern.png) repeat 0, #1B5070 url(../images/header-gradient.png) no-repeat center; + background-size: auto, cover; + position: relative; } #header div.button { @@ -2215,25 +2210,44 @@ div.detail-group.actions td { } #header.nologo div.logo { - width: 170px; + width: 1024px; height: 47px; - position: relative; - float: left; - margin: 4px 0 0 19px; + margin: auto; background: url(../images/logo.png) no-repeat 0 center; 
+ /*+placement:shift 0px 15px;*/ + position: relative; + left: 0px; + top: 15px; } #header div.controls { - height: 39px; + width: 1026px; + height: 48px; position: relative; - float: right; - margin-top: 8px; - display: inline-block; - padding: 0 96px 0 0; + margin: 27px auto 0; + padding-top: 13px; + /*+border-radius:4px 4px 0 0;*/ + -moz-border-radius: 4px 4px 0 0; + -webkit-border-radius: 4px 4px 0 0; + -khtml-border-radius: 4px 4px 0 0; + border-radius: 4px 4px 0 0; } #header div.controls.nologo { - background-image: none; + background: #666666; + background: url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiA/Pgo8c3ZnIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgd2lkdGg9IjEwMCUiIGhlaWdodD0iMTAwJSIgdmlld0JveD0iMCAwIDEgMSIgcHJlc2VydmVBc3BlY3RSYXRpbz0ibm9uZSI+CiAgPGxpbmVhckdyYWRpZW50IGlkPSJncmFkLXVjZ2ctZ2VuZXJhdGVkIiBncmFkaWVudFVuaXRzPSJ1c2VyU3BhY2VPblVzZSIgeDE9IjAlIiB5MT0iMCUiIHgyPSIwJSIgeTI9IjEwMCUiPgogICAgPHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iIzY2NjY2NiIgc3RvcC1vcGFjaXR5PSIxIi8+CiAgICA8c3RvcCBvZmZzZXQ9IjEwMCUiIHN0b3AtY29sb3I9IiMzZDNkM2QiIHN0b3Atb3BhY2l0eT0iMSIvPgogIDwvbGluZWFyR3JhZGllbnQ+CiAgPHJlY3QgeD0iMCIgeT0iMCIgd2lkdGg9IjEiIGhlaWdodD0iMSIgZmlsbD0idXJsKCNncmFkLXVjZ2ctZ2VuZXJhdGVkKSIgLz4KPC9zdmc+); + background: -moz-linear-gradient(top, #666666 0%, #3d3d3d 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,#666666), color-stop(100%,#3d3d3d)); + background: -webkit-linear-gradient(top, #666666 0%,#3d3d3d 100%); + background: -o-linear-gradient(top, #666666 0%,#3d3d3d 100%); + background: -ms-linear-gradient(top, #666666 0%,#3d3d3d 100%); + background: linear-gradient(to bottom, #666666 0%,#3d3d3d 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#666666', endColorstr='#3d3d3d',GradientType=0 ); + /*+box-shadow:0px -1px 6px #0E3955;*/ + -moz-box-shadow: 0px -1px 6px #0E3955; + -webkit-box-shadow: 0px -1px 6px #0E3955; + -o-box-shadow: 0px -1px 6px #0E3955; + box-shadow: 0px -1px 6px #0E3955; 
} .button { @@ -2244,16 +2258,27 @@ div.detail-group.actions td { #header div.notifications { background: transparent; + float: right; height: 18px; padding: 1px 0 0; - margin: 8px 36px 0 50px; + /*+placement:shift -174px -57px;*/ + position: relative; + left: -174px; + top: -57px; +} + +#header div.notifications:after { + content: "|"; + /*+placement:shift 28px 7px;*/ + position: relative; + left: 28px; + top: 7px; } #header div.notifications span { position: relative; - top: 6px; + top: 5px; left: 7px; - font-weight: bold; } #header div.notifications:hover { @@ -2296,8 +2321,10 @@ div.detail-group.actions td { #user { height: 30px; - margin: 5px 6px 0 0; - position: relative; + margin: 0; + position: absolute; + top: -47px; + left: 890px; cursor: default !important; display: inline-block; float: left; @@ -2305,22 +2332,9 @@ div.detail-group.actions td { } #user div.name { - background: url(../images/bg-gradients.png) 0px -867px; display: inline-block; float: left; padding: 9px 18px 7px 12px; - border: 1px solid #7A7A7A; - border-bottom: 1px solid #ADADAD; - /*+box-shadow:inset 0px -1px 2px #6D6D6D;*/ - -moz-box-shadow: inset 0px -1px 2px #6D6D6D; - -webkit-box-shadow: inset 0px -1px 2px #6D6D6D; - -o-box-shadow: inset 0px -1px 2px #6D6D6D; - box-shadow: inset 0px -1px 2px #6D6D6D; - /*+border-radius:4px 0 0 4px;*/ - -moz-border-radius: 4px 0 0 4px; - -webkit-border-radius: 4px 0 0 4px; - -khtml-border-radius: 4px 0 0 4px; - border-radius: 4px 0 0 4px; border-right: none; /*[empty]border-top:;*/ min-width: 75px; @@ -2328,7 +2342,6 @@ div.detail-group.actions td { text-align: center; height: 12px; overflow: hidden; - font-weight: bold; /*+text-shadow:0px -1px 1px #464646;*/ -moz-text-shadow: 0px -1px 1px #464646; -webkit-text-shadow: 0px -1px 1px #464646; @@ -2345,37 +2358,21 @@ div.detail-group.actions td { position: relative; left: 0px; top: 0px; - background: url(../images/bg-gradients.png); background-position: 0px -867px; - border-top: 1px solid #7A7A7A; - 
border-bottom: 1px solid #ADADAD; - border-left: 1px solid #B5B5B5; - /*+border-radius:0 4px 4px 0;*/ - -moz-border-radius: 0 4px 4px 0; - -webkit-border-radius: 0 4px 4px 0; - -khtml-border-radius: 0 4px 4px 0; - border-radius: 0 4px 4px 0; - /*+box-shadow:inset 1px 0px 2px #6D6D6D;*/ - -moz-box-shadow: inset 1px 0px 2px #6D6D6D; - -webkit-box-shadow: inset 1px 0px 2px #6D6D6D; - -o-box-shadow: inset 1px 0px 2px #6D6D6D; - box-shadow: inset 1px 0px 2px #6D6D6D; cursor: pointer; } #user div.options:hover { - background-position: 0px -904px; - /*[empty]background-color:;*/ } #user div.options .arrow { width: 11px; height: 8px; background: url(../images/buttons.png) -402px -23px; - /*+placement:shift 10px 13px;*/ + /*+placement:shift 8px 11px;*/ position: relative; - left: 10px; - top: 13px; + left: 8px; + top: 11px; } /** Zone filter (mixed zone management)*/ @@ -2416,7 +2413,20 @@ div.detail-group.actions td { width: 230px; position: relative; float: left; + /*+box-shadow:inset -1px 4px 7px #DDDDDD;*/ + -moz-box-shadow: inset -1px 4px 7px #DDDDDD; + -webkit-box-shadow: inset -1px 4px 7px #DDDDDD; + -o-box-shadow: inset -1px 4px 7px #DDDDDD; + box-shadow: inset -1px 4px 7px #DDDDDD; background: #EDE8E8; + background: url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiA/Pgo8c3ZnIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgd2lkdGg9IjEwMCUiIGhlaWdodD0iMTAwJSIgdmlld0JveD0iMCAwIDEgMSIgcHJlc2VydmVBc3BlY3RSYXRpbz0ibm9uZSI+CiAgPGxpbmVhckdyYWRpZW50IGlkPSJncmFkLXVjZ2ctZ2VuZXJhdGVkIiBncmFkaWVudFVuaXRzPSJ1c2VyU3BhY2VPblVzZSIgeDE9IjAlIiB5MT0iMCUiIHgyPSIwJSIgeTI9IjEwMCUiPgogICAgPHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iI2ZmZmZmZiIgc3RvcC1vcGFjaXR5PSIxIi8+CiAgICA8c3RvcCBvZmZzZXQ9IjYlIiBzdG9wLWNvbG9yPSIjZWRlOGU4IiBzdG9wLW9wYWNpdHk9IjEiLz4KICA8L2xpbmVhckdyYWRpZW50PgogIDxyZWN0IHg9IjAiIHk9IjAiIHdpZHRoPSIxIiBoZWlnaHQ9IjEiIGZpbGw9InVybCgjZ3JhZC11Y2dnLWdlbmVyYXRlZCkiIC8+Cjwvc3ZnPg==); + background: -moz-linear-gradient(top, #ffffff 0%, #ede8e8 6%); + background: 
-webkit-gradient(linear, left top, left bottom, color-stop(0%,#ffffff), color-stop(6%,#ede8e8)); + background: -webkit-linear-gradient(top, #ffffff 0%,#ede8e8 6%); + background: -o-linear-gradient(top, #ffffff 0%,#ede8e8 6%); + background: -ms-linear-gradient(top, #ffffff 0%,#ede8e8 6%); + background: linear-gradient(to bottom, #ffffff 0%,#ede8e8 6%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#ffffff', endColorstr='#ede8e8',GradientType=0 ); } .project-view #navigation { @@ -2424,8 +2434,8 @@ div.detail-group.actions td { } #navigation ul { - border-right: 1px solid #CECCCC; - height: 100%; + height: 700px; + padding-top: 29px; } .project-view #navigation ul { @@ -2433,14 +2443,9 @@ div.detail-group.actions td { } #navigation ul li { - background: url(../images/bg-nav-item.png) repeat-x; - height: 50px; + height: 42px; cursor: pointer; - /*+text-shadow:0px 1px 1px #FFFFFF;*/ - -moz-text-shadow: 0px 1px 1px #FFFFFF; - -webkit-text-shadow: 0px 1px 1px #FFFFFF; - -o-text-shadow: 0px 1px 1px #FFFFFF; - text-shadow: 0px 1px 1px #FFFFFF; + border-bottom: 1px solid #D2D2D2; } .project-view #navigation ul li { @@ -2459,9 +2464,13 @@ div.detail-group.actions td { #navigation ul li:hover, #navigation ul li.active { - background: url(../images/bg-nav-item-active.png); - background-position: 0px 0px; width: 230px; + background: #2C5D7B; + /*+box-shadow:inset 0px 0px 7px #000000;*/ + -moz-box-shadow: inset 0px 0px 7px #000000; + -webkit-box-shadow: inset 0px 0px 7px #000000; + -o-box-shadow: inset 0px 0px 7px #000000; + box-shadow: inset 0px 0px 7px #000000; } #navigation ul li.disabled { @@ -2488,11 +2497,11 @@ div.detail-group.actions td { #navigation ul li:hover span, #navigation ul li.active span { color: #FFFFFF; - /*+text-shadow:1px 2px 1px #5D5F6D;*/ - -moz-text-shadow: 1px 2px 1px #5D5F6D; - -webkit-text-shadow: 1px 2px 1px #5D5F6D; - -o-text-shadow: 1px 2px 1px #5D5F6D; - text-shadow: 1px 2px 1px #5D5F6D; + /*+text-shadow:0px 1px #000000;*/ + 
-moz-text-shadow: 0px 1px #000000; + -webkit-text-shadow: 0px 1px #000000; + -o-text-shadow: 0px 1px #000000; + text-shadow: 0px 1px #000000; } #navigation ul li.disabled:hover { @@ -2517,27 +2526,32 @@ div.detail-group.actions td { #navigation ul li.last { background-repeat: repeat; background-position: 0px 0px; - height: 57px; /*[empty]color:;*/ } #navigation ul li span { - /*+placement:shift 14px 18px;*/ + /*+placement:shift 14px 13px;*/ position: relative; left: 14px; - top: 18px; - font-size: 13px; - color: #596D7F; + top: 13px; + font-size: 11px; + color: #515151; padding-left: 19px; + font-weight: bold; + /*+text-shadow:0px 1px #FFFFFF;*/ + -moz-text-shadow: 0px 1px #FFFFFF; + -webkit-text-shadow: 0px 1px #FFFFFF; + -o-text-shadow: 0px 1px #FFFFFF; + text-shadow: 0px 1px #FFFFFF; } #navigation ul li span.icon { background: url(../images/icons.png) no-repeat 0px 0px; - padding: 16px 16px 12px; - /*+placement:shift 17px 18px;*/ + padding: 16px 16px 13px; + /*+placement:shift 17px 10px;*/ position: relative; left: 17px; - top: 18px; + top: 10px; } #navigation ul li.custom-icon span.icon { @@ -2564,7 +2578,6 @@ div.detail-group.actions td { #navigation ul li.last.active, #navigation ul li.last:hover { - height: 52px; } /*Navigation icons*/ @@ -2633,6 +2646,7 @@ div.detail-group.actions td { /*Browser*/ #browser { width: 794px; + height: 700px; max-width: 794px; position: relative; float: left; @@ -2648,10 +2662,11 @@ div.detail-group.actions td { } #browser div.panel { - height: 698px; - background: #F7F7F7; + height: 100%; + background: #FFFFFF; border-right: 1px solid #A5A5A5; overflow: visible; + background-color: #F7F7F7; } #browser div.panel.panel-highlight-wrapper { @@ -2739,19 +2754,13 @@ div.detail-group.actions td { /*Toolbar*/ /*[clearfix]*/div.toolbar { - width: 793px; + width: 100%; height: 32px; - background: #A8AFB6; - border-top: 1px solid #D0D5DA; - border-bottom: 1px solid #43586B; - border-right: 1px solid #43586B; - border-left: 1px solid 
#43586B; - /*+border-radius:0 2px 2px;*/ - -moz-border-radius: 0 2px 2px; - -webkit-border-radius: 0 2px 2px; - -khtml-border-radius: 0 2px 2px; - border-radius: 0 2px 2px; - border-radius: 0 2px 2px 2px; + /*+box-shadow:0px 1px 4px #CFCFCF;*/ + -moz-box-shadow: 0px 1px 4px #CFCFCF; + -webkit-box-shadow: 0px 1px 4px #CFCFCF; + -o-box-shadow: 0px 1px 4px #CFCFCF; + box-shadow: 0px 1px 4px #CFCFCF; /*+placement:shift 0px -1px;*/ position: relative; left: 0px; @@ -2759,6 +2768,7 @@ div.detail-group.actions td { z-index: 6; position: absolute; top: 0px; + background: #ECECEC 0px -6px; } .detail-view .ui-tabs-panel div.toolbar { @@ -2777,14 +2787,9 @@ div.toolbar div.filters { } div.toolbar div.filters label { - color: #4E5C6B; + color: #3F3B3B; font-size: 12px; - font-weight: bold; - /*+text-shadow:0px 1px 1px #CDCDCD;*/ - -moz-text-shadow: 0px 1px 1px #CDCDCD; - -webkit-text-shadow: 0px 1px 1px #CDCDCD; - -o-text-shadow: 0px 1px 1px #CDCDCD; - text-shadow: 0px 1px 1px #CDCDCD; + font-weight: 100; display: block; float: left; padding: 5px 11px 0 0; @@ -2872,10 +2877,10 @@ div.toolbar div.button.main-action, position: relative; left: 0px; top: 5px; - background: url(../images/gradients.png) 0px -98px; - font-size: 11px; - font-weight: bold; - color: #4A5A6D; + background: #EAEAEA; + font-size: 12px; + font-weight: 100; + color: #000000; margin: 0 14px 0 0; cursor: pointer; /*+text-shadow:0px 1px 1px #DEE5EA;*/ @@ -2884,8 +2889,17 @@ div.toolbar div.button.main-action, -o-text-shadow: 0px 1px 1px #DEE5EA; text-shadow: 0px 1px 1px #DEE5EA; padding: 5px 7px 5px 6px; - border-left: 1px solid #808080; - border-right: 1px solid #808080; + background: #F7F7F7; + background: rgb(247, 247, 247); + background: 
url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiA/Pgo8c3ZnIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgd2lkdGg9IjEwMCUiIGhlaWdodD0iMTAwJSIgdmlld0JveD0iMCAwIDEgMSIgcHJlc2VydmVBc3BlY3RSYXRpbz0ibm9uZSI+CiAgPGxpbmVhckdyYWRpZW50IGlkPSJncmFkLXVjZ2ctZ2VuZXJhdGVkIiBncmFkaWVudFVuaXRzPSJ1c2VyU3BhY2VPblVzZSIgeDE9IjAlIiB5MT0iMCUiIHgyPSIwJSIgeTI9IjEwMCUiPgogICAgPHN0b3Agb2Zmc2V0PSIxJSIgc3RvcC1jb2xvcj0iI2Y3ZjdmNyIgc3RvcC1vcGFjaXR5PSIxIi8+CiAgICA8c3RvcCBvZmZzZXQ9IjEwMCUiIHN0b3AtY29sb3I9IiNlYWVhZWEiIHN0b3Atb3BhY2l0eT0iMSIvPgogIDwvbGluZWFyR3JhZGllbnQ+CiAgPHJlY3QgeD0iMCIgeT0iMCIgd2lkdGg9IjEiIGhlaWdodD0iMSIgZmlsbD0idXJsKCNncmFkLXVjZ2ctZ2VuZXJhdGVkKSIgLz4KPC9zdmc+); + background: -moz-linear-gradient(top, rgba(247,247,247,1) 1%, rgba(234,234,234,1) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(1%,rgba(247,247,247,1)), color-stop(100%,rgba(234,234,234,1))); + background: -webkit-linear-gradient(top, rgba(247,247,247,1) 1%,rgba(234,234,234,1) 100%); + background: -o-linear-gradient(top, rgba(247,247,247,1) 1%,rgba(234,234,234,1) 100%); + background: -ms-linear-gradient(top, rgba(247,247,247,1) 1%,rgba(234,234,234,1) 100%); + background: linear-gradient(to bottom, rgba(247,247,247,1) 1%,rgba(234,234,234,1) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#f7f7f7', endColorstr='#eaeaea',GradientType=0 ); + border: 1px solid #B7B7B7; float: right; /*+border-radius:4px;*/ -moz-border-radius: 4px; @@ -2901,8 +2915,12 @@ div.toolbar div.button.refresh:hover, div.toolbar div.button.main-action:hover, .toolbar div.button.header-action:hover, .detail-group .button.add:hover { - background-position: 0 -132px; - border-left: 1px solid #585D60; + background: #E5E5E5; + /*+box-shadow:inset 0px 0px 5px #C3C3C3;*/ + -moz-box-shadow: inset 0px 0px 5px #C3C3C3; + -webkit-box-shadow: inset 0px 0px 5px #C3C3C3; + -o-box-shadow: inset 0px 0px 5px #C3C3C3; + box-shadow: inset 0px 0px 5px #C3C3C3; } div.toolbar div.button.main-action 
span.icon { @@ -3131,7 +3149,8 @@ div.panel div.toolbar div.section-switcher { div.toolbar div.section-switcher div.section-select { float: right; - background: #A8AFB6; + font-size: 12px; + font-weight: 100; } div.toolbar div.section-switcher div.section { @@ -3201,7 +3220,6 @@ div.toolbar div.section-switcher div.section.first.last a { div.toolbar div.section-switcher div.section-select { float: left; - background: #A8AFB6; height: 26px; } @@ -3244,14 +3262,6 @@ div.toolbar div.filters { } div.toolbar label { - color: #4E5C6B; - font-size: 12px; - font-weight: bold; - /*+text-shadow:0px 1px 1px #CDCDCD;*/ - -moz-text-shadow: 0px 1px 1px #CDCDCD; - -webkit-text-shadow: 0px 1px 1px #CDCDCD; - -o-text-shadow: 0px 1px 1px #CDCDCD; - text-shadow: 0px 1px 1px #CDCDCD; } div.toolbar div.filters select { @@ -3272,7 +3282,7 @@ div.toolbar div.filters select { #breadcrumbs { height: 29px; max-height: 29px; - background: url(../images/bg-breadcrumbs.png) repeat-x; + background: #FFFFFF; overflow: hidden; width: 100%; } @@ -3284,14 +3294,13 @@ div.toolbar div.filters select { } #breadcrumbs div.home { - width: 41px; - height: 32px; + width: 71px; + height: 23px; float: left; - background: url(../images/buttons.png) -9px -96px; - /*+placement:shift -1px -1px;*/ + /*+placement:shift -1px 0px;*/ position: relative; left: -1px; - top: -1px; + top: 0px; cursor: pointer; z-index: 5; } @@ -3305,18 +3314,13 @@ div.toolbar div.filters select { } #breadcrumbs ul li, -#breadcrumbs div.active-project { - background: url(../images/gradients.png) repeat-x -522px -10px; +#breadcrumbs div.active-project, +#breadcrumbs .home { height: 21px; float: left; - font-size: 12px; - /*+text-shadow:0px 1px 1px #2A3640;*/ - -moz-text-shadow: 0px 1px 1px #2A3640; - -webkit-text-shadow: 0px 1px 1px #2A3640; - -o-text-shadow: 0px 1px 1px #2A3640; - text-shadow: 0px 1px 1px #2A3640; + font-size: 11px; color: #FFFFFF; - padding: 8px 5px 0px 8px; + padding: 9px 5px 0px 8px; cursor: pointer; 
/*+placement:shift -13px 0px;*/ position: relative; @@ -3326,54 +3330,39 @@ div.toolbar div.filters select { margin: 0 0 0 2px; } +#breadcrumbs ul li:after, +#breadcrumbs .home:after { + content: ">"; + font-size: 11px; + /*+placement:shift 7px -1px;*/ + position: relative; + left: 7px; + top: -1px; + color: #C4C4C4; +} + .project-view #breadcrumbs ul li { background: url(../images/bg-breadcrumb-project-view.png) 0px -2px; } #breadcrumbs ul li, -#breadcrumbs div.active-project { +#breadcrumbs div.active-project, +#breadcrumbs .home { /*+placement:shift 0px 0px;*/ position: relative; left: 0px; top: 0px; - color: #CED4D9; - /*+text-shadow:0px 0px;*/ - -moz-text-shadow: 0px 0px; - -webkit-text-shadow: 0px 0px; - -o-text-shadow: 0px 0px; - text-shadow: 0px 0px; - -moz-text-shadow: 0px 0px none; - -webkit-text-shadow: 0px 0px none; - -o-text-shadow: 0px 0px none; - -moz-text-shadow: none; - -webkit-text-shadow: none; - -o-text-shadow: none; - font-weight: bold; + color: #63A9F1; } #breadcrumbs ul li:hover, #breadcrumbs ul li.active, #breadcrumbs ul li.maximized { - color: #FFFFFF; - /*+text-shadow:0px 1px 1px #000000;*/ - -moz-text-shadow: 0px 1px 1px #000000; - -webkit-text-shadow: 0px 1px 1px #000000; - -o-text-shadow: 0px 1px 1px #000000; - text-shadow: 0px 1px 1px #000000; - font-size: 12px; + color: #000000; } +/*NOTE: End divs are not displayed per UI changes*/ #breadcrumbs div.end { - background: url(../images/buttons.png) no-repeat -10px -130px; - height: 35px; - width: 13px; - float: left; - /*+placement:shift -13px -1px;*/ - position: relative; - left: -13px; - top: -1px; - z-index: 5; - margin-right: 23px; } #breadcrumbs ul div.end { @@ -3382,6 +3371,8 @@ div.toolbar div.filters select { left: -37px; top: -1px; margin-right: 0px; + /*Disabled*/ + display: none; } #breadcrumbs ul li { @@ -3392,7 +3383,6 @@ div.toolbar div.filters select { top: 0px; margin-left: -10px; text-indent: 13px; - font-weight: bold; } #breadcrumbs div.active-project { @@ -3588,20 
+3578,15 @@ table tr.selected td.actions .action.disabled .icon { /*** Action icons Dialogs*/ .ui-dialog { - background: #E9E9E9 url(../images/bg-dialog-body.png) repeat-x 0px 0px; + background: #FFFFFF; text-align: left; - /*+border-radius:7px;*/ - -moz-border-radius: 7px; - -webkit-border-radius: 7px; - -khtml-border-radius: 7px; - border-radius: 7px; - border-radius: 7px 7px 7px 7px; /*+box-shadow:0px -4px 15px #4C4A4A;*/ -moz-box-shadow: 0px -4px 15px #4C4A4A; -webkit-box-shadow: 0px -4px 15px #4C4A4A; -o-box-shadow: 0px -4px 15px #4C4A4A; box-shadow: 0px -4px 15px #4C4A4A; position: absolute; + padding: 15px; } .ui-dialog .ui-widget-content { @@ -3736,8 +3721,8 @@ Dialogs*/ } .ui-dialog-titlebar { - background: #4C5F70 url(../images/bg-dialog-header.png); - color: #FFFFFF; + background: #FFFFFF; + color: #000000; height: 33px; /*+border-radius:7px 7px 0 0;*/ -moz-border-radius: 7px 7px 0 0; @@ -3758,11 +3743,6 @@ Dialogs*/ top: 9px; font-size: 14px; padding: 2px 0 5px 30px; - /*+text-shadow:0px -1px 1px #495968;*/ - -moz-text-shadow: 0px -1px 1px #495968; - -webkit-text-shadow: 0px -1px 1px #495968; - -o-text-shadow: 0px -1px 1px #495968; - text-shadow: 0px -1px 1px #495968; background: url(../images/icons.png) no-repeat 0px -255px; } @@ -3775,7 +3755,7 @@ Dialogs*/ } .ui-dialog.create-form .ui-dialog-title { - background: url(../images/icons.png) no-repeat 0px -256px; + background: url(../images/icons.png) no-repeat 0px -255px; } .ui-dialog.confirm .ui-button { @@ -3848,22 +3828,9 @@ Dialogs*/ .ui-dialog div.form-container div.value input { width: 98%; font-size: 14px; + padding: 4px; background: #F6F6F6; - /*+border-radius:4px;*/ - -moz-border-radius: 4px; - -webkit-border-radius: 4px; - -khtml-border-radius: 4px; - border-radius: 4px; - border-radius: 4px 4px 4px 4px; border: 1px solid #AFAFAF; - /*+box-shadow:inset 0px 1px #727272;*/ - -moz-box-shadow: inset 0px 1px #727272; - -webkit-box-shadow: inset 0px 1px #727272; - -o-box-shadow: inset 0px 1px 
#727272; - box-shadow: inset 0px 1px #727272; - -moz-box-shadow: inset 0px 1px 0px #727272; - -webkit-box-shadow: inset 0px 1px 0px #727272; - -o-box-shadow: inset 0px 1px 0px #727272; float: left; } @@ -4030,68 +3997,31 @@ Dialogs*/ /*User options*/ #user-options { - width: 115px; - height: 33px; - display: none; + background: #FFFFFF; + z-index: 10000; + width: 104px; position: absolute; - z-index: 5000; - /*+placement:shift 804px 42px;*/ - position: relative; - left: 804px; - top: 42px; - border-top: 1px solid #FFFFFF; - border-bottom: 1px solid #C6C4C5; - /*+border-radius:0 0 5px 5px;*/ - -moz-border-radius: 0 0 5px 5px; - -webkit-border-radius: 0 0 5px 5px; - -khtml-border-radius: 0 0 5px 5px; - border-radius: 0 0 5px 5px; - /*+box-shadow:0px 8px 17px #505050;*/ - -moz-box-shadow: 0px 8px 17px #505050; - -webkit-box-shadow: 0px 8px 17px #505050; - -o-box-shadow: 0px 8px 17px #505050; - box-shadow: 0px 8px 17px #505050; - padding: 0; - border: 1px solid #8A8888; + padding: 15px; + top: 30px; + /*+border-radius:0 0 3px 3px;*/ + -moz-border-radius: 0 0 3px 3px; + -webkit-border-radius: 0 0 3px 3px; + -khtml-border-radius: 0 0 3px 3px; + border-radius: 0 0 3px 3px; + /*+box-shadow:0px 1px 7px #000000;*/ + -moz-box-shadow: 0px 1px 7px #000000; + -webkit-box-shadow: 0px 1px 7px #000000; + -o-box-shadow: 0px 1px 7px #000000; + box-shadow: 0px 1px 7px #000000; } #user-options a { - position: relative; - background: #929292; + float: left; width: 100%; - display: block; - clear: both; - text-indent: 13px; - color: #FFFFFF; - text-decoration: none; - font-size: 12px; - /*+placement:shift 0px -51px;*/ - position: relative; - left: 0px; - top: -51px; - padding: 11px 0 9px; - border-bottom: 1px solid #C5C5C5; - border-top: 1px solid #FFFFFF; - /*+text-shadow:0px 2px 1px #606060;*/ - -moz-text-shadow: 0px 2px 1px #606060; - -webkit-text-shadow: 0px 2px 1px #606060; - -o-text-shadow: 0px 2px 1px #606060; - text-shadow: 0px 2px 1px #606060; - /*+border-radius:0 0 5px 5px;*/ - 
-moz-border-radius: 0 0 5px 5px; - -webkit-border-radius: 0 0 5px 5px; - -khtml-border-radius: 0 0 5px 5px; - border-radius: 0 0 5px 5px; + padding: 10px 0; } #user-options a:hover { - background: #ADADAD; - color: #FFFFFF; - /*+text-shadow:0px 1px 2px #383838;*/ - -moz-text-shadow: 0px 1px 2px #383838; - -webkit-text-shadow: 0px 1px 2px #383838; - -o-text-shadow: 0px 1px 2px #383838; - text-shadow: 0px 1px 2px #383838; } /*Dashboard @@ -4107,12 +4037,11 @@ Dialogs*/ .dashboard.admin .dashboard-container { background: #FFFFFF; border: 1px solid #C8C2C2; - /*+border-radius:6px;*/ - -moz-border-radius: 6px; - -webkit-border-radius: 6px; - -khtml-border-radius: 6px; - border-radius: 6px; - border-radius: 6px 6px 6px 6px; + /*+border-radius:3px;*/ + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + -khtml-border-radius: 3px; + border-radius: 3px; padding: 0px 8px 18px 0px; margin: 0 0 11px; } @@ -4121,69 +4050,53 @@ Dialogs*/ width: 368px; } -.dashboard.admin .dashboard-container.sub .button.view-all { - font-size: 10px; +.dashboard.admin .dashboard-container.sub .button.view-all, +.dashboard.admin .dashboard-container .button.fetch-latest { + font-size: 13px; float: right; clear: none; - /*+text-shadow:0px 1px #333E49;*/ - -moz-text-shadow: 0px 1px #333E49; - -webkit-text-shadow: 0px 1px #333E49; - -o-text-shadow: 0px 1px #333E49; - text-shadow: 0px 1px #333E49; + /*+text-shadow:none;*/ + -moz-text-shadow: none; + -webkit-text-shadow: none; + -o-text-shadow: none; + text-shadow: none; -moz-text-shadow: 0px 1px 0px #333E49; -webkit-text-shadow: 0px 1px 0px #333E49; -o-text-shadow: 0px 1px 0px #333E49; padding: 3px 8px 3px 10px; - background: url(../images/bg-gradients.png) repeat-x 0px -411px; - border: 1px solid #383838; - /*+border-radius:6px;*/ - -moz-border-radius: 6px; - -webkit-border-radius: 6px; - -khtml-border-radius: 6px; - border-radius: 6px; - border-radius: 6px 6px 6px 6px; - /*+box-shadow:0px 1px 1px #718CA5;*/ - -moz-box-shadow: 0px 1px 1px 
#718CA5; - -webkit-box-shadow: 0px 1px 1px #718CA5; - -o-box-shadow: 0px 1px 1px #718CA5; - box-shadow: 0px 1px 1px #718CA5; + background: rgb(234, 234, 234); + background: url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiA/Pgo8c3ZnIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgd2lkdGg9IjEwMCUiIGhlaWdodD0iMTAwJSIgdmlld0JveD0iMCAwIDEgMSIgcHJlc2VydmVBc3BlY3RSYXRpbz0ibm9uZSI+CiAgPGxpbmVhckdyYWRpZW50IGlkPSJncmFkLXVjZ2ctZ2VuZXJhdGVkIiBncmFkaWVudFVuaXRzPSJ1c2VyU3BhY2VPblVzZSIgeDE9IjAlIiB5MT0iMCUiIHgyPSIwJSIgeTI9IjEwMCUiPgogICAgPHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iI2VhZWFlYSIgc3RvcC1vcGFjaXR5PSIxIi8+CiAgICA8c3RvcCBvZmZzZXQ9IjEwMCUiIHN0b3AtY29sb3I9IiNkNmQ2ZDYiIHN0b3Atb3BhY2l0eT0iMSIvPgogIDwvbGluZWFyR3JhZGllbnQ+CiAgPHJlY3QgeD0iMCIgeT0iMCIgd2lkdGg9IjEiIGhlaWdodD0iMSIgZmlsbD0idXJsKCNncmFkLXVjZ2ctZ2VuZXJhdGVkKSIgLz4KPC9zdmc+); + background: -moz-linear-gradient(top, rgba(234,234,234,1) 0%, rgba(214,214,214,1) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,rgba(234,234,234,1)), color-stop(100%,rgba(214,214,214,1))); + background: -webkit-linear-gradient(top, rgba(234,234,234,1) 0%,rgba(214,214,214,1) 100%); + background: -o-linear-gradient(top, rgba(234,234,234,1) 0%,rgba(214,214,214,1) 100%); + background: -ms-linear-gradient(top, rgba(234,234,234,1) 0%,rgba(214,214,214,1) 100%); + background: linear-gradient(to bottom, rgba(234,234,234,1) 0%,rgba(214,214,214,1) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#eaeaea', endColorstr='#d6d6d6',GradientType=0 ); + border: 1px solid #9D9D9D; + /*+border-radius:3px;*/ + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + -khtml-border-radius: 3px; + border-radius: 3px; + /*+box-shadow:0px 1px #CACACA;*/ + -moz-box-shadow: 0px 1px #CACACA; + -webkit-box-shadow: 0px 1px #CACACA; + -o-box-shadow: 0px 1px #CACACA; + box-shadow: 0px 1px #CACACA; cursor: pointer; - font-weight: bold; + font-weight: 100; + color: #000000; } -.button.fetch-latest { - font-size: 
10px; - float: right; - clear: none; - /*+text-shadow:0px 1px #333E49;*/ - -moz-text-shadow: 0px 1px #333E49; - -webkit-text-shadow: 0px 1px #333E49; - -o-text-shadow: 0px 1px #333E49; - text-shadow: 0px 1px #333E49; - -moz-text-shadow: 0px 1px 0px #333E49; - -webkit-text-shadow: 0px 1px 0px #333E49; - -o-text-shadow: 0px 1px 0px #333E49; - padding: 3px 8px 3px 10px; - background: url(../images/bg-gradients.png) repeat-x 0px -411px; - border: 1px solid #383838; - /*+border-radius:6px;*/ - -moz-border-radius: 6px; - -webkit-border-radius: 6px; - -khtml-border-radius: 6px; - border-radius: 6px; - border-radius: 6px 6px 6px 6px; - /*+box-shadow:0px 1px 1px #718CA5;*/ - -moz-box-shadow: 0px 1px 1px #718CA5; - -webkit-box-shadow: 0px 1px 1px #718CA5; - -o-box-shadow: 0px 1px 1px #718CA5; - box-shadow: 0px 1px 1px #718CA5; - cursor: pointer; - font-weight: bold; -} - -.button.fetch-latest:hover { - background-position: 0 -132px; - border-left: 1px solid #585D60; +.dashboard.admin .dashboard-container.sub .button.view-all:hover, +.dashboard.admin .dashboard-container .button.fetch-latest:hover { + background: #E8E8E8; + /*+box-shadow:inset 0px 0px 6px #636363;*/ + -moz-box-shadow: inset 0px 0px 6px #636363; + -webkit-box-shadow: inset 0px 0px 6px #636363; + -o-box-shadow: inset 0px 0px 6px #636363; + box-shadow: inset 0px 0px 6px #636363; } .dashboard.admin .dashboard-container.sub .title { @@ -4193,29 +4106,24 @@ Dialogs*/ /**** Head*/ .dashboard.admin .dashboard-container.head { width: 766px; - height: 304px; + height: 331px; margin: 9px 0 0; float: left; } .dashboard.admin .dashboard-container .top { - background: url(../images/bg-breadcrumb.png) repeat-x 0px -1px; + background: #EFEFEF 0px -4px; padding: 4px 4px 8px; width: 100%; float: left; - margin: 0 0 9px; + margin: 0; color: #FFFFFF; - /*+border-radius:7px 7px 0 0;*/ - -moz-border-radius: 7px 7px 0 0; - -webkit-border-radius: 7px 7px 0 0; - -khtml-border-radius: 7px 7px 0 0; - border-radius: 7px 7px 0 0; } 
.dashboard.admin .dashboard-container .title { float: left; - font-size: 12px; - font-weight: bold; + font-size: 13px; + font-weight: 100; /*+text-shadow:0px 1px 1px #9A9A9A;*/ -moz-text-shadow: 0px 1px 1px #9A9A9A; -webkit-text-shadow: 0px 1px 1px #9A9A9A; @@ -4225,11 +4133,12 @@ Dialogs*/ } .dashboard.admin .dashboard-container .title span { - /*+text-shadow:0px 1px 1px #050607;*/ - -moz-text-shadow: 0px 1px 1px #050607; - -webkit-text-shadow: 0px 1px 1px #050607; - -o-text-shadow: 0px 1px 1px #050607; - text-shadow: 0px 1px 1px #050607; + color: #000000; + /*+text-shadow:none;*/ + -moz-text-shadow: none; + -webkit-text-shadow: none; + -o-text-shadow: none; + text-shadow: none; } .dashboard.admin .dashboard-container.head .selects { @@ -4257,7 +4166,7 @@ Dialogs*/ /**** Charts / stats*/ .dashboard.admin .zone-stats { width: 774px; - height: 297px; + height: 348px; overflow: auto; overflow-x: hidden; /*+placement:shift 0px -11px;*/ @@ -4268,20 +4177,16 @@ Dialogs*/ .dashboard.admin .zone-stats ul { width: 796px; - /*+placement:shift -1px 0px;*/ + /*+placement:shift -2px 11px;*/ position: relative; - left: -1px; - top: 0px; + left: -2px; + top: 11px; } .dashboard.admin .zone-stats ul li { - background: #FFFFFF url(../images/bg-gradients.png) repeat-x 0px -1244px; - width: 387px; - font-size: 11px; - border: 1px solid #BDBDBD; - border-left: none; - border-top: none; - height: 73px; + width: 388px; + font-size: 14px; + height: 79px; float: left; position: absolute; position: relative; @@ -4299,28 +4204,41 @@ Dialogs*/ } .dashboard.admin .zone-stats ul li .label { - width: 138px; + width: 111px; float: left; - padding: 22px 0px 0 10px; + font-weight: 100; + border-bottom: 1px solid #E2E2E2; + margin: 5px 0 0 22px; + padding: 22px 0 7px; } .dashboard.admin .zone-stats ul li .info { float: left; - width: 120px; + width: 151px; + white-space: nowrap; margin: 12px 0 0; color: #636363; } .dashboard.admin .zone-stats ul li .info .name { font-weight: bold; + margin-top: 
8px; + margin-bottom: 9px; + font-size: 12px; + font-weight: 100; + /*[empty]color:;*/ } .dashboard.admin .zone-stats ul li .pie-chart-container { - width: 95px; + width: 91px; height: 69px; overflow: hidden; float: left; position: relative; + /*+placement:shift -8px 7px;*/ + position: relative; + left: -8px; + top: 7px; } .dashboard.admin .zone-stats ul li .pie-chart-container .percent-label { @@ -4446,8 +4364,8 @@ Dialogs*/ /**** Alerts*/ .dashboard.admin .dashboard-container.sub.alerts { float: left; - margin: 0 17px 0 0; - height: 313px; + margin: 0 12px 0 0; + height: 270px; overflow: hidden; position: relative; } @@ -4458,7 +4376,7 @@ Dialogs*/ .dashboard.admin .dashboard-container.sub.alerts ul { width: 368px; - height: 274px; + height: 234px; overflow: auto; overflow-x: hidden; position: relative; @@ -4466,33 +4384,19 @@ Dialogs*/ } .dashboard.admin .dashboard-container.sub.alerts ul li { - display: block; - width: 358px; - height: 42px; - color: #000000; - background: url(../images/bg-gradients.png) 0px -48px; + background: #F0F0F0; + float: left; border: 1px solid #D4D0D0; - clear: both; - /*+placement:shift 0px 15px;*/ - position: relative; - left: 0px; - top: 15px; - /*+border-radius:5px;*/ - -moz-border-radius: 5px; - -webkit-border-radius: 5px; - -khtml-border-radius: 5px; - border-radius: 5px; - border-radius: 5px 5px 5px 5px; - margin: 0px 0 12px; - /*[empty]color:;*/ - padding: 0; + /*+border-radius:3px;*/ + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + -khtml-border-radius: 3px; + border-radius: 3px; + margin: 9px; + padding: 8px; } .dashboard.admin .dashboard-container.sub.alerts ul li .content { - height: 43px; - padding: 0 0px 0 63px; - margin: 0; - background: url(../images/icons.png) no-repeat -601px -428px; } .dashboard.admin .dashboard-container.sub.alerts ul li { @@ -4500,8 +4404,10 @@ Dialogs*/ .dashboard.admin .dashboard-container.sub.alerts ul li span.title { font-weight: bold; - font-size: 11px; - margin: 7px 0 0; + 
font-size: 14px; + font-weight: 100; + color: #266E9A; + margin: 3px 0 5px; /*+text-shadow:0px 1px #FFFFFF;*/ -moz-text-shadow: 0px 1px #FFFFFF; -webkit-text-shadow: 0px 1px #FFFFFF; @@ -4511,16 +4417,9 @@ Dialogs*/ } .dashboard.admin .dashboard-container.sub.alerts ul li p { - display: block; - clear: both; - font-size: 11px; float: left; - height: 10px; - max-width: 287px; - margin-top: 1px; - overflow: hidden; - text-overflow: ellipsis; - white-space: nowrap; + margin: 4px 0px 0px; + color: #252525; } .dashboard.admin .dashboard-container.sub.alerts ul li p br { @@ -5376,38 +5275,34 @@ label.error { float: left; width: 128px; height: 40px; - border: 1px solid #5B7084; - border-left: 1px solid #000000; - border-right: none; - background: url(../images/bg-gradients.png) 0px -261px; padding: 0 0px; position: relative; } .multi-wizard.instance-wizard .progress ul li { - width: 109px; + width: 102px; + margin-left: 8px; } .multi-wizard .progress ul li.first { - /*+border-radius:5px 0 0 5px;*/ - -moz-border-radius: 5px 0 0 5px; - -webkit-border-radius: 5px 0 0 5px; - -khtml-border-radius: 5px 0 0 5px; - border-radius: 5px 0 0 5px; + /*+border-radius:5px;*/ + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + -khtml-border-radius: 5px; + border-radius: 5px; } .multi-wizard .progress ul li.last { - /*+border-radius:0 5px 5px 0;*/ - -moz-border-radius: 0 5px 5px 0; - -webkit-border-radius: 0 5px 5px 0; - -khtml-border-radius: 0 5px 5px 0; - border-radius: 0 5px 5px 0; - border-right: 1px solid #5B7084; } .multi-wizard .progress ul li.active { background: url(../images/bg-gradients.png) 0px -221px; height: 40px; + /*+border-radius:5px;*/ + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + -khtml-border-radius: 5px; + border-radius: 5px; } .multi-wizard .progress ul li span { @@ -5417,7 +5312,7 @@ label.error { left: 46px; top: 17px; text-align: left; - color: #656565; + color: #000000; /*+text-shadow:0px 1px 1px #FFFFFF;*/ -moz-text-shadow: 0px 1px 1px 
#FFFFFF; -webkit-text-shadow: 0px 1px 1px #FFFFFF; @@ -5431,6 +5326,7 @@ label.error { .multi-wizard.instance-wizard .progress ul li span { left: 26px; + top: 16px; } .multi-wizard .progress ul li span.multiline { @@ -5447,10 +5343,11 @@ label.error { margin-top: -3px; background: url(../images/icons.png) no-repeat 0px -422px; z-index: 1000; + display: none; } .multi-wizard.instance-wizard .progress ul li span.arrow { - left: 27px; + left: 19px; } .multi-wizard .progress ul li.active span.arrow { @@ -5460,16 +5357,11 @@ label.error { .multi-wizard .progress ul li span.number { width: auto; position: absolute; - top: 8px; + top: 7px; left: 26px; font-size: 27px; font-weight: bold; - color: #E8E8E8; - /*+text-shadow:0px -1px 1px #C2BDBD;*/ - -moz-text-shadow: 0px -1px 1px #C2BDBD; - -webkit-text-shadow: 0px -1px 1px #C2BDBD; - -o-text-shadow: 0px -1px 1px #C2BDBD; - text-shadow: 0px -1px 1px #C2BDBD; + color: #BBBBBB; background: transparent; } @@ -5479,7 +5371,7 @@ label.error { .multi-wizard.instance-wizard .progress ul li span.multiline { width: 79px; - left: 23px; + left: 26px; } .multi-wizard .progress ul li.active span { @@ -5492,16 +5384,6 @@ label.error { } .multi-wizard .progress ul li.active span.number { - /*+opacity:26%;*/ - filter: alpha(opacity=26); - -ms-filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=26); - -moz-opacity: 0.26; - opacity: 0.26; - /*+text-shadow:0px -1px 1px #000000;*/ - -moz-text-shadow: 0px -1px 1px #000000; - -webkit-text-shadow: 0px -1px 1px #000000; - -o-text-shadow: 0px -1px 1px #000000; - text-shadow: 0px -1px 1px #000000; } /*** Content*/ @@ -5935,7 +5817,7 @@ label.error { .multi-wizard .buttons .button.previous { background: #D6D6D6; color: #62798E; - margin-left: 12px; + margin-left: 27px; } .multi-wizard .buttons .button.previous:hover { @@ -5977,11 +5859,11 @@ label.error { margin-top: 9px !important; } -.multi-wizard.instance-wizard .data-disk-offering.required.custom-disk-size .select-container { 
+.multi-wizard.instance-wizard .custom-disk-size .select-container { height: 279px; } -.multi-wizard.instance-wizard .data-disk-offering.custom-disk-size .select-container { +.multi-wizard.instance-wizard .custom-disk-size .select-container { height: 213px; margin: -7px 6px 0 8px; /*+border-radius:6px;*/ @@ -5992,21 +5874,21 @@ label.error { border-radius: 6px 6px 6px 6px; } -.multi-wizard.instance-wizard .data-disk-offering .content .section input { +.multi-wizard.instance-wizard .content .section input { float: left; } -.multi-wizard.instance-wizard .data-disk-offering .content .section input[type=radio] { +.multi-wizard.instance-wizard .content .section input[type=radio] { margin: 8px 2px 0 17px; } -.multi-wizard.instance-wizard .data-disk-offering .content .section label { +.multi-wizard.instance-wizard .content .section label { display: block; float: left; margin: 10px 7px 7px; } -.multi-wizard.instance-wizard .data-disk-offering .content .section label.size { +.multi-wizard.instance-wizard .content .section label.size { color: #647A8E; font-weight: bold; /*+text-shadow:0px 1px 1px #FFFFFF;*/ @@ -6016,27 +5898,61 @@ label.error { text-shadow: 0px 1px 1px #FFFFFF; } -.multi-wizard.instance-wizard .data-disk-offering .content .section.custom-size { +.multi-wizard.instance-wizard .section.custom-size { position: relative; + background: #F4F4F4; + padding: 7px; + border-radius: 4px; } -.multi-wizard.instance-wizard .data-disk-offering .content .section.custom-size input[type=radio] { +.multi-wizard.instance-wizard .section.custom-size input[type=radio] { float: left; } -.multi-wizard.instance-wizard .data-disk-offering .content .section.custom-size input[type=text] { +.multi-wizard.instance-wizard .section.custom-size input[type=text] { float: left; width: 28px; margin: 6px -1px 0 8px; } -.multi-wizard.instance-wizard .data-disk-offering .content .section.custom-size label.error { +.multi-wizard.instance-wizard .section.custom-size label.error { position: 
absolute; top: 29px; left: 242px; font-size: 10px; } +/*** Compute offering*/ +.instance-wizard .step.service-offering { +} + +.instance-wizard .step.service-offering.custom-size .select-container { + height: 235px; +} + +.instance-wizard .step.service-offering .custom-size { + display: none; +} + +.instance-wizard .step.service-offering.custom-size .custom-size { + display: block; +} + +.instance-wizard .step.service-offering .custom-size .field { + width: 30%; + float: left; + margin-bottom: 13px; +} + +.instance-wizard .step.service-offering .custom-size .field label { + text-indent: 20px; +} + +.instance-wizard .step.service-offering .custom-size .field input { + width: 88%; + margin-left: 26px; +} + /*** Network*/ .multi-wizard.instance-wizard .no-network { background: #FFFFFF; @@ -6415,10 +6331,10 @@ label.error { .multi-wizard.zone-wizard ul.subnav { text-align: left; - /*+placement:shift 13px 87px;*/ + /*+placement:shift 30px 104px;*/ position: relative; - left: 13px; - top: 87px; + left: 30px; + top: 104px; position: absolute; list-style: disc inside; } @@ -6427,7 +6343,9 @@ label.error { float: left; padding: 0; font-size: 12px; + white-space: nowrap; text-transform: uppercase; + list-style: none; height: 20px; margin-right: 34px; color: #9A9A9A; @@ -6494,8 +6412,9 @@ label.error { text-align: center; /*+placement:shift 0px 153px;*/ position: relative; - left: -10px; + left: 0px; top: 153px; + left: -10px; /*+text-shadow:0px 1px #FFFFFF;*/ -moz-text-shadow: 0px 1px #FFFFFF; -webkit-text-shadow: 0px 1px #FFFFFF; @@ -6542,7 +6461,7 @@ label.error { } .multi-wizard.zone-wizard .buttons { - top: 584px; + top: 609px; } .multi-wizard.zone-wizard .progress ul { @@ -6550,7 +6469,8 @@ label.error { } .multi-wizard.zone-wizard .progress ul li { - width: 114px; + width: 107px; + margin-left: 7px; padding: 0 32px 0 0; } @@ -7615,6 +7535,9 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t .multi-edit table th { min-width: 88px; + 
white-space: nowrap; + text-align: center; + text-indent: 0; } .detail-group .multi-edit table td { @@ -8627,28 +8550,23 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t height: 258px; width: 762px; display: block; - background: #8DA4B9 url(../images/bg-gradients.png) repeat-x 0px -475px; - /*+border-radius:8px;*/ - -moz-border-radius: 8px; - -webkit-border-radius: 8px; - -khtml-border-radius: 8px; - border-radius: 8px; - border-radius: 8px 8px 8px 8px; + /*+border-radius:3px;*/ + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + -khtml-border-radius: 3px; + border-radius: 3px; /*+box-shadow:inset 0px 0px 1px #FFFFFF;*/ -moz-box-shadow: inset 0px 0px 1px #FFFFFF; -webkit-box-shadow: inset 0px 0px 1px #FFFFFF; -o-box-shadow: inset 0px 0px 1px #FFFFFF; box-shadow: inset 0px 0px 1px #FFFFFF; - border: 1px solid #BFD4E1; position: relative; margin: 18px 0 0 15px; - font-weight: bold; } .system-dashboard.zone { height: 609px; background-position: 0px -1423px; - background-color: #FFFFFF; } .system-dashboard-view .toolbar { @@ -8656,15 +8574,14 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t } .system-dashboard .head { - color: #505F6F; + color: #000000; /*+text-shadow:0px 1px 1px #FFFFFF;*/ -moz-text-shadow: 0px 1px 1px #FFFFFF; -webkit-text-shadow: 0px 1px 1px #FFFFFF; -o-text-shadow: 0px 1px 1px #FFFFFF; text-shadow: 0px 1px 1px #FFFFFF; text-indent: 11px; - padding: 12px 0 11px; - border-bottom: 1px solid #728EA7; + padding: 0px 0 12px; /*+box-shadow:0px 0px 1px #FFFFFF;*/ -moz-box-shadow: 0px 0px 1px #FFFFFF; -webkit-box-shadow: 0px 0px 1px #FFFFFF; @@ -8675,106 +8592,95 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t .system-dashboard .view-more, .system-dashboard .view-all { float: right; - padding: 5px 21px 5px 9px; margin: -4px 19px 0 0; cursor: pointer; - font-size: 11px; - font-weight: bold; - background: #4B5B6A url(../images/bg-gradients.png) 
repeat-x 0px -735px; - color: #FFFFFF; + font-size: 13px; + font-weight: 100; + background: #DADADA repeat-x 0px -735px; + background: rgb(234, 234, 234); + background: url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiA/Pgo8c3ZnIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgd2lkdGg9IjEwMCUiIGhlaWdodD0iMTAwJSIgdmlld0JveD0iMCAwIDEgMSIgcHJlc2VydmVBc3BlY3RSYXRpbz0ibm9uZSI+CiAgPGxpbmVhckdyYWRpZW50IGlkPSJncmFkLXVjZ2ctZ2VuZXJhdGVkIiBncmFkaWVudFVuaXRzPSJ1c2VyU3BhY2VPblVzZSIgeDE9IjAlIiB5MT0iMCUiIHgyPSIwJSIgeTI9IjEwMCUiPgogICAgPHN0b3Agb2Zmc2V0PSIwJSIgc3RvcC1jb2xvcj0iI2VhZWFlYSIgc3RvcC1vcGFjaXR5PSIxIi8+CiAgICA8c3RvcCBvZmZzZXQ9IjEwMCUiIHN0b3AtY29sb3I9IiNkNmQ2ZDYiIHN0b3Atb3BhY2l0eT0iMSIvPgogIDwvbGluZWFyR3JhZGllbnQ+CiAgPHJlY3QgeD0iMCIgeT0iMCIgd2lkdGg9IjEiIGhlaWdodD0iMSIgZmlsbD0idXJsKCNncmFkLXVjZ2ctZ2VuZXJhdGVkKSIgLz4KPC9zdmc+); + background: -moz-linear-gradient(top, rgba(234,234,234,1) 0%, rgba(214,214,214,1) 100%); + background: -webkit-gradient(linear, left top, left bottom, color-stop(0%,rgba(234,234,234,1)), color-stop(100%,rgba(214,214,214,1))); + background: -webkit-linear-gradient(top, rgba(234,234,234,1) 0%,rgba(214,214,214,1) 100%); + background: -o-linear-gradient(top, rgba(234,234,234,1) 0%,rgba(214,214,214,1) 100%); + background: -ms-linear-gradient(top, rgba(234,234,234,1) 0%,rgba(214,214,214,1) 100%); + background: linear-gradient(to bottom, rgba(234,234,234,1) 0%,rgba(214,214,214,1) 100%); + filter: progid:DXImageTransform.Microsoft.gradient( startColorstr='#eaeaea', endColorstr='#d6d6d6',GradientType=0 ); /*+border-radius:3px;*/ -moz-border-radius: 3px; -webkit-border-radius: 3px; -khtml-border-radius: 3px; border-radius: 3px; border-radius: 3px 3px 3px 3px; - border: 1px solid #5A5A5A; - /*+box-shadow:inset 0px 0px 1px #FFFFFF;*/ - -moz-box-shadow: inset 0px 0px 1px #FFFFFF; - -webkit-box-shadow: inset 0px 0px 1px #FFFFFF; - -o-box-shadow: inset 0px 0px 1px #FFFFFF; - box-shadow: inset 0px 0px 1px #FFFFFF; - /*+text-shadow:0px 1px 1px #000000;*/ - 
-moz-text-shadow: 0px 1px 1px #000000; - -webkit-text-shadow: 0px 1px 1px #000000; - -o-text-shadow: 0px 1px 1px #000000; - text-shadow: 0px 1px 1px #000000; + border: 1px solid #B5B5B5; } .system-dashboard .view-more:hover, .system-dashboard .view-all:hover { background-position: 0px -763px; - /*+box-shadow:inset 0px 1px 1px #313131;*/ - -moz-box-shadow: inset 0px 1px 1px #313131; - -webkit-box-shadow: inset 0px 1px 1px #313131; - -o-box-shadow: inset 0px 1px 1px #313131; - box-shadow: inset 0px 1px 1px #313131; + /*+box-shadow:inset 0px 1px 1px #000000;*/ + -moz-box-shadow: inset 0px 1px 1px #000000; + -webkit-box-shadow: inset 0px 1px 1px #000000; + -o-box-shadow: inset 0px 1px 1px #000000; + box-shadow: inset 0px 1px 1px #000000; + background: #C1C1C1; } .system-dashboard .status_box .view-all { - /*+placement:shift 18px 136px;*/ + /*+placement:shift 18px 110px;*/ position: relative; left: 18px; - top: 136px; + top: 110px; width: 78%; position: absolute; text-align: center; - padding: 5px 0 8px; + padding: 8px 0; } .system-dashboard .status_box { font-size: 14px; - margin: 28px 0 0; + margin: 10px 0 0; background: transparent; border: none; } .system-dashboard .status_box li { height: 178px; - width: 180px; + width: 178px; padding: 0; - background: url(../images/bg-gradients.png) repeat-x 0px -1003px; margin: 0 0 0 8px; - /*+border-radius:7px;*/ - -moz-border-radius: 7px; - -webkit-border-radius: 7px; - -khtml-border-radius: 7px; - border-radius: 7px; - border-radius: 7px 7px 7px 7px; - /*+box-shadow:inset 0px 1px 3px #000000;*/ - -moz-box-shadow: inset 0px 1px 3px #000000; - -webkit-box-shadow: inset 0px 1px 3px #000000; - -o-box-shadow: inset 0px 1px 3px #000000; - box-shadow: inset 0px 1px 3px #000000; + /*+border-radius:3px;*/ + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + -khtml-border-radius: 3px; + border-radius: 3px; position: relative; - border: none; + border: 1px solid #C6C6C6; float: left; } .system-dashboard.zone .status_box li { - 
margin-bottom: 120px; - height: 176px; - background-position: 0px -1005px; - background-color: #35404B; + margin-bottom: 8px; + height: 152px; + background-color: #F4F4F4; } .system-dashboard.zone .status_box li .icon { background: url(../images/infrastructure-icons.png) no-repeat 0px 0px; padding: 65px 80px 5px; - /*+placement:shift 25px 19px;*/ + /*+placement:shift 31px 19px;*/ position: relative; - left: 25px; + left: 31px; top: 19px; position: absolute; + /*+opacity:56%;*/ + filter: alpha(opacity=56); + -ms-filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=56); + -moz-opacity: 0.56; + opacity: 0.56; } .system-dashboard .status_box li span { - color: #FFFFFF; - /*+text-shadow:0px 1px 1px #000000;*/ - -moz-text-shadow: 0px 1px 1px #000000; - -webkit-text-shadow: 0px 1px 1px #000000; - -o-text-shadow: 0px 1px 1px #000000; - text-shadow: 0px 1px 1px #000000; } .system-dashboard .status_box li span.label { @@ -8802,15 +8708,12 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t position: relative; left: 13px; top: 5px; - /*+text-shadow:0px 1px 2px #000000;*/ - -moz-text-shadow: 0px 1px 2px #000000; - -webkit-text-shadow: 0px 1px 2px #000000; - -o-text-shadow: 0px 1px 2px #000000; - text-shadow: 0px 1px 2px #000000; + font-weight: 100; } .system-dashboard.zone .status_box li span.header { - font-size: 13px; + font-size: 14px; + color: #4F4F4F; } .system-dashboard .status_box li span.status { @@ -8883,7 +8786,13 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t left: 9px; top: 29px; position: absolute; - font-weight: normal; + font-weight: 100; + color: #2B7DAF; + /*+text-shadow:0px -1px 2px #FFFFFF;*/ + -moz-text-shadow: 0px -1px 2px #FFFFFF; + -webkit-text-shadow: 0px -1px 2px #FFFFFF; + -o-text-shadow: 0px -1px 2px #FFFFFF; + text-shadow: 0px -1px 2px #FFFFFF; } .system-dashboard .status_box li.capacity span.overview.total { @@ -8948,6 +8857,60 @@ div.ui-dialog div.multi-edit-add-list div.view 
div.data-table table.body tbody t position: absolute; } +.system-dashboard-view .socket-info { + width: 100%; + height: 239px; + overflow: auto; + float: left; + padding: 0; +} + +.system-dashboard-view .socket-info > .title { + padding: 8px; + font-size: 13px; +} + +.system-dashboard-view .socket-info ul { +} + +.system-dashboard-view .socket-info li { + width: 139px; + padding: 13px; + /*+border-radius:3px;*/ + -moz-border-radius: 3px; + -webkit-border-radius: 3px; + -khtml-border-radius: 3px; + border-radius: 3px; + margin: 7px; + border: 1px solid #CCC; + background: #EFEFEF; + float: left; +} + +.system-dashboard-view .socket-info li > div { + text-decoration: none; + float: left; +} + +.system-dashboard-view .socket-info li .name { + width: 100%; + font-weight: 100; + margin-bottom: 13px; +} + +.system-dashboard-view .socket-info li .hosts, +.system-dashboard-view .socket-info li .sockets { + width: 54px; + /*[empty]color:;*/ +} + +.system-dashboard-view .socket-info li div .title { + color: #424242; + border: none; + font-size: 13px; + padding-bottom: 3px; +} + .add-zone-resource .form-container { height: auto !important; display: inline-block; @@ -9116,7 +9079,7 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t .project-switcher { float: left; width: 223px; - padding: 9px 17px 0 0; + padding: 9px 17px 0 19px; /*+border-radius:4px;*/ -moz-border-radius: 4px; -webkit-border-radius: 4px; @@ -9125,54 +9088,29 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t } .project-switcher label { - position: absolute; - top: 15px; + top: 29px; color: #FFFFFF; font-size: 12px; font-weight: bold; + float: left; + margin-right: 7px; + margin-top: 5px; } .project-switcher select { - width: 79%; - font-weight: bold; - font-size: 12px; - /*+text-shadow:0px -1px 1px #000000;*/ - -moz-text-shadow: 0px -1px 1px #000000; - -webkit-text-shadow: 0px -1px 1px #000000; - -o-text-shadow: 0px -1px 1px #000000; - 
text-shadow: 0px -1px 1px #000000; - border: 1px solid #9A9A9A; - border-bottom: #FFFFFF; - /*+box-shadow:inset 0px -1px #A2A2A2;*/ - -moz-box-shadow: inset 0px -1px #A2A2A2; - -webkit-box-shadow: inset 0px -1px #A2A2A2; - -o-box-shadow: inset 0px -1px #A2A2A2; - box-shadow: inset 0px -1px #A2A2A2; - /*+border-radius:4px;*/ - -moz-border-radius: 4px; - -webkit-border-radius: 4px; - -khtml-border-radius: 4px; - border-radius: 4px; + width: 70%; + float: left; + margin-top: -1px; + border: 1px solid #393939; + /*+text-shadow:0px -1px 1px #373737;*/ + -moz-text-shadow: 0px -1px 1px #373737; + -webkit-text-shadow: 0px -1px 1px #373737; + -o-text-shadow: 0px -1px 1px #373737; + text-shadow: 0px -1px 1px #373737; + background: #515151; + font-size: 15px; + font-weight: 100; color: #FFFFFF; - margin-top: -2px; - margin-left: 53px; - background: url(../images/bg-gradients.png) 0px -867px; - padding: 5px; - height: 28px; -} - -.project-switcher select:hover { - /*+box-shadow:inset 0px 2px 6px #3B3B3B;*/ - -moz-box-shadow: inset 0px 2px 6px #3B3B3B; - -webkit-box-shadow: inset 0px 2px 6px #3B3B3B; - -o-box-shadow: inset 0px 2px 6px #3B3B3B; - box-shadow: inset 0px 2px 6px #3B3B3B; - cursor: pointer; - border-bottom: 1px solid #828282; -} - -.project-switcher select option { - background: #7F8487; } /*** Select project*/ @@ -9863,8 +9801,9 @@ div.ui-dialog div.multi-edit-add-list div.view div.data-table table.body tbody t color: #838181; /*+placement:shift 488px 9px;*/ position: relative; - left: 480px; + left: 488px; top: 9px; + left: 480px; margin: 19px 0 0 40px; } @@ -11628,24 +11567,19 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it .region-switcher { display: inline-block; position: relative; - background: url(../images/bg-gradients.png) repeat-x 0px -867px; - border: 1px solid #5E5E5E; /*+border-radius:4px;*/ -moz-border-radius: 4px; -webkit-border-radius: 4px; -khtml-border-radius: 4px; border-radius: 4px; - border-top: 1px solid 
#717171; - border-bottom: 1px solid #FFFFFF; height: 28px; float: left; margin: 5px 13px 0 0; cursor: pointer; - /*+box-shadow:inset 0px 1px 1px #000000;*/ - -moz-box-shadow: inset 0px 1px 1px #000000; - -webkit-box-shadow: inset 0px 1px 1px #000000; - -o-box-shadow: inset 0px 1px 1px #000000; - box-shadow: inset 0px 1px 1px #000000; + /*+placement:shift 27px 1px;*/ + position: relative; + left: 27px; + top: 1px; } .region-selector { @@ -11657,12 +11591,12 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it width: 318px; background: url(../images/bg-notifications.png) center; height: 372px; - /*+placement:shift 321px 49px;*/ + /*+placement:shift 185px 49px;*/ position: relative; - left: 321px; + left: 185px; top: 49px; position: absolute; - z-index: 1000; + z-index: 5500; } .region-selector h2 { @@ -11783,8 +11717,11 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it font-weight: bold; max-width: 67px; overflow: hidden; - text-overflow: ellipsis; white-space: nowrap; + /*+placement:shift -1px 0px;*/ + position: relative; + left: -1px; + top: 0px; } .region-switcher:hover, @@ -11841,6 +11778,7 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it } .destroy .icon, +.expunge .icon, .remove .icon, .delete .icon, .decline .icon, @@ -11849,6 +11787,7 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it } .destroy:hover .icon, +.expunge:hover .icon, .remove:hover .icon, .delete:hover .icon, .deleteacllist:hover .icon { @@ -11984,13 +11923,15 @@ div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it .add .icon, .addNew .icon, -.assignVm .icon { +.assignVm .icon, +.rootAdminAddGuestNetwork .icon { background-position: -37px -61px; } .add:hover .icon, .addNew:hover .icon, -.assignVm:hover .icon { +.assignVm:hover .icon, +.rootAdminAddGuestNetwork:hover .icon { background-position: -37px -643px; } @@ -12265,103 +12206,132 @@ 
div.ui-dialog div.autoscaler div.field-group div.form-container form div.form-it background-position: -230px -677px; } +.assignVmToAnotherAccount .icon { + background-position: -270px -36px; +} + +.assignVmToAnotherAccount:hover .icon { + background-position: -270px -615px; +} + .label-hovered { cursor: pointer; color: #0000FF !important; } .accounts-wizard table { - margin: 0; - width: 100%; - table-layout: fixed; + margin: 0; + width: 100%; + table-layout: fixed; } + .accounts-wizard .ui-button { - display: inline-block !important; - float: none !important; + display: inline-block !important; + float: none !important; } + .accounts-wizard td:last-child { - border: none; + border: none; } + .accounts-wizard tbody tr:nth-child(even) { - background: #DFE1E3; + background: #DFE1E3; } + .accounts-wizard tbody tr:nth-child(odd) { - background: #F2F0F0; + background: #F2F0F0; } + .accounts-wizard .content { - display: inline-block; + display: inline-block; } + .accounts-wizard .content:last-child { - margin-left: 14px; + margin-left: 14px; } + .accounts-wizard .input-area { - width: 320px; - font-size: 13px; - color: #485867; - text-shadow: 0px 2px 1px #FFFFFF; + width: 320px; + font-size: 13px; + color: #485867; + text-shadow: 0px 2px 1px #FFFFFF; } + .ldap-account-choice { - border: none !important; - border-radius: 0 0 0 0 !important; + border: none !important; + border-radius: 0 0 0 0 !important; } + .manual-account-details .name { - margin-top: 2px; - width: 100px; - float: left; - padding-bottom:10px; + margin-top: 2px; + width: 100px; + float: left; + padding-bottom: 10px; } + .manual-account-details { - height: auto !important; - overflow: visible !important; - overflow-x: visible !important; + height: auto !important; + overflow: visible !important; + overflow-x: visible !important; } + .manual-account-details label.error { - display: block; - font-size: 10px; + display: block; + font-size: 10px; } + .manual-account-details .value { - float: left; + float: 
left; } + .manual-account-details .form-item:after { - content:"."; - display: block; - clear: both; - visibility: hidden; - line-height: 0; - height: 0; + content: "."; + display: block; + clear: both; + visibility: hidden; + line-height: 0; + height: 0; } + .manual-account-details .form-item { - padding: 5px; - width: 100%; - -webkit-box-sizing: border-box; - -moz-box-sizing: border-box; - box-sizing: border-box; -} -.manual-account-details select, .manual-account-details input { - width: 150px; + padding: 5px; + width: 100%; + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; } + +.manual-account-details select, .manual-account-details input { - background: #F6F6F6; - -moz-border-radius: 4px; - -webkit-border-radius: 4px; - -khtml-border-radius: 4px; - border-radius: 4px; - border-radius: 4px 4px 4px 4px; - border: 1px solid #AFAFAF; - -moz-box-shadow: inset 0px 1px #727272; - -webkit-box-shadow: inset 0px 1px #727272; - -o-box-shadow: inset 0px 1px #727272; - box-shadow: inset 0px 1px #727272; - -moz-box-shadow: inset 0px 1px 0px #727272; - -webkit-box-shadow: inset 0px 1px 0px #727272; - -o-box-shadow: inset 0px 1px 0px #727272; + width: 150px; } + +.manual-account-details input { + background: #F6F6F6; + -moz-border-radius: 4px; + -webkit-border-radius: 4px; + -khtml-border-radius: 4px; + border-radius: 4px; + border-radius: 4px 4px 4px 4px; + border: 1px solid #AFAFAF; + -moz-box-shadow: inset 0px 1px #727272; + -webkit-box-shadow: inset 0px 1px #727272; + -o-box-shadow: inset 0px 1px #727272; + box-shadow: inset 0px 1px #727272; + -moz-box-shadow: inset 0px 1px 0px #727272; + -webkit-box-shadow: inset 0px 1px 0px #727272; + -o-box-shadow: inset 0px 1px 0px #727272; +} + .manual-account-details > *:nth-child(even) { - background: #DFE1E3; + background: #DFE1E3; } + .manual-account-details > *:nth-child(odd) { - background: #F2F0F0; + background: #F2F0F0; } + .manual-account-details .value { - display: inline-block; + 
display: inline-block; } + diff --git a/ui/dictionary.jsp b/ui/dictionary.jsp index 15c199964b3..1bf0eab4648 100644 --- a/ui/dictionary.jsp +++ b/ui/dictionary.jsp @@ -25,6 +25,12 @@ under the License. <% long now = System.currentTimeMillis(); %> + + + + + + + -
-

Schedule:

+ + + + + + + + + + + + + + + + + + -
-
    -
  • -
  • -
  • -
  • -
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - -
-
- - - -
-
-
- - -
-
- - -
-
-
- -
-
- - -
-
Keep
-
- - -
-
-
-
- - -
-
- - - -
-
Time
-
- - - -
-
- - -
-
Timezone
-
- -
-
- - -
-
Keep
-
- - -
-
-
-
- - -
-
- - - -
-
Time
-
- - - -
-
- - -
-
Day of week
-
- -
-
- - -
-
Timezone
-
- -
-
- - -
-
Keep
-
- - -
-
-
-
- - -
-
- - - -
-
Time
-
- - - -
-
- - -
-
Day of month
-
- -
-
- - -
-
Timezone
-
- -
-
- - -
-
Keep
-
- - -
-
-
-
-
- -
-
-
-
- -
-

Scheduled Snapshots

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Time: min past the hrTimezone:
Keep:
 
Time: Timezone:
Keep:
 
Time: Every Timezone:
Keep:
 
Time: Day of monthTimezone:
Keep:
 
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + diff --git a/ui/modules/vpc/vpc.js b/ui/modules/vpc/vpc.js index 9c0d39d3cf1..c07f444e914 100644 --- a/ui/modules/vpc/vpc.js +++ b/ui/modules/vpc/vpc.js @@ -183,7 +183,7 @@ var $router = elems.tier({ context: args.context, tier: { - name: 'Router', + name: 'Router' }, dashboardItems: args.dashboardItems }).addClass('router'); diff --git a/ui/scripts/accounts.js b/ui/scripts/accounts.js index 9006e74ff11..a754d31b478 100644 --- a/ui/scripts/accounts.js +++ b/ui/scripts/accounts.js @@ -88,7 +88,7 @@ custom: cloudStack.uiCustom.accountsWizard( cloudStack.accountsWizard ) - }, + } } }, @@ -291,6 +291,24 @@ }); } + if (args.data.networkLimit != null) { + var data = { + resourceType: 6, + max: args.data.networkLimit, + domainid: accountObj.domainid, + account: accountObj.name + }; + + $.ajax({ + url: createURL('updateResourceLimit'), + data: data, + async: false, + success: function(json) { + accountObj["networkLimit"] = args.data.networkLimit; + } + }); + } + if (args.data.primaryStorageLimit != null) { var data = { resourceType: 10, @@ -651,6 +669,15 @@ return false; } }, + networkLimit: { + label: 'label.network.limits', + isEditable: function(context) { + if (context.accounts[0].accounttype == roleTypeUser || context.accounts[0].accounttype == roleTypeDomainAdmin) //updateResourceLimits is only allowed on account whose type is user or domain-admin + return true; + else + return false; + } + }, primaryStorageLimit: { label: 'label.primary.storage.limits', isEditable: function(context) { @@ -733,6 +760,9 @@ case "4": accountObj["templateLimit"] = limit.max; break; + case "6": + accountObj["networkLimit"] = limit.max; + break; case "7": accountObj["vpcLimit"] = limit.max; break; diff --git a/ui/scripts/autoscaler.js b/ui/scripts/autoscaler.js index 
6fabf6810c7..ab1b459e98c 100644 --- a/ui/scripts/autoscaler.js +++ b/ui/scripts/autoscaler.js @@ -1116,7 +1116,7 @@ var apiCmd, apiCmdRes; if (!('multiRules' in args.context)) { //from a new LB var data = { - zoneid: args.context.ipAddresses[0].zoneid, //args.context.networks[0] doesn't have zoneid property, so use args.context.ipAddresses[0] instead + zoneid: args.context.networks[0].zoneid, //get zoneid from args.context.networks[0] instead of args.context.ipAddresses[0] because args.context.ipAddresses is null when adding AutoScale rule from Add Load Balancer tab in Network page serviceofferingid: args.data.serviceOfferingId, templateid: args.data.templateNames, destroyvmgraceperiod: args.data.destroyVMgracePeriod, diff --git a/ui/scripts/configuration.js b/ui/scripts/configuration.js index 5eff5b4d4a1..42805973881 100644 --- a/ui/scripts/configuration.js +++ b/ui/scripts/configuration.js @@ -95,8 +95,15 @@ }); } }, + isCustomized: { + label: 'Custom', + isBoolean: true, + isReverse: true, + isChecked: false + }, cpuNumber: { label: 'label.num.cpu.cores', + dependsOn: 'isCustomized', docID: 'helpComputeOfferingCPUCores', validation: { required: true, @@ -105,6 +112,7 @@ }, cpuSpeed: { label: 'label.cpu.mhz', + dependsOn: 'isCustomized', docID: 'helpComputeOfferingCPUMHz', validation: { required: true, @@ -113,6 +121,7 @@ }, memory: { label: 'label.memory.mb', + dependsOn: 'isCustomized', docID: 'helpComputeOfferingMemory', validation: { required: true, @@ -280,11 +289,27 @@ name: args.data.name, displaytext: args.data.description, storageType: args.data.storageType, - cpuNumber: args.data.cpuNumber, - cpuSpeed: args.data.cpuSpeed, - memory: args.data.memory + customized: (args.data.isCustomized == "on") }; - + + //custom fields (begin) + if (args.$form.find('.form-item[rel=cpuNumber]').css("display") != "none") { + $.extend(data, { + cpuNumber: args.data.cpuNumber + }); + } + if (args.$form.find('.form-item[rel=cpuSpeed]').css("display") != "none") { + 
$.extend(data, { + cpuSpeed: args.data.cpuSpeed + }); + } + if (args.$form.find('.form-item[rel=memory]').css("display") != "none") { + $.extend(data, { + memory: args.data.memory + }); + } + //custom fields (end) + if (args.data.deploymentPlanner != null && args.data.deploymentPlanner.length > 0) { $.extend(data, { deploymentplanner: args.data.deploymentPlanner @@ -529,7 +554,10 @@ }, deploymentplanner: { label: 'Deployment Planner' - }, + }, + plannerMode: { + label: 'Planner Mode' + }, tags: { label: 'label.storage.tags' }, @@ -556,6 +584,13 @@ async: true, success: function(json) { var item = json.listserviceofferingsresponse.serviceoffering[0]; + + if (item.deploymentplanner != null && item.serviceofferingdetails != null) { + if (item.deploymentplanner == 'ImplicitDedicationPlanner' && item.serviceofferingdetails.ImplicitDedicationMode != null) { + item.plannerMode = item.serviceofferingdetails.ImplicitDedicationMode; + } + } + args.response.success({ actionFitler: serviceOfferingActionfilter, data: item diff --git a/ui/scripts/docs.js b/ui/scripts/docs.js index 1431c1b7b1f..3a4f8ca604f 100755 --- a/ui/scripts/docs.js +++ b/ui/scripts/docs.js @@ -643,6 +643,22 @@ cloudStack.docs = { desc: 'In iSCSI, this is the LUN number. For example, 3.', externalLink: '' }, + helpPrimaryStorageRBDMonitor: { + desc: 'The address of a Ceph monitor. Can also be a Round Robin DNS record', + externalLink: '' + }, + helpPrimaryStorageRBDPool: { + desc: 'The pool to use on the Ceph cluster. This pool should already exist', + externalLink: '' + }, + helpPrimaryStorageRBDId: { + desc: 'The cephx user to use without the client. prefix. For example: admin', + externalLink: '' + }, + helpPrimaryStorageRBDSecret: { + desc: 'The base64 encoded secret of the cephx user.', + externalLink: '' + }, helpPrimaryStorageTags: { desc: 'Comma-separated list of tags for this storage device. 
Must be the same set or a superset of the tags on your disk offerings.', externalLink: '' diff --git a/ui/scripts/domains.js b/ui/scripts/domains.js index 139412d9a48..7306a38d2d7 100644 --- a/ui/scripts/domains.js +++ b/ui/scripts/domains.js @@ -99,24 +99,31 @@ var domainObj; var data = { - id: args.context.domains[0].id, - networkdomain: args.data.networkdomain + id: args.context.domains[0].id }; - if (args.data.name != null) { + if (args.data.name != null) { //args.data.name == undefined means name field is not editable (when log in as normal user or domain admin) $.extend(data, { name: args.data.name }); } - - $.ajax({ - url: createURL("updateDomain"), - async: false, - data: data, - success: function(json) { - domainObj = json.updatedomainresponse.domain; - } - }); + + if (args.data.networkdomain != null) { //args.data.networkdomain == undefined means networkdomain field is not editable (when log in as normal user or domain admin) + $.extend(data, { + networkdomain: args.data.networkdomain + }); + } + + if('name' in data || 'networkdomain' in data) { + $.ajax({ + url: createURL("updateDomain"), + async: false, + data: data, + success: function(json) { + domainObj = json.updatedomainresponse.domain; + } + }); + } if (args.data.vmLimit != null) { $.ajax({ @@ -328,8 +335,8 @@ fields: [{ name: { label: 'label.name', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to change domain name + isEditable: function(args) { + if (isAdmin() && args.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to change domain name return true; else return false; @@ -346,96 +353,101 @@ networkdomain: { label: 'label.network.domain', - isEditable: true - }, - vmLimit: { - label: 'label.instance.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits + isEditable: function(args) { + if (isAdmin()) return 
true; else return false; } }, + vmLimit: { + label: 'label.instance.limits', + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as + return false; + else + return true; + } + }, ipLimit: { label: 'label.ip.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, volumeLimit: { label: 'label.volume.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, snapshotLimit: { label: 'label.snapshot.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, templateLimit: { label: 'label.template.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, vpcLimit: { label: 'VPC limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: 
function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, cpuLimit: { label: 'label.cpu.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, memoryLimit: { label: 'label.memory.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, primaryStorageLimit: { label: 'label.primary.storage.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, secondaryStorageLimit: { label: 'label.secondary.storage.limits', - isEditable: function(context) { - if (context.domains[0].level != 0) //ROOT domain (whose level is 0) is not allowed to updateResourceLimits - return true; - else + isEditable: function(args) { + if (args.domains[0].id == g_domainid) //disallow to update the field on the domain you log in as return false; + else + return true; } }, accountTotal: { @@ -620,6 +632,10 @@ if (jsonObj.level != 0) { //ROOT domain (whose level is 0) is not allowed to delete allowedActions.push("delete"); } + } else if (isDomainAdmin()) { + if (args.context.domains[0].id != g_domainid) { + allowedActions.push("edit"); //merge 
updateResourceLimit into edit + } } allowedActions.push("updateResourceCount"); return allowedActions; diff --git a/ui/scripts/events.js b/ui/scripts/events.js index 46d45c04605..38f7136c140 100644 --- a/ui/scripts/events.js +++ b/ui/scripts/events.js @@ -111,6 +111,9 @@ data: data, success: function(data) { args.response.success(); + }, + error:function(data) { + args.response.error(parseXMLHttpResponse(data)); } }); } @@ -452,6 +455,9 @@ success: function(data) { args.response.success(); + }, + error:function(data) { + args.response.error(parseXMLHttpResponse(data)); } }); } diff --git a/ui/scripts/globalSettings.js b/ui/scripts/globalSettings.js index d703e643bb9..0e3d1ef883d 100644 --- a/ui/scripts/globalSettings.js +++ b/ui/scripts/globalSettings.js @@ -111,7 +111,7 @@ }, port: { label: 'LDAP Port' - }, + } }, dataProvider: function(args) { var data = {}; @@ -329,4 +329,4 @@ } } }; -})(cloudStack); \ No newline at end of file +})(cloudStack); diff --git a/ui/scripts/instanceWizard.js b/ui/scripts/instanceWizard.js index 1179a87519e..d2cde2f270c 100644 --- a/ui/scripts/instanceWizard.js +++ b/ui/scripts/instanceWizard.js @@ -237,6 +237,13 @@ data: { templates: templatesObj, hypervisors: hypervisorObjs + }, + customHidden: function(args) { + if (selectedTemplate == 'select-template') { + return false; //show Root Disk Size field + } else { //selectedTemplate == 'select-iso' + return true; //hide Root Disk Size field + } } }); }, @@ -287,6 +294,7 @@ success: function(json) { serviceOfferingObjs = json.listserviceofferingsresponse.serviceoffering; args.response.success({ + customFlag: 'iscustomized', data: { serviceOfferings: serviceOfferingObjs } diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js index cf8aca5b485..ac8605a4f1d 100644 --- a/ui/scripts/instances.js +++ b/ui/scripts/instances.js @@ -458,6 +458,12 @@ }, notification: function(args) { return 'label.action.reboot.instance'; + }, + complete: function(args) { + if (args.password != null && 
args.password.length > 0) + return 'Password has been reset to ' + args.password; + else + return null; } }, notification: { @@ -523,32 +529,57 @@ }, notification: { - pool: pollAsyncJobResult + poll: pollAsyncJobResult } }, destroy: { label: 'label.action.destroy.instance', compactLabel: 'label.destroy', - messages: { - confirm: function(args) { - return 'message.action.destroy.instance'; + createForm: { + title: 'label.action.destroy.instance', + desc: 'Please confirm that you want to destroy this instance', + preFilter: function(args) { + if (isAdmin() || isDomainAdmin()) { + args.$form.find('.form-item[rel=expunge]').css('display', 'inline-block'); + } else { + args.$form.find('.form-item[rel=expunge]').hide(); + } }, + fields: { + expunge: { + label: 'Expunge', + isBoolean: true, + isChecked: false + } + } + }, + messages: { notification: function(args) { return 'label.action.destroy.instance'; } }, - action: function(args) { + action: function(args) { + var data = { + id: args.context.instances[0].id + }; + if (args.data.expunge == 'on') { + $.extend(data, { + expunge: true + }); + } $.ajax({ - url: createURL("destroyVirtualMachine&id=" + args.context.instances[0].id), - dataType: "json", - async: true, + url: createURL('destroyVirtualMachine'), + data: data, success: function(json) { var jid = json.destroyvirtualmachineresponse.jobid; args.response.success({ _custom: { jobId: jid, - getUpdatedItem: function(json) { - return json.queryasyncjobresultresponse.jobresult.virtualmachine; + getUpdatedItem: function(json) { + if ('virtualmachine' in json.queryasyncjobresultresponse.jobresult) //destroy without expunge + return json.queryasyncjobresultresponse.jobresult.virtualmachine; + else //destroy with expunge + return { 'toRemove': true }; }, getActionFilter: function() { return vmActionfilter; @@ -562,6 +593,39 @@ poll: pollAsyncJobResult } }, + expunge: { + label: 'label.action.expunge.instance', + compactLabel: 'label.expunge', + messages: { + confirm: 
function(args) { + return 'message.action.expunge.instance'; + }, + notification: function(args) { + return 'label.action.expunge.instance'; + } + }, + action: function(args) { + $.ajax({ + url: createURL("expungeVirtualMachine&id=" + args.context.instances[0].id), + dataType: "json", + async: true, + success: function(json) { + var jid = json.expungevirtualmachineresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getActionFilter: function() { + return vmActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, restore: { label: 'label.action.restore.instance', compactLabel: 'label.restore', @@ -790,13 +854,11 @@ isdynamicallyscalable: (args.data.isdynamicallyscalable == "on"), ostypeid: args.data.guestosid }; - if (args.data.displayname != args.context.instances[0].displayname) { $.extend(data, { displayName: args.data.displayname }); } - $.ajax({ url: createURL('updateVirtualMachine'), data: data, @@ -807,6 +869,54 @@ }); } }); + + + //***** addResourceDetail ***** + //XenServer only (starts here) + if(args.$detailView.find('form').find('div .detail-group').find('.xenserverToolsVersion61plus').length > 0) { + $.ajax({ + url: createURL('addResourceDetail'), + data: { + resourceType: 'uservm', + resourceId: args.context.instances[0].id, + 'details[0].key': 'hypervisortoolsversion', + 'details[0].value': (args.data.xenserverToolsVersion61plus == "on") ? 
'xenserver61' : 'xenserver56' + }, + success: function(json) { + var jobId = json.addResourceDetailresponse.jobid; + var addResourceDetailIntervalID = setInterval(function() { + $.ajax({ + url: createURL("queryAsyncJobResult&jobid=" + jobId), + dataType: "json", + success: function(json) { + var result = json.queryasyncjobresultresponse; + + if (result.jobstatus == 0) { + return; //Job has not completed + } else { + clearInterval(addResourceDetailIntervalID); + + if (result.jobstatus == 1) { + //do nothing + } else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ + message: "Failed to update XenServer Tools Version 6.1+ field. Error: " + _s(result.jobresult.errortext) + }); + } + } + }, + error: function(XMLHttpResponse) { + cloudStack.dialog.notice({ + message: "Failed to update XenServer Tools Version 6.1+ field. Error: " + parseXMLHttpResponse(XMLHttpResponse) + }); + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + //XenServer only (ends here) + } }, @@ -1308,7 +1418,7 @@ $(serviceofferings).each(function() { items.push({ id: this.id, - description: this.displaytext + description: this.name }); }); args.response.success({ @@ -1318,8 +1428,6 @@ }); } } - - } }, @@ -1360,9 +1468,79 @@ notification: { poll: pollAsyncJobResult } - }, - + + assignVmToAnotherAccount: { + label: 'Assign Instance to Another Account', + createForm: { + title: 'Assign Instance to Another Account', + fields: { + domainid: { + label: 'label.domain', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL('listDomains'), + data: { + listAll: true, + details: 'min' + }, + success: function(json) { + var array1 = []; + var domains = json.listdomainsresponse.domain; + if (domains != null && domains.length > 0) { + for (var i = 0; i < domains.length; i++) { + array1.push({ + id: domains[i].id, + description: domains[i].path + }); + } + } + args.response.success({ + data: array1 + }); + } + }); + } + }, + account: { + label: 
'label.account', + validation: { + required: true + } + } + } + }, + action: function(args) { + $.ajax({ + url: createURL('assignVirtualMachine&virtualmachine'), + data: { + virtualmachineid: args.context.instances[0].id, + domainid: args.data.domainid, + account: args.data.account + }, + success: function(json) { + var item = json.virtualmachine.virtualmachine; + args.response.success({ + data: item + }); + } + }); + }, + messages: { + notification: function(args) { + return 'Assign Instance to Another Account'; + } + }, + notification: { + poll: function(args) { + args.complete(); + } + } + }, + viewConsole: { label: 'label.view.console', action: { @@ -1389,9 +1567,13 @@ if (isAdmin()) { hiddenFields = []; } else { - hiddenFields = ["hypervisor"]; + hiddenFields = ["hypervisor", 'xenserverToolsVersion61plus']; } - + + if ('instances' in args.context && args.context.instances[0].hypervisor != 'XenServer') { + hiddenFields.push('xenserverToolsVersion61plus'); + } + if (!args.context.instances[0].publicip) { hiddenFields.push('publicip'); } @@ -1473,6 +1655,18 @@ label: 'label.hypervisor' }, + xenserverToolsVersion61plus: { + label: 'XenServer Tools Version 6.1+', + isBoolean: true, + isEditable: function () { + if (isAdmin()) + return true; + else + return false; + }, + converter: cloudStack.converters.toBooleanText + }, + /* isoid: { label: 'label.attached.iso', @@ -1543,11 +1737,22 @@ var jsonObj; if (json.listvirtualmachinesresponse.virtualmachine != null && json.listvirtualmachinesresponse.virtualmachine.length > 0) jsonObj = json.listvirtualmachinesresponse.virtualmachine[0]; + else if (isAdmin()) + jsonObj = $.extend(args.context.instances[0], { + state: "Expunged" + }); //after root admin expunge a VM, listVirtualMachines API will no longer returns this expunged VM to all users. 
else jsonObj = $.extend(args.context.instances[0], { state: "Destroyed" }); //after a regular user destroys a VM, listVirtualMachines API will no longer returns this destroyed VM to the regular user. + if ('details' in jsonObj && 'hypervisortoolsversion' in jsonObj.details) { + if (jsonObj.details.hypervisortoolsversion == 'xenserver61') + jsonObj.xenserverToolsVersion61plus = true; + else + jsonObj.xenserverToolsVersion61plus = false; + } + args.response.success({ actionFilter: vmActionfilter, data: jsonObj @@ -1870,6 +2075,8 @@ if (isAdmin() || isDomainAdmin()) { allowedActions.push("restore"); } + if (isAdmin()) + allowedActions.push("expunge"); } else if (jsonObj.state == 'Running') { allowedActions.push("stop"); allowedActions.push("restart"); @@ -1918,10 +2125,18 @@ if (jsonObj.hypervisor == "BareMetal") { allowedActions.push("createTemplate"); } + + if (isAdmin() || isDomainAdmin()) { + allowedActions.push("assignVmToAnotherAccount"); + } + } else if (jsonObj.state == 'Starting') { // allowedActions.push("stop"); } else if (jsonObj.state == 'Error') { allowedActions.push("destroy"); + } else if (jsonObj.state == 'Expunging') { + if (isAdmin()) + allowedActions.push("expunge"); } return allowedActions; } diff --git a/ui/scripts/network.js b/ui/scripts/network.js index a09e56515aa..12e5389d9e6 100755 --- a/ui/scripts/network.js +++ b/ui/scripts/network.js @@ -139,6 +139,7 @@ var ipObj = args.context.item; var status = ipObj.state; + //***** apply to both Isolated Guest Network IP, VPC IP (begin) ***** if (status == 'Destroyed' || status == 'Releasing' || status == 'Released' || @@ -149,90 +150,105 @@ return []; } - if (args.context.networks[0].networkofferingconservemode == false) { - /* - (1) If IP is SourceNat, no StaticNat/VPN/PortForwarding/LoadBalancer can be enabled/added. 
- */ - if (ipObj.issourcenat == true) { - disallowedActions.push('enableStaticNAT'); - disallowedActions.push('enableVPN'); - } - - /* - (2) If IP is non-SourceNat, show StaticNat/VPN/PortForwarding/LoadBalancer at first. - 1. Once StaticNat is enabled, hide VPN/PortForwarding/LoadBalancer. - 2. Once VPN is enabled, hide StaticNat/PortForwarding/LoadBalancer. - 3. Once a PortForwarding rule is added, hide StaticNat/VPN/LoadBalancer. - 4. Once a LoadBalancer rule is added, hide StaticNat/VPN/PortForwarding. - */ - else { //ipObj.issourcenat == false - if (ipObj.isstaticnat) { //1. Once StaticNat is enabled, hide VPN/PortForwarding/LoadBalancer. - disallowedActions.push('enableVPN'); - } - if (ipObj.vpnenabled) { //2. Once VPN is enabled, hide StaticNat/PortForwarding/LoadBalancer. - disallowedActions.push('enableStaticNAT'); - } - - //3. Once a PortForwarding rule is added, hide StaticNat/VPN/LoadBalancer. - $.ajax({ - url: createURL('listPortForwardingRules'), - data: { - ipaddressid: ipObj.id, - listAll: true - }, - dataType: 'json', - async: false, - success: function(json) { - var rules = json.listportforwardingrulesresponse.portforwardingrule; - if (rules != null && rules.length > 0) { - disallowedActions.push('enableVPN'); - disallowedActions.push('enableStaticNAT'); - } - } - }); - - //4. Once a LoadBalancer rule is added, hide StaticNat/VPN/PortForwarding. 
- $.ajax({ - url: createURL('listLoadBalancerRules'), - data: { - publicipid: ipObj.id, - listAll: true - }, - dataType: 'json', - async: false, - success: function(json) { - var rules = json.listloadbalancerrulesresponse.loadbalancerrule; - if (rules != null && rules.length > 0) { - disallowedActions.push('enableVPN'); - disallowedActions.push('enableStaticNAT'); - } - } - }); - } - } - - if (ipObj.isstaticnat) { - disallowedActions.push('enableStaticNAT'); - } else { - disallowedActions.push('disableStaticNAT'); - } - - if (ipObj.networkOfferingHavingVpnService == true) { - if (ipObj.vpnenabled) { - disallowedActions.push('enableVPN'); - } else { - disallowedActions.push('disableVPN'); - } - } else { //ipObj.networkOfferingHavingVpnService == false - disallowedActions.push('disableVPN'); - disallowedActions.push('enableVPN'); - } - - if (ipObj.issourcenat) { + if (ipObj.issourcenat) { //sourceNAT IP doesn't support staticNAT disallowedActions.push('enableStaticNAT'); disallowedActions.push('disableStaticNAT'); disallowedActions.push('remove'); - } + } else { //non-sourceNAT IP supports staticNAT + if (ipObj.isstaticnat) { + disallowedActions.push('enableStaticNAT'); + } else { + disallowedActions.push('disableStaticNAT'); + } + } + //***** apply to both Isolated Guest Network IP, VPC IP (end) ***** + + + if (!('vpc' in args.context)) { //***** Guest Network section > Guest Network page > IP Address page ***** + if (args.context.networks[0].networkofferingconservemode == false) { + /* + (1) If IP is SourceNat, no StaticNat/VPN/PortForwarding/LoadBalancer can be enabled/added. + */ + if (ipObj.issourcenat == true) { + disallowedActions.push('enableStaticNAT'); + disallowedActions.push('enableVPN'); + } + + /* + (2) If IP is non-SourceNat, show StaticNat/VPN/PortForwarding/LoadBalancer at first. + 1. Once StaticNat is enabled, hide VPN/PortForwarding/LoadBalancer. + 2. Once VPN is enabled, hide StaticNat/PortForwarding/LoadBalancer. + 3. 
Once a PortForwarding rule is added, hide StaticNat/VPN/LoadBalancer. + 4. Once a LoadBalancer rule is added, hide StaticNat/VPN/PortForwarding. + */ + else { //ipObj.issourcenat == false + if (ipObj.isstaticnat) { //1. Once StaticNat is enabled, hide VPN/PortForwarding/LoadBalancer. + disallowedActions.push('enableVPN'); + } + if (ipObj.vpnenabled) { //2. Once VPN is enabled, hide StaticNat/PortForwarding/LoadBalancer. + disallowedActions.push('enableStaticNAT'); + } + + //3. Once a PortForwarding rule is added, hide StaticNat/VPN/LoadBalancer. + $.ajax({ + url: createURL('listPortForwardingRules'), + data: { + ipaddressid: ipObj.id, + listAll: true + }, + dataType: 'json', + async: false, + success: function(json) { + var rules = json.listportforwardingrulesresponse.portforwardingrule; + if (rules != null && rules.length > 0) { + disallowedActions.push('enableVPN'); + disallowedActions.push('enableStaticNAT'); + } + } + }); + + //4. Once a LoadBalancer rule is added, hide StaticNat/VPN/PortForwarding. 
+ $.ajax({ + url: createURL('listLoadBalancerRules'), + data: { + publicipid: ipObj.id, + listAll: true + }, + dataType: 'json', + async: false, + success: function(json) { + var rules = json.listloadbalancerrulesresponse.loadbalancerrule; + if (rules != null && rules.length > 0) { + disallowedActions.push('enableVPN'); + disallowedActions.push('enableStaticNAT'); + } + } + }); + } + } + + if (ipObj.networkOfferingHavingVpnService == true) { + if (ipObj.vpnenabled) { + disallowedActions.push('enableVPN'); + } else { + disallowedActions.push('disableVPN'); + } + } else { //ipObj.networkOfferingHavingVpnService == false + disallowedActions.push('disableVPN'); + disallowedActions.push('enableVPN'); + } + } else { //***** VPC section > Configuration VPC > Router > Public IP Addresses ***** + if (ipObj.issourcenat) { //VPC sourceNAT IP: supports VPN + if (ipObj.vpnenabled) { + disallowedActions.push('enableVPN'); + } else { + disallowedActions.push('disableVPN'); + } + } else { //VPC non-sourceNAT IP: doesn't support VPN + disallowedActions.push('enableVPN'); + disallowedActions.push('disableVPN'); + } + } allowedActions = $.grep(allowedActions, function(item) { return $.inArray(item, disallowedActions) == -1; @@ -382,7 +398,7 @@ zoneid: args.zoneId, guestiptype: 'Isolated', supportedServices: 'SourceNat', - state: 'Enabled', + state: 'Enabled' }; if ('vpc' in args.context) { //from VPC section @@ -444,9 +460,7 @@ }) }); } - }); - //??? 
- + }); } }, @@ -1078,7 +1092,7 @@ label: 'label.restart.required', converter: function(booleanValue) { if (booleanValue == true) - return "Yes"; + return "Yes"; else if (booleanValue == false) return "No"; } @@ -1086,7 +1100,11 @@ vlan: { label: 'label.vlan.id' }, - + + broadcasturi: { + label: 'broadcasturi' + }, + networkofferingid: { label: 'label.network.offering', isEditable: true, @@ -1934,7 +1952,7 @@ } //*** from Guest Network section *** - if ('networks' in args.context) { + if (!('vpc' in args.context)) { if (args.context.networks[0].vpcid == null) { //Guest Network section > non-VPC network, show Acquire IP button return true; } else { //Guest Network section > VPC network, hide Acquire IP button @@ -1942,7 +1960,7 @@ } } //*** from VPC section *** - else { //'vpc' in args.context + else { //'vpc' in args.context //args.context.networks[0] has only one property => name: 'Router' return true; //VPC section, show Acquire IP button } }, @@ -2054,82 +2072,70 @@ }, dataProvider: function(args) { - var data = {}; - listViewDataProvider(args, data); - - if (g_supportELB == "guest") // IPs are allocated on guest network - $.extend(data, { - forvirtualnetwork: false, - forloadbalancing: true - }); - else if (g_supportELB == "public") // IPs are allocated on public network - $.extend(data, { - forvirtualnetwork: true, - forloadbalancing: true - }); - + var items = []; + var data = {}; + listViewDataProvider(args, data); if (args.context.networks) { $.extend(data, { associatedNetworkId: args.context.networks[0].id }); - } - if ("vpc" in args.context) { $.extend(data, { vpcid: args.context.vpc[0].id }); - } - + } + $.ajax({ url: createURL('listPublicIpAddresses'), - data: data, + data: $.extend({}, data, { + forvirtualnetwork: true //IPs are allocated on public network + }), dataType: "json", - async: true, + async: false, success: function(json) { - var items = json.listpublicipaddressesresponse.publicipaddress; - - $(items).each(function() { - 
getExtaPropertiesForIpObj(this, args); - }); - - args.response.success({ - actionFilter: actionFilters.ipAddress, - data: items - }); - }, - error: function(data) { - args.response.error(parseXMLHttpResponse(data)); + var ips = json.listpublicipaddressesresponse.publicipaddress; + if(ips != null) { + for(var i = 0; i < ips.length; i++) { + getExtaPropertiesForIpObj(ips[i], args); + items.push(ips[i]); + } + } } }); + + if (g_supportELB == "guest") { + $.ajax({ + url: createURL('listPublicIpAddresses'), + data: $.extend({}, data, { + forvirtualnetwork: false, // ELB IPs are allocated on guest network + forloadbalancing: true + }), + dataType: "json", + async: false, + success: function(json) { + var ips = json.listpublicipaddressesresponse.publicipaddress; + if(ips != null) { + for(var i = 0; i < ips.length; i++) { + getExtaPropertiesForIpObj(ips[i], args); + items.push(ips[i]); + } + } + } + }); + } + + args.response.success({ + actionFilter: actionFilters.ipAddress, + data: items + }); }, // Detail view detailView: { name: 'IP address detail', tabFilter: function(args) { - var item = args.context.ipAddresses[0]; - - // Get VPN data - $.ajax({ - url: createURL('listRemoteAccessVpns'), - data: { - listAll: true, - publicipid: item.id - }, - dataType: 'json', - async: false, - success: function(vpnResponse) { - var isVPNEnabled = vpnResponse.listremoteaccessvpnsresponse.count; - if (isVPNEnabled) { - item.vpnenabled = true; - item.remoteaccessvpn = vpnResponse.listremoteaccessvpnsresponse.remoteaccessvpn[0]; - }; - }, - error: function(data) { - args.response.error(parseXMLHttpResponse(data)); - } - }); + var item = args.context.ipAddresses[0]; var disabledTabs = []; var ipAddress = args.context.ipAddresses[0]; @@ -5922,7 +5928,7 @@ }; function getExtaPropertiesForIpObj(ipObj, args) { - if (!('vpc' in args.context)) { //from Guest Network section + if (!('vpc' in args.context)) { //***** Guest Network section > Guest Network page > IP Address page ***** var services 
= args.context.networks[0].service; if(services != null) { for(var i = 0; i < services.length; i++) { @@ -5946,12 +5952,32 @@ if (isVPNEnabled) { ipObj.vpnenabled = true; ipObj.remoteaccessvpn = vpnResponse.listremoteaccessvpnsresponse.remoteaccessvpn[0]; - }; + } else { + ipObj.vpnenabled = false; + } } }); } - } else { //from VPC section - ipObj.networkOfferingHavingVpnService = false; //VPN is not supported in IP in VPC, so hardcode it as false + } else { //***** VPC section > Configuration VPC > Router > Public IP Addresses ***** + if (ipObj.issourcenat) { //VPC sourceNAT IP: supports VPN + $.ajax({ + url: createURL('listRemoteAccessVpns'), + data: { + listAll: true, + publicipid: ipObj.id + }, + async: false, + success: function(vpnResponse) { + var isVPNEnabled = vpnResponse.listremoteaccessvpnsresponse.count; + if (isVPNEnabled) { + ipObj.vpnenabled = true; + ipObj.remoteaccessvpn = vpnResponse.listremoteaccessvpnsresponse.remoteaccessvpn[0]; + } else { + ipObj.vpnenabled = false; + } + } + }); + } } } diff --git a/ui/scripts/sharedFunctions.js b/ui/scripts/sharedFunctions.js index 1596973bd30..fb07d3715a9 100644 --- a/ui/scripts/sharedFunctions.js +++ b/ui/scripts/sharedFunctions.js @@ -235,9 +235,9 @@ var addGuestNetworkDialog = { label: 'label.physical.network', dependsOn: 'zoneId', select: function(args) { - if ('physicalNetworks' in args.context) { + if ('physicalNetworks' in args.context) { //Infrastructure menu > zone detail > guest traffic type > network tab (only shown in advanced zone) > add guest network dialog addGuestNetworkDialog.physicalNetworkObjs = args.context.physicalNetworks; - } else { + } else { //Network menu > guest network section > add guest network dialog var selectedZoneId = args.$form.find('.form-item[rel=zoneId]').find('select').val(); $.ajax({ url: createURL('listPhysicalNetworks'), @@ -245,8 +245,33 @@ var addGuestNetworkDialog = { zoneid: selectedZoneId }, async: false, - success: function(json) { - 
addGuestNetworkDialog.physicalNetworkObjs = json.listphysicalnetworksresponse.physicalnetwork; + success: function(json) { + var items = []; + var physicalnetworks = json.listphysicalnetworksresponse.physicalnetwork; + if (physicalnetworks != null) { + for (var i = 0; i < physicalnetworks.length; i++) { + $.ajax({ + url: createURL('listTrafficTypes'), + data: { + physicalnetworkid: physicalnetworks[i].id + }, + async: false, + success: function(json) { + var traffictypes = json.listtraffictypesresponse.traffictype; + if (traffictypes != null) { + for (var k = 0; k < traffictypes.length; k++) { + if (traffictypes[k].traffictype == 'Guest') { + items.push(physicalnetworks[i]); + break; + } + } + } + } + }); + } + } + + addGuestNetworkDialog.physicalNetworkObjs = items; } }); } @@ -413,7 +438,7 @@ var addGuestNetworkDialog = { subdomainaccess: { label: 'label.subdomain.access', isBoolean: true, - isHidden: true, + isHidden: true }, account: { label: 'label.account' @@ -830,6 +855,7 @@ cloudStack.preFilter = { args.$form.find('.form-item[rel=isPublic]').hide(); } args.$form.find('.form-item[rel=isFeatured]').hide(); + args.$form.find('.form-item[rel=xenserverToolsVersion61plus]').hide(); } }, addLoadBalancerDevice: function(args) { //add netscaler device OR add F5 device @@ -1143,9 +1169,12 @@ var addExtraPropertiesToGuestNetworkObject = function(jsonObj) { jsonObj.scope = "Account (" + jsonObj.domain + ", " + jsonObj.account + ")"; } - if (jsonObj.vlan == null && jsonObj.broadcasturi != null) { + if (jsonObj.vlan == null && jsonObj.broadcasturi != null && jsonObj.broadcasturi.substring(0,7) == "vlan://") { jsonObj.vlan = jsonObj.broadcasturi.replace("vlan://", ""); } + if(jsonObj.vxlan == null && jsonObj.broadcasturi != null && jsonObj.broadcasturi.substring(0,8) == "vxlan://") { + jsonObj.vxlan = jsonObj.broadcasturi.replace("vxlan://", ""); + } } //used by infrastructure page diff --git a/ui/scripts/storage.js b/ui/scripts/storage.js index ec63e1586f1..314621e3368 
100644 --- a/ui/scripts/storage.js +++ b/ui/scripts/storage.js @@ -188,7 +188,7 @@ number: true }, isHidden: true - }, + } } }, @@ -1760,6 +1760,36 @@ } }, + revertSnapshot: { + label: 'label.action.revert.snapshot', + messages: { + confirm: function(args) { + return 'message.action.revert.snapshot'; + }, + notification: function(args) { + return 'label.action.revert.snapshot'; + } + }, + action: function(args) { + $.ajax({ + url: createURL("revertSnapshot&id="+args.context.snapshots[0].id), + dataType: "json", + async: true, + success: function(json) { + var jid = json.revertsnapshotresponse.jobid; + args.response.success({ + _custom: { + jobId: jid + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + remove: { label: 'label.action.delete.snapshot', messages: { @@ -1929,6 +1959,10 @@ if (jsonObj.state == "BackedUp") { allowedActions.push("createTemplate"); allowedActions.push("createVolume"); + + if (jsonObj.revertable && args.context.volumes[0].vmstate == "Stopped") { + allowedActions.push("revertSnapshot"); + } } allowedActions.push("remove"); diff --git a/ui/scripts/system.js b/ui/scripts/system.js index 1ddbc21cb8a..088cb3cff59 100644 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -238,11 +238,11 @@ //comment the 4 lines above and uncomment the following 4 lines if listHosts API still responds slowly. /* - dataFns.primaryStorageCount($.extend(data, { + dataFns.primaryStorageCount($.extend(data, { clusterCount: json.listclustersresponse.count ? 
json.listclustersresponse.count : 0 })); - */ + */ } }); }, @@ -364,7 +364,7 @@ return total; }; - complete($.extend(data, { + dataFns.socketInfo($.extend(data, { cpuCapacityTotal: capacityTotal(1, cloudStack.converters.convertHz), memCapacityTotal: capacityTotal(0, cloudStack.converters.convertBytes), storageCapacityTotal: capacityTotal(2, cloudStack.converters.convertBytes) @@ -372,12 +372,31 @@ } }); } else { - complete($.extend(data, { + dataFns.socketInfo($.extend(data, { cpuCapacityTotal: cloudStack.converters.convertHz(0), memCapacityTotal: cloudStack.converters.convertBytes(0), storageCapacityTotal: cloudStack.converters.convertBytes(0) })); } + }, + + socketInfo: function(data) { + $.ajax({ + url: createURL('listHypervisors'), + success: function(json) { + var hypervisors = json.listhypervisorsresponse.hypervisor; + + complete($.extend(data, { + socketInfo: $(hypervisors).map(function(index, hypervisor) { + return { + name: hypervisor.name, + hosts: 0, + sockets: 0 + }; + }) + })); + } + }); } }; @@ -1082,19 +1101,19 @@ actions: { edit: { label: 'label.edit', - action: function(args) { + action: function(args) { var data = { - id: selectedPhysicalNetworkObj.id, - }; - - $.extend(data, { - vlan: args.data.vlan - }); - - $.extend(data, { - tags: args.data.tags - }); - + id: selectedPhysicalNetworkObj.id + }; + + $.extend(data, { + vlan: args.data.vlan + }); + + $.extend(data, { + tags: args.data.tags + }); + $.ajax({ url: createURL('updatePhysicalNetwork'), data: data, @@ -1148,7 +1167,7 @@ vlan: { label: 'VLAN Range(s)', isEditable: true - }, + }, tags: { label: 'Tags', isEditable: true @@ -1192,17 +1211,17 @@ // var startVlan, endVlan; var vlan = selectedPhysicalNetworkObj.vlan; /* if(vlan != null && vlan.length > 0) { - if(vlan.indexOf("-") != -1) { - var vlanArray = vlan.split("-"); - startVlan = vlanArray[0]; - endVlan = vlanArray[1]; - } - else { - startVlan = vlan; - } - selectedPhysicalNetworkObj["startVlan"] = startVlan; - 
selectedPhysicalNetworkObj["endVlan"] = endVlan; - }*/ + if(vlan.indexOf("-") != -1) { + var vlanArray = vlan.split("-"); + startVlan = vlanArray[0]; + endVlan = vlanArray[1]; + } + else { + startVlan = vlan; + } + selectedPhysicalNetworkObj["startVlan"] = startVlan; + selectedPhysicalNetworkObj["endVlan"] = endVlan; + }*/ //traffic type var xentrafficlabel, kvmtrafficlabel, vmwaretrafficlabel; @@ -1723,13 +1742,13 @@ }); } }); - + //include currently selected network offeirng to dropdown items.push({ id: selectedGuestNetworkObj.networkofferingid, description: selectedGuestNetworkObj.networkofferingdisplaytext - }); - + }); + args.response.success({ data: items }); @@ -1971,7 +1990,7 @@ fields: [{ guestvlanrange: { label: 'VLAN Range(s)' - }, + } }, { domain: { label: 'label.domain' @@ -1993,7 +2012,7 @@ var item = json.listdedicatedguestvlanrangesresponse.dedicatedguestvlanrange[0]; args.response.success({ data: item - }) + }); } }); } @@ -5260,7 +5279,7 @@ numretries: { label: 'label.numretries', defaultValue: '2' - }, + } } }, action: function(args) { @@ -5457,7 +5476,7 @@ }) }); } - }, + } }, actions: { enable: { @@ -5653,7 +5672,7 @@ validation: { required: true } - }, + } } }, action: function(args) { @@ -5709,10 +5728,14 @@ url: createURL('removeVmwareDc'), data: data, success: function(json) { - var item = json.updatezoneresponse.zone; - args.response.success({ - actionFilter: zoneActionfilter, - data: item + delete args.context.physicalResources[0].vmwaredcName; + delete args.context.physicalResources[0].vmwaredcVcenter; + delete args.context.physicalResources[0].vmwaredcId; + + selectedZoneObj = args.context.physicalResources[0]; + + args.response.success({ + data: args.context.physicalResources[0] }); } }); @@ -6072,10 +6095,10 @@ $.ajax({ url: createURL('listDedicatedZones'), data: { - zoneid: args.context.physicalResources[0].id + zoneid: args.context.physicalResources[0].id }, async: false, - success: function(json) { + success: function(json) { if 
(json.listdedicatedzonesresponse.dedicatedzone != undefined) { var dedicatedzoneObj = json.listdedicatedzonesresponse.dedicatedzone[0]; if (dedicatedzoneObj.domainid != null) { @@ -6096,17 +6119,17 @@ }); $.ajax({ - url: createURL('listClusters'), - data: { + url: createURL('listClusters'), + data: { zoneid: args.context.physicalResources[0].id }, async: false, - success: function(json) { - var clusters = json.listclustersresponse.cluster; - if (clusters != null) { - for (var i = 0; i < clusters.length; i++) { - if (clusters[i].hypervisortype == 'VMware') { - $.ajax({ + success: function(json) { + var clusters = json.listclustersresponse.cluster; + if (clusters != null) { + for (var i = 0; i < clusters.length; i++) { + if (clusters[i].hypervisortype == 'VMware') { + $.ajax({ url: createURL('listVmwareDcs'), //listVmwareDcs API exists in only non-oss bild data: { zoneid: args.context.physicalResources[0].id @@ -6120,16 +6143,16 @@ selectedZoneObj.vmwaredcId = vmwaredcs[0].id; } } - //, error: function(XMLHttpResponse) {} //override default error handling: cloudStack.dialog.notice({ message: parseXMLHttpResponse(XMLHttpResponse)}); - }); - - break; - } - } - } + //, error: function(XMLHttpResponse) {} //override default error handling: cloudStack.dialog.notice({ message: parseXMLHttpResponse(XMLHttpResponse)}); + }); + + break; + } + } + } } - }); - + }); + args.response.success({ actionFilter: zoneActionfilter, data: selectedZoneObj @@ -6451,13 +6474,13 @@ createForm: { title: 'label.change.service.offering', desc: function(args) { - var description = ''; - var vmObj = args.jsonObj; - //if (vmObj.state == 'Running' && vmObj.hypervisor == 'VMware') { //needs to wait for API fix that will return hypervisor property - if (vmObj.state == 'Running') { - description = 'Please read the dynamic scaling section in the admin guide before scaling up.'; - } - return description; + var description = ''; + var vmObj = args.jsonObj; + //if (vmObj.state == 'Running' && 
vmObj.hypervisor == 'VMware') { //needs to wait for API fix that will return hypervisor property + if (vmObj.state == 'Running') { + description = 'Please read the dynamic scaling section in the admin guide before scaling up.'; + } + return description; }, fields: { serviceOfferingId: { @@ -6992,17 +7015,17 @@ var systemvmObjs = json.listsystemvmsresponse.systemvm; if (systemvmObjs != null) { $.ajax({ - url: createURL("listHosts&listAll=true"), + url: createURL("listHosts&listAll=true"), success: function(json) { var hostObjs = json.listhostsresponse.host; for (var i = 0; i < systemvmObjs.length; i++) { - for (var k = 0; k < hostObjs.length; k++) { - if (hostObjs[k].name == systemvmObjs[i].name) { - systemvmObjs[i].agentstate = hostObjs[k].state; - break; - } - } - } + for (var k = 0; k < hostObjs.length; k++) { + if (hostObjs[k].name == systemvmObjs[i].name) { + systemvmObjs[i].agentstate = hostObjs[k].state; + break; + } + } + } args.response.success({ data: systemvmObjs }); @@ -7049,7 +7072,7 @@ var listView = $.extend(true, {}, cloudStack.sections.system.subsections.virtualRouters.listView, { dataProvider: function(args) { var searchByArgs = args.filterBy.search.value.length ? 
- '&name=' + args.filterBy.search.value : ''; + '&keyword=' + args.filterBy.search.value : ''; var routers = []; $.ajax({ @@ -7426,13 +7449,13 @@ createForm: { title: 'label.change.service.offering', desc: function(args) { - var description = ''; - var vmObj = args.jsonObj; - //if (vmObj.state == 'Running' && vmObj.hypervisor == 'VMware') { //needs to wait for API fix that will return hypervisor property - if (vmObj.state == 'Running') { - description = 'Please read the dynamic scaling section in the admin guide before scaling up.'; - } - return description; + var description = ''; + var vmObj = args.jsonObj; + //if (vmObj.state == 'Running' && vmObj.hypervisor == 'VMware') { //needs to wait for API fix that will return hypervisor property + if (vmObj.state == 'Running') { + description = 'Please read the dynamic scaling section in the admin guide before scaling up.'; + } + return description; }, fields: { serviceOfferingId: { @@ -7983,13 +8006,13 @@ createForm: { title: 'label.change.service.offering', desc: function(args) { - var description = ''; - var vmObj = args.jsonObj; - //if (vmObj.state == 'Running' && vmObj.hypervisor == 'VMware') { //needs to wait for API fix that will return hypervisor property - if (vmObj.state == 'Running') { - description = 'Please read the dynamic scaling section in the admin guide before scaling up.'; - } - return description; + var description = ''; + var vmObj = args.jsonObj; + //if (vmObj.state == 'Running' && vmObj.hypervisor == 'VMware') { //needs to wait for API fix that will return hypervisor property + if (vmObj.state == 'Running') { + description = 'Please read the dynamic scaling section in the admin guide before scaling up.'; + } + return description; }, fields: { serviceOfferingId: { @@ -8458,7 +8481,7 @@ notification: { poll: pollAsyncJobResult } - }, + } }, dataProvider: function(args) { $.ajax({ @@ -8536,7 +8559,7 @@ notification: { poll: pollAsyncJobResult } - }, + } }, dataProvider: function(args) { $.ajax({ @@ 
-9208,7 +9231,7 @@ fields: { hostname: { label: 'label.bigswitch.controller.address' - }, + } }, actions: { add: { @@ -9223,7 +9246,7 @@ numretries: { label: 'label.numretries', defaultValue: '2' - }, + } } }, action: function(args) { @@ -9334,7 +9357,7 @@ }, hostname: { label: 'label.ip.address' - }, + } }], dataProvider: function(args) { $.ajax({ @@ -9484,8 +9507,6 @@ isBoolean: true, isChecked: false, docID: 'helpDedicateResource' - - }, domainId: { @@ -9529,7 +9550,6 @@ validation: { required: false } - } } @@ -9576,7 +9596,7 @@ notification: { poll: pollAsyncJobResult, interval: 4500, - desc: "Dedicate Pod" + desc: "Dedicate Pod" }, data: item @@ -9585,7 +9605,6 @@ }, error: function(json) { - args.response.error(parseXMLHttpResponse(XMLHttpResponse)); } }); @@ -9602,8 +9621,6 @@ } }); - - }, notification: { @@ -9769,7 +9786,7 @@ getActionFilter: function() { return podActionfilter; } - }, + } }); }, error: function(json) { @@ -10096,14 +10113,14 @@ createForm: { title: 'label.add.cluster', preFilter: function(args) { - var $form = args.$form; - $form.click(function() { + var $form = args.$form; + $form.click(function() { var $nexusDvsOptFields = $form.find('.form-item').filter(function() { var nexusDvsOptFields = [ 'vsmipaddress', 'vsmusername', 'vsmpassword' - ]; + ]; return $.inArray($(this).attr('rel'), nexusDvsOptFields) > -1; }); var $nexusDvsReqFields = $form.find('.form-item').filter(function() { @@ -10111,22 +10128,22 @@ 'vsmipaddress_req', 'vsmusername_req', 'vsmpassword_req' - ]; + ]; return $.inArray($(this).attr('rel'), nexusDvsReqFields) > -1; - }); - - if ($form.find('.form-item[rel=hypervisor] select').val() == 'VMware' ) { - $form.find('.form-item[rel=vCenterHost]').css('display', 'inline-block'); + }); + + if ($form.find('.form-item[rel=hypervisor] select').val() == 'VMware' ) { + $form.find('.form-item[rel=vCenterHost]').css('display', 'inline-block'); $form.find('.form-item[rel=vCenterUsername]').css('display', 'inline-block'); 
$form.find('.form-item[rel=vCenterPassword]').css('display', 'inline-block'); $form.find('.form-item[rel=vCenterDatacenter]').css('display', 'inline-block'); - + var $overridePublicTraffic = $form.find('.form-item[rel=overridepublictraffic] input[type=checkbox]'); - var $vSwitchPublicType = $form.find('.form-item[rel=vSwitchPublicType] select'); - var $overrideGuestTraffic = $form.find('.form-item[rel=overrideguesttraffic] input[type=checkbox]'); - var $vSwitchGuestType = $form.find('.form-item[rel=vSwitchGuestType] select'); - - + var $vSwitchPublicType = $form.find('.form-item[rel=vSwitchPublicType] select'); + var $overrideGuestTraffic = $form.find('.form-item[rel=overrideguesttraffic] input[type=checkbox]'); + var $vSwitchGuestType = $form.find('.form-item[rel=vSwitchGuestType] select'); + + var useDvs = false; $.ajax({ url: createURL('listConfigurations'), @@ -10139,12 +10156,12 @@ useDvs = true; } } - }); - if (useDvs == true) { //If using Distributed vswitch, there is OverrideTraffic option. - $form.find('.form-item[rel=overridepublictraffic]').css('display', 'inline-block'); - $form.find('.form-item[rel=overrideguesttraffic]').css('display', 'inline-block'); - - var useNexusDvs = false; + }); + if (useDvs == true) { //If using Distributed vswitch, there is OverrideTraffic option. + $form.find('.form-item[rel=overridepublictraffic]').css('display', 'inline-block'); + $form.find('.form-item[rel=overrideguesttraffic]').css('display', 'inline-block'); + + var useNexusDvs = false; $.ajax({ url: createURL('listConfigurations'), data: { @@ -10157,65 +10174,65 @@ } } }); - if (useNexusDvs == true) { //If using Nexus Distributed vswitch, show Nexus Distributed vswitch fields (either required ones or optional ones). 
- if (($overridePublicTraffic.is(':checked') && $vSwitchPublicType.val() == 'nexusdvs') || - ($overrideGuestTraffic.is(':checked') && $vSwitchGuestType.val() == 'nexusdvs' )) { - $nexusDvsReqFields.css('display', 'inline-block'); - $nexusDvsOptFields.hide(); - } else { - $nexusDvsOptFields.css('display', 'inline-block'); - $nexusDvsReqFields.hide(); - } - - } else { //If not using Nexus Distributed vswitch, hide Nexus Distributed vswitch fields. - $nexusDvsOptFields.hide(); - $nexusDvsReqFields.hide(); - } - - } else { //useDvs == false + if (useNexusDvs == true) { //If using Nexus Distributed vswitch, show Nexus Distributed vswitch fields (either required ones or optional ones). + if (($overridePublicTraffic.is(':checked') && $vSwitchPublicType.val() == 'nexusdvs') || + ($overrideGuestTraffic.is(':checked') && $vSwitchGuestType.val() == 'nexusdvs' )) { + $nexusDvsReqFields.css('display', 'inline-block'); + $nexusDvsOptFields.hide(); + } else { + $nexusDvsOptFields.css('display', 'inline-block'); + $nexusDvsReqFields.hide(); + } + + } else { //If not using Nexus Distributed vswitch, hide Nexus Distributed vswitch fields. 
+ $nexusDvsOptFields.hide(); + $nexusDvsReqFields.hide(); + } + + } else { //useDvs == false $form.find('.form-item[rel=overridepublictraffic]').css('display', 'none'); $form.find('.form-item[rel=vSwitchPublicType]').css('display', 'none'); $form.find('.form-item[rel=vSwitchPublicName]').css('display', 'none'); - + $form.find('.form-item[rel=overrideguesttraffic]').css('display', 'none'); - $form.find('.form-item[rel=vSwitchGuestType]').css('display', 'none'); - $form.find('.form-item[rel=vSwitchGuestName]').css('display', 'none'); - + $form.find('.form-item[rel=vSwitchGuestType]').css('display', 'none'); + $form.find('.form-item[rel=vSwitchGuestName]').css('display', 'none'); + $nexusDvsOptFields.hide(); - $nexusDvsReqFields.hide(); + $nexusDvsReqFields.hide(); } - - + + } else { //XenServer, KVM, etc (non-VMware) $form.find('.form-item[rel=vCenterHost]').css('display', 'none'); $form.find('.form-item[rel=vCenterUsername]').css('display', 'none'); $form.find('.form-item[rel=vCenterPassword]').css('display', 'none'); $form.find('.form-item[rel=vCenterDatacenter]').css('display', 'none'); $form.find('.form-item[rel=enableNexusVswitch]').css('display', 'none'); - + $form.find('.form-item[rel=overridepublictraffic]').css('display', 'none'); - $form.find('.form-item[rel=overrideguesttraffic]').css('display', 'none'); - $nexusDvsOptFields.hide(); - $nexusDvsReqFields.hide(); - } - + $form.find('.form-item[rel=overrideguesttraffic]').css('display', 'none'); + $nexusDvsOptFields.hide(); + $nexusDvsReqFields.hide(); + } + if ($form.find('.form-item[rel=overridepublictraffic]').css('display') != 'none' && $overridePublicTraffic.is(':checked')) { - $form.find('.form-item[rel=vSwitchPublicType]').css('display', 'inline-block'); - $form.find('.form-item[rel=vSwitchPublicName]').css('display', 'inline-block'); + $form.find('.form-item[rel=vSwitchPublicType]').css('display', 'inline-block'); + $form.find('.form-item[rel=vSwitchPublicName]').css('display', 'inline-block'); } else 
{ - $form.find('.form-item[rel=vSwitchPublicType]').css('display', 'none'); + $form.find('.form-item[rel=vSwitchPublicType]').css('display', 'none'); $form.find('.form-item[rel=vSwitchPublicName]').css('display', 'none'); } - + if ($form.find('.form-item[rel=overrideguesttraffic]').css('display') != 'none' && $overrideGuestTraffic.is(':checked')) { - $form.find('.form-item[rel=vSwitchGuestType]').css('display', 'inline-block'); - $form.find('.form-item[rel=vSwitchGuestName]').css('display', 'inline-block'); + $form.find('.form-item[rel=vSwitchGuestType]').css('display', 'inline-block'); + $form.find('.form-item[rel=vSwitchGuestName]').css('display', 'inline-block'); } else { - $form.find('.form-item[rel=vSwitchGuestType]').css('display', 'none'); + $form.find('.form-item[rel=vSwitchGuestType]').css('display', 'none'); $form.find('.form-item[rel=vSwitchGuestName]').css('display', 'none'); - } + } }); - + $form.trigger('click'); }, fields: { @@ -10253,7 +10270,7 @@ hypervisor: { label: 'label.hypervisor', docID: 'helpClusterHypervisor', - select: function(args) { + select: function(args) { $.ajax({ url: createURL("listHypervisors"), dataType: "json", @@ -10451,12 +10468,12 @@ args.response.success({ data: items }); - }, - isHidden: true + }, + isHidden: true }, vSwitchPublicName: { - label: 'Public Traffic vSwitch Name', + label: 'Public Traffic vSwitch Name', isHidden: true }, @@ -10472,7 +10489,7 @@ label: 'Guest Traffic vSwitch Type', select: function(args) { var items = [] - + var useNexusDvs = false; $.ajax({ url: createURL('listConfigurations'), @@ -10516,7 +10533,7 @@ description: "Cisco Nexus 1000v Distributed Virtual Switch" }); } - + args.response.success({ data: items }); @@ -10525,7 +10542,7 @@ }, vSwitchGuestName: { - label: ' Guest Traffic vSwitch Name', + label: ' Guest Traffic vSwitch Name', isHidden: true }, @@ -10613,29 +10630,29 @@ if (args.data.vSwitchGuestName != "") array1.push("&guestvswitchname=" + args.data.vSwitchGuestName); - //Nexus VSM 
fields + //Nexus VSM fields if (args.$form.find('.form-item[rel=vsmipaddress]').css('display') != 'none' && args.data.vsmipaddress != null && args.data.vsmipaddress.length > 0) { - array1.push('&vsmipaddress=' + args.data.vsmipaddress); + array1.push('&vsmipaddress=' + args.data.vsmipaddress); } if (args.$form.find('.form-item[rel=vsmipaddress_req]').css('display') != 'none' && args.data.vsmipaddress_req != null && args.data.vsmipaddress_req.length > 0) { - array1.push('&vsmipaddress=' + args.data.vsmipaddress_req); + array1.push('&vsmipaddress=' + args.data.vsmipaddress_req); } - + if(args.$form.find('.form-item[rel=vsmusername]').css('display') != 'none' && args.data.vsmusername != null && args.data.vsmusername.length > 0) { - array1.push('&vsmusername=' + args.data.vsmusername); + array1.push('&vsmusername=' + args.data.vsmusername); } if(args.$form.find('.form-item[rel=vsmusername_req]').css('display') != 'none' && args.data.vsmusername_req != null && args.data.vsmusername_req.length > 0) { - array1.push('&vsmusername=' + args.data.vsmusername_req); + array1.push('&vsmusername=' + args.data.vsmusername_req); } - + if(args.$form.find('.form-item[rel=vsmpassword]').css('display') != 'none' && args.data.vsmpassword != null && args.data.vsmpassword.length > 0) { - array1.push('&vsmpassword=' + args.data.vsmpassword); - } + array1.push('&vsmpassword=' + args.data.vsmpassword); + } if(args.$form.find('.form-item[rel=vsmpassword_req]').css('display') != 'none' && args.data.vsmpassword_req != null && args.data.vsmpassword_req.length > 0) { - array1.push('&vsmpassword=' + args.data.vsmpassword_req); - } - - + array1.push('&vsmpassword=' + args.data.vsmpassword_req); + } + + var hostname = args.data.vCenterHost; var dcName = args.data.vCenterDatacenter; @@ -10671,7 +10688,7 @@ $.ajax({ url: createURL("addCluster" + array1.join("")), dataType: "json", - type: "POST", + type: "POST", success: function(json) { var item = json.addclusterresponse.cluster[0]; clusterId = 
json.addclusterresponse.cluster[0].id; @@ -11442,8 +11459,8 @@ array1.push("&podid=" + args.context.pods[0].id); if ("clusters" in args.context) array1.push("&clusterid=" + args.context.clusters[0].id); - } else { - array1.push("&hostid=" + args.context.instances[0].hostid); + } else { //Instances menu > Instance detailView > View Hosts + array1.push("&id=" + args.context.instances[0].hostid); } $.ajax({ @@ -12809,10 +12826,6 @@ id: "SharedMountPoint", description: "SharedMountPoint" }); - items.push({ - id: "rbd", - description: "RBD" - }); args.response.success({ data: items }); @@ -13123,6 +13136,7 @@ // RBD rbdmonitor: { label: 'label.rbd.monitor', + docID: 'helpPrimaryStorageRBDMonitor', validation: { required: true }, @@ -13130,6 +13144,7 @@ }, rbdpool: { label: 'label.rbd.pool', + docID: 'helpPrimaryStorageRBDPool', validation: { required: true }, @@ -13137,6 +13152,7 @@ }, rbdid: { label: 'label.rbd.id', + docID: 'helpPrimaryStorageRBDId', validation: { required: false }, @@ -13144,6 +13160,7 @@ }, rbdsecret: { label: 'label.rbd.secret', + docID: 'helpPrimaryStorageRBDSecret', validation: { required: false }, @@ -13444,7 +13461,7 @@ isEditable: true }, zonename: { - label: 'label.zone' + label: 'label.zone' }, podname: { label: 'label.pod' @@ -13580,36 +13597,36 @@ label: 'label.url' } }, - dataProvider: function(args) { - $.ajax({ - url: createURL('listUcsManagers'), - data: { - zoneid: args.context.physicalResources[0].id - }, - success: function(json) { - //for testing only (begin) - /* - json = - { - "listucsmanagerreponse": { - "count": 1, - "ucsmanager": [ - { - "id": "07b5b813-83ed-4859-952c-c95cafb63ac4", - "name": "ucsmanager", - "url": "10.223.184.2", - "zoneid": "54c9a65c-ba89-4380-96e9-1d429c5372e3" - } - ] - } - }; - */ - //for testing only (end) - - var items = json.listucsmanagerreponse.ucsmanager; - args.response.success({ data: items }); - } - }); + dataProvider: function(args) { + $.ajax({ + url: createURL('listUcsManagers'), + data: { + 
zoneid: args.context.physicalResources[0].id + }, + success: function(json) { + //for testing only (begin) + /* + json = + { + "listucsmanagerreponse": { + "count": 1, + "ucsmanager": [ + { + "id": "07b5b813-83ed-4859-952c-c95cafb63ac4", + "name": "ucsmanager", + "url": "10.223.184.2", + "zoneid": "54c9a65c-ba89-4380-96e9-1d429c5372e3" + } + ] + } + }; + */ + //for testing only (end) + + var items = json.listucsmanagerreponse.ucsmanager; + args.response.success({ data: items }); + } + }); }, actions: { add: { @@ -13693,7 +13710,7 @@ isMaximized: true, noCompact: true, actions: { - remove: { + remove: { label: 'Delete UCS Manager', messages: { confirm: function(args) { @@ -13705,11 +13722,11 @@ }, action: function(args) { var data = { - ucsmanagerid: args.context.ucsManagers[0].id + ucsmanagerid: args.context.ucsManagers[0].id }; $.ajax({ url: createURL('deleteUcsManager'), - data: data, + data: data, success: function(json) { args.response.success(); }, @@ -13724,64 +13741,65 @@ } } } - }, - tabs: { + }, + tabs: { details: { title: 'label.details', fields: [{ name: { - label: 'label.name', + label: 'label.name' } }, { id: { label: 'label.id' - }, + }, url: { label: 'label.url' - }, + } }], - dataProvider: function(args) { + dataProvider: function(args) { $.ajax({ - url: createURL('listUcsManagers'), - data: { - id: args.context.ucsManagers[0].id - }, + url: createURL('listUcsManagers'), + data: { + id: args.context.ucsManagers[0].id + }, success: function(json) { //for testing only (begin) - /* - json = - { - "listucsmanagerreponse": { - "count": 1, - "ucsmanager": [ - { - "id": "07b5b813-83ed-4859-952c-c95cafb63ac4", - "name": "ucsmanager", - "url": "10.223.184.2", - "zoneid": "54c9a65c-ba89-4380-96e9-1d429c5372e3" - } - ] - } - }; - */ - //for testing only (end) - + /* + json = + { + "listucsmanagerreponse": { + "count": 1, + "ucsmanager": [ + { + "id": "07b5b813-83ed-4859-952c-c95cafb63ac4", + "name": "ucsmanager", + "url": "10.223.184.2", + "zoneid": 
"54c9a65c-ba89-4380-96e9-1d429c5372e3" + } + ] + } + }; + */ + //for testing only (end) + var item = json.listucsmanagerreponse.ucsmanager[0]; - args.response.success({ + args.response.success({ data: item }); } }); } - }, - + }, + blades: { title: 'Blades', listView: { id: 'blades', - fields: { + hideSearchBar: true, + fields: { chassis: { label: 'Chassis' }, @@ -13800,125 +13818,184 @@ }, success: function(json) { //for testing only (begin) - /* - json = { - "listucsbladeresponse": { - "count": 4, - "ucsblade": [ - { - "id": "84edb958-cf8a-4e71-99c6-190ccc3fe2bd", - "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", - "bladedn": "sys/chassis-1/blade-1", - "profiledn": "org-root/ls-profile-for-blade-1" - }, - { - "id": "524a3e55-5b61-4561-9464-1b19e3543189", - "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", - "bladedn": "sys/chassis-1/blade-2", - "profiledn": "org-root/ls-profile-for-blade-2" - }, - { - "id": "4828f560-6191-46e6-8a4c-23d1d7d017f0", - "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", - "bladedn": "sys/chassis-1/blade-3" - }, - { - "id": "80ab25c8-3dcf-400e-8849-84dc5e1e6594", - "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", - "bladedn": "sys/chassis-1/blade-4" - } - ] - } - }; - */ - //for testing only (end) - - var items = json.listucsbladeresponse.ucsblade ? 
json.listucsbladeresponse.ucsblade : []; - for (var i = 0; i < items.length; i++) { - addExtraPropertiesToUcsBladeObject(items[i]); + /* + json = { + "listucsbladeresponse": { + "count": 4, + "ucsblade": [ + { + "id": "84edb958-cf8a-4e71-99c6-190ccc3fe2bd", + "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", + "bladedn": "sys/chassis-1/blade-1", + "profiledn": "org-root/ls-profile-for-blade-1" + }, + { + "id": "524a3e55-5b61-4561-9464-1b19e3543189", + "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", + "bladedn": "sys/chassis-1/blade-2", + "profiledn": "org-root/ls-profile-for-blade-2" + }, + { + "id": "4828f560-6191-46e6-8a4c-23d1d7d017f0", + "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", + "bladedn": "sys/chassis-1/blade-3" + }, + { + "id": "80ab25c8-3dcf-400e-8849-84dc5e1e6594", + "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", + "bladedn": "sys/chassis-1/blade-4" + } + ] + } + }; + */ + //for testing only (end) + + var items = json.listucsbladeresponse.ucsblade ? 
json.listucsbladeresponse.ucsblade : []; + for (var i = 0; i < items.length; i++) { + addExtraPropertiesToUcsBladeObject(items[i]); } args.response.success({ - actionFilter: bladeActionfilter, + actionFilter: bladeActionfilter, data: items }); } }); }, actions: { - associateProfileToBlade: { - label: 'Associate Profile to Blade', + refreshUcsBlades: { + isHeader: true, + label: 'Refresh Blades', + messages: { + confirm: function(args) { + return 'Please confirm that you want to refresh blades.'; + }, + notification: function(args) { + return 'Refresh Blades'; + } + }, + action: function(args) { + $.ajax({ + url: createURL('refreshUcsBlades'), + data: { + ucsmanagerid: args.context.ucsManagers[0].id + }, + success: function(json) { + //for testing only (begin) + /* + json = { + "refreshucsbladesresponse": { + "count": 7, + "ucsblade": [ + { + "id": "6c6a2d2c-575e-41ac-9782-eee51b0b80f8", + "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", + "bladedn": "sys/chassis-1/blade-5" + }, + { + "id": "d371d470-a51f-489c-aded-54a63dfd76c7", + "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", + "bladedn": "sys/chassis-1/blade-6" + }, + { + "id": "c0f64591-4a80-4083-bb7b-576220b436a2", + "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", + "bladedn": "sys/chassis-1/blade-7" + }, + { + "id": "74b9b69a-cb16-42f5-aad6-06391ebdd759", + "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", + "bladedn": "sys/chassis-1/blade-1" + }, + { + "id": "713a5adb-0136-484f-9acb-d9203af497be", + "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", + "bladedn": "sys/chassis-1/blade-2" + }, + { + "id": "da633578-21cb-4678-9eb4-981a53198b41", + "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", + "bladedn": "sys/chassis-1/blade-4" + }, + { + "id": "3d491c6e-f0b6-40b0-bf6e-f89efdd73c30", + "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", + "bladedn": "sys/chassis-1/blade-3" + } + ] + } + }; + */ + //for testing only (end) + + /* + var item = 
json.refreshucsbladesresponse.ucsblade[0]; + addExtraPropertiesToUcsBladeObject(item); + args.response.success({ + data: item + }); + */ + $(window).trigger('cloudStack.fullRefresh'); + } + }); + }, + notification: { + poll: function(args) { + args.complete(); + } + } + }, + + associateTemplateToBlade: { + label: 'Instanciate Template and Associate Profile to Blade', addRow: 'false', messages: { notification: function(args) { - return 'Associate Profile to Blade'; + return 'Instanciate Template and Associate Profile to Blade'; } }, createForm: { - title: 'Associate Profile to Blade', + title: 'Instanciate Template and Associate Profile to Blade', fields: { - profiledn: { - label: 'Select Profile', + templatedn: { + label: 'Select Template', select: function(args) { var items = []; $.ajax({ - url: createURL('listUcsProfiles'), + url: createURL('listUcsTemplates'), data: { ucsmanagerid: args.context.ucsManagers[0].id }, async: false, - success: function(json) { + success: function(json) { //for testing only (begin) - /* - json = { - "listucsprofileresponse": { - "count": 5, - "ucsprofile": [ - { - "ucsdn": "org-root/ls-profile-for-blade-2" - }, - { - "ucsdn": "org-root/ls-profile-for-blade-1" - }, - { - "ucsdn": "org-root/ls-simpleProfile" - }, - { - "ucsdn": "org-root/ls-testProfile" - }, - { - "ucsdn": "org-root/ls-UCS_Test" - } - ] - } - }; + /* + json = { + "listucstemplatesresponse": { + "count": 1, + "ucstemplate": [ + { + "ucsdn": "org-root/ls-test" + } + ] + } + }; */ - //for testing only (end) - - var ucsprofiles = json.listucsprofileresponse.ucsprofile; - if (ucsprofiles != null) { - for (var i = 0; i < ucsprofiles.length; i++) { + //for testing only (end) + + var ucstemplates = json.listucstemplatesresponse.ucstemplate; + if (ucstemplates != null) { + for (var i = 0; i < ucstemplates.length; i++) { items.push({ - id: ucsprofiles[i].ucsdn, - description: ucsprofiles[i].ucsdn + id: ucstemplates[i].ucsdn, + description: ucstemplates[i].ucsdn }); } } } }); - 
//for testing only (begin) - /* - items.push({id: 'org-root/ls-testProfile1', description: 'org-root/ls-testProfile1'}); - items.push({id: 'org-root/ls-testProfile2', description: 'org-root/ls-testProfile2'}); - items.push({id: 'org-root/ls-testProfile3', description: 'org-root/ls-testProfile3'}); - items.push({id: 'org-root/ls-testProfile4', description: 'org-root/ls-testProfile4'}); - items.push({id: 'org-root/ls-testProfile5', description: 'org-root/ls-testProfile5'}); - items.push({id: 'org-root/ls-testProfile6', description: 'org-root/ls-testProfile6'}); - items.push({id: 'org-root/ls-testProfile7', description: 'org-root/ls-testProfile7'}); - */ - //for testing only (end) - args.response.success({ data: items }); @@ -13927,64 +14004,75 @@ validation: { required: true } + }, + profilename: { + label: 'Profile' } } }, action: function(args) { - $.ajax({ - url: createURL('associateUcsProfileToBlade'), //This API has been changed from sync to async at 7/25/2013 - data: { + var data = { ucsmanagerid: args.context.ucsManagers[0].id, - profiledn: args.data.profiledn, + templatedn: args.data.templatedn, bladeid: args.context.blades[0].id - }, + }; + + if (args.data.profilename != null && args.data.profilename.length > 0) { + $.extend(data, { + profilename: args.data.profilename + }); + } + + $.ajax({ + url: createURL('instantiateUcsTemplateAndAssocaciateToBlade'), + data: data, success: function(json) { - //for testing only (begin) - /* - json = { - "associateucsprofiletobladeresponse": { - "jobid": "770bec68-7739-4127-8609-4b87bd7867d2" - } - } - */ - //for testing only (end) - - var jid = json.associateucsprofiletobladeresponse.jobid; + //for testing only (begin) + /* + json = { + "instantiateucstemplateandassociatetobladeresponse": { + "jobid": "cd9d0282-4dae-463f-80b6-451e168e2e92" + } + } + */ + //for testing only (end) + + var jid = json.instantiateucstemplateandassociatetobladeresponse.jobid; args.response.success({ _custom: { jobId: jid, - getUpdatedItem: 
function(json) { - //for testing only (begin) - /* - json = { - "queryasyncjobresultresponse": { - "accountid": "b24f6e36-f0ca-11e2-8c16-d637902e3581", - "userid": "b24f7d8d-f0ca-11e2-8c16-d637902e3581", - "cmd": "org.apache.cloudstack.api.AssociateUcsProfileToBladeCmd", - "jobstatus": 1, - "jobprocstatus": 0, - "jobresultcode": 0, - "jobresulttype": "object", - "jobresult": { - "ucsblade": { - "id": "80ab25c8-3dcf-400e-8849-84dc5e1e6594", - "ucsmanagerid": "07b5b813-83ed-4859-952c-c95cafb63ac4", - "bladedn": "sys/chassis-1/blade-4", - "profiledn": "org-root/ls-profile-for-blade-4" - } - }, - "created": "2013-07-26T13:53:01-0700", - "jobid": "770bec68-7739-4127-8609-4b87bd7867d2" - } - }; - */ - //for testing only (end) - - addExtraPropertiesToUcsBladeObject(json.queryasyncjobresultresponse.jobresult.ucsblade); + getUpdatedItem: function(json) { + //for testing only (begin) + /* + json = { + "queryasyncjobresultresponse": { + "accountid": "970b694a-2f8c-11e3-a77d-000c29b36ff5", + "userid": "970b7b4f-2f8c-11e3-a77d-000c29b36ff5", + "cmd": "org.apache.cloudstack.api.InstantiateUcsTemplateAndAssociateToBladeCmd", + "jobstatus": 1, + "jobprocstatus": 0, + "jobresultcode": 0, + "jobresulttype": "object", + "jobresult": { + "ucsblade": { + "id": "3d491c6e-f0b6-40b0-bf6e-f89efdd73c30", + "ucsmanagerid": "9a34c186-12fa-4bbc-af04-5f1a2bf7ae4a", + "bladedn": "sys/chassis-1/blade-3", + "profiledn": "org-root/ls-xxxx" + } + }, + "created": "2013-10-10T17:29:00-0700", + "jobid": "cd9d0282-4dae-463f-80b6-451e168e2e92" + } + }; + */ + //for testing only (end) + + addExtraPropertiesToUcsBladeObject(json.queryasyncjobresultresponse.jobresult.ucsblade); return json.queryasyncjobresultresponse.jobresult.ucsblade; } } - }); + }); } }); }, @@ -13992,81 +14080,88 @@ poll: pollAsyncJobResult } }, - + disassociateProfileFromBlade: { label: 'Disassociate Profile from Blade', addRow: 'false', messages: { - confirm: function(args) { - return 'Please confirm that you want to disassociate 
Profile from Blade.'; - }, notification: function(args) { return 'Disassociate Profile from Blade'; } - }, - action: function(args) { + }, + createForm: { + title: 'Disassociate Profile from Blade', + fields: { + deleteprofile: { + label: 'Delete Profile', + isBoolean: true, + isChecked: true + } + } + }, + action: function(args) { $.ajax({ - url: createURL('disassociateUcsProfileFromBlade'), + url: createURL('disassociateUcsProfileFromBlade'), data: { - //ucsmanagerid: args.context.ucsManagers[0].id, - bladeid: args.context.blades[0].id + bladeid: args.context.blades[0].id, + deleteprofile: (args.data.deleteprofile == 'on'? true: false) }, - success: function(json) { - //for testing only (begin) - /* - json = { - "disassociateucsprofilefrombladeresponse": { - "jobid": "e371592e-31be-4e53-9346-a5c565d420df" - } - } - */ - //for testing only (end) - - var jid = json.disassociateucsprofilefrombladeresponse.jobid; + success: function(json) { + //for testing only (begin) + /* + json = { + "disassociateucsprofilefrombladeresponse": { + "jobid": "e371592e-31be-4e53-9346-a5c565d420df" + } + } + */ + //for testing only (end) + + var jid = json.disassociateucsprofilefrombladeresponse.jobid; args.response.success({ _custom: { jobId: jid, - getUpdatedItem: function(json) { - //for testing only (begin) - /* - json = { - "queryasyncjobresultresponse": { - "accountid": "835fb2d5-0b76-11e3-9350-f4f3e49b5dfe", - "userid": "835fc0e5-0b76-11e3-9350-f4f3e49b5dfe", - "cmd": "org.apache.cloudstack.api.DisassociateUcsProfileCmd", - "jobstatus": 1, - "jobprocstatus": 0, - "jobresultcode": 0, - "jobresulttype": "object", - "jobresult": { - "ucsblade": { - "id": "f8d08575-7a1c-4f79-a588-d129c38bcc4f", - "ucsmanagerid": "0d87c1a6-5664-425c-9024-2ddd9605d260", - "bladedn": "sys/chassis-1/blade-1" - } - }, - "created": "2013-09-13T22:17:29-0700", - "jobid": "2c3698a8-39ac-43e6-8ade-86eb2d3726a0" - } - }; - */ - //for testing only (end) - - 
addExtraPropertiesToUcsBladeObject(json.queryasyncjobresultresponse.jobresult.ucsblade); + getUpdatedItem: function(json) { + //for testing only (begin) + /* + json = { + "queryasyncjobresultresponse": { + "accountid": "835fb2d5-0b76-11e3-9350-f4f3e49b5dfe", + "userid": "835fc0e5-0b76-11e3-9350-f4f3e49b5dfe", + "cmd": "org.apache.cloudstack.api.DisassociateUcsProfileCmd", + "jobstatus": 1, + "jobprocstatus": 0, + "jobresultcode": 0, + "jobresulttype": "object", + "jobresult": { + "ucsblade": { + "id": "f8d08575-7a1c-4f79-a588-d129c38bcc4f", + "ucsmanagerid": "0d87c1a6-5664-425c-9024-2ddd9605d260", + "bladedn": "sys/chassis-1/blade-1" + } + }, + "created": "2013-09-13T22:17:29-0700", + "jobid": "2c3698a8-39ac-43e6-8ade-86eb2d3726a0" + } + }; + */ + //for testing only (end) + + addExtraPropertiesToUcsBladeObject(json.queryasyncjobresultresponse.jobresult.ucsblade); return json.queryasyncjobresultresponse.jobresult.ucsblade; } } - }); + }); } }); }, notification: { poll: pollAsyncJobResult } - } - } + } + } } - } + } } } } @@ -14097,7 +14192,7 @@ } }, - + dataProvider: function(args) { var array1 = []; if(args.filterBy != null) { @@ -14212,6 +14307,7 @@ $form.find('.form-item[rel=sockettimeout]').css('display', 'inline-block'); $form.find('.form-item[rel=createNfsCache]').find('input').attr('checked', 'checked'); + $form.find('.form-item[rel=createNfsCache]').find('input').attr('disabled', 'disabled'); //Create NFS staging is required for S3 at this moment. 
So, disallow user to uncheck "Create NFS Secondary Staging" checkbox $form.find('.form-item[rel=createNfsCache]').css('display', 'inline-block'); $form.find('.form-item[rel=nfsCacheZoneid]').css('display', 'inline-block'); $form.find('.form-item[rel=nfsCacheNfsServer]').css('display', 'inline-block'); @@ -14506,9 +14602,9 @@ $.ajax({ url: createURL('addImageStore'), data: data, - success: function(json) { + success: function(json) { g_regionsecondaryenabled = true; - + var item = json.addimagestoreresponse.imagestore; args.response.success({ data: item @@ -14567,9 +14663,9 @@ $.ajax({ url: createURL('addImageStore'), data: data, - success: function(json) { + success: function(json) { g_regionsecondaryenabled = true; - + var item = json.addimagestoreresponse.imagestore; args.response.success({ data: item @@ -14601,7 +14697,38 @@ detailView: { name: 'Secondary storage details', isMaximized: true, - actions: { + actions: { + prepareObjectStoreMigration: { + label: 'Prepare Object Store Migration', + messages: { + confirm: function(args) { + return 'Please confirm you want to prepare migration of secondary storage to object store.'; + }, + notification: function(args) { + return 'Prepare Object Store Migration'; + } + }, + action: function(args) { + $.ajax({ + url: createURL('prepareSecondaryStorageForMigration'), + data: { + id: args.context.secondaryStorage[0].id + }, + success: function(json) { + var jid = json.preparesecondarystorageformigrationresponse.jobid; + args.response.success({ + _custom: { + jobId: jid + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + remove: { label: 'label.action.delete.secondary.storage', messages: { @@ -14851,7 +14978,7 @@ name: 'Secondary Staging Store details', isMaximized: true, actions: { - remove: { + remove: { label: 'Delete Secondary Staging Store', messages: { confirm: function(args) { @@ -14863,7 +14990,7 @@ }, action: function(args) { var data = { - id: args.context.cacheStorage[0].id + id: 
args.context.cacheStorage[0].id }; $.ajax({ url: createURL('deleteSecondaryStagingStore'), @@ -15022,10 +15149,10 @@ label: 'IPv4 End IP' }, ip6cidr: { - label: 'IPv6 CIDR' + label: 'IPv6 CIDR' }, ip6gateway: { - label: 'IPv6 Gateway' + label: 'IPv6 Gateway' }, startipv6: { label: 'IPv6 Start IP' @@ -15047,12 +15174,12 @@ array2.push("&startip=" + args.data.startipv4); if (args.data.endipv4 != null && args.data.endipv4.length > 0) array2.push("&endip=" + args.data.endipv4); - + if (args.data.ip6cidr != null && args.data.ip6cidr.length > 0) array2.push("&ip6cidr=" + args.data.ip6cidr); if (args.data.ip6gateway != null && args.data.ip6gateway.length > 0) array2.push("&ip6gateway=" + args.data.ip6gateway); - + if (args.data.startipv6 != null && args.data.startipv6.length > 0) array2.push("&startipv6=" + args.data.startipv6); if (args.data.endipv6 != null && args.data.endipv6.length > 0) @@ -16071,6 +16198,11 @@ var jsonObj = args.context.item; var allowedActions = []; allowedActions.push("remove"); + + if (jsonObj.providername == 'NFS') { + allowedActions.push("prepareObjectStoreMigration"); + } + return allowedActions; } @@ -16080,16 +16212,16 @@ if (jsonObj.state == 'Running') { allowedActions.push("stop"); - + //when systemVm is running, scaleUp is not supported for KVM and XenServer. //however, listRouters API doesn't return hypervisor property.... 
/* if (jsonObj.hypervisor != 'KVM' && jsonObj.hypervisor != 'XenServer') { - allowedActions.push("scaleUp"); - } + allowedActions.push("scaleUp"); + } */ allowedActions.push("scaleUp"); - + allowedActions.push("restart"); allowedActions.push("viewConsole"); @@ -16097,7 +16229,7 @@ allowedActions.push("migrate"); } else if (jsonObj.state == 'Stopped') { allowedActions.push("start"); - allowedActions.push("scaleUp"); //when vm is stopped, scaleUp is supported for all hypervisors + allowedActions.push("scaleUp"); //when vm is stopped, scaleUp is supported for all hypervisors allowedActions.push("remove"); } return allowedActions; @@ -16127,37 +16259,37 @@ allowedActions.push("stop"); allowedActions.push("restart"); allowedActions.push("remove"); - - //when systemVm is running, scaleUp is not supported for KVM and XenServer. + + //when systemVm is running, scaleUp is not supported for KVM and XenServer. //however, listSystemVms API doesn't return hypervisor property.... /* if (jsonObj.hypervisor != 'KVM' && jsonObj.hypervisor != 'XenServer') { - allowedActions.push("scaleUp"); - } + allowedActions.push("scaleUp"); + } */ allowedActions.push("scaleUp"); - + allowedActions.push("viewConsole"); if (isAdmin()) allowedActions.push("migrate"); } else if (jsonObj.state == 'Stopped') { allowedActions.push("start"); - allowedActions.push("scaleUp"); //when vm is stopped, scaleUp is supported for all hypervisors + allowedActions.push("scaleUp"); //when vm is stopped, scaleUp is supported for all hypervisors allowedActions.push("remove"); } else if (jsonObj.state == 'Error') { allowedActions.push("remove"); } return allowedActions; } - - var bladeActionfilter = function(args) { + + var bladeActionfilter = function(args) { var jsonObj = args.context.item; var allowedActions = []; if(jsonObj.profiledn == null) { - allowedActions.push("associateProfileToBlade"); + allowedActions.push("associateTemplateToBlade"); } else { - allowedActions.push("disassociateProfileFromBlade"); - } + 
allowedActions.push("disassociateProfileFromBlade"); + } return allowedActions; } diff --git a/ui/scripts/templates.js b/ui/scripts/templates.js index 3f79b09cf3b..f45c2d26927 100644 --- a/ui/scripts/templates.js +++ b/ui/scripts/templates.js @@ -181,10 +181,13 @@ return; var apiCmd; - if (args.zone == -1) - apiCmd = "listHypervisors&zoneid=-1"; - else + if (args.zone == -1) { //All Zones + //apiCmd = "listHypervisors&zoneid=-1"; //"listHypervisors&zoneid=-1" has been changed to return only hypervisors available in all zones (bug 8809) + apiCmd = "listHypervisors"; + } + else { apiCmd = "listHypervisors&zoneid=" + args.zone; + } $.ajax({ url: createURL(apiCmd), @@ -211,10 +214,21 @@ $form.find('.form-item[rel=rootDiskControllerType]').css('display', 'inline-block'); $form.find('.form-item[rel=nicAdapterType]').css('display', 'inline-block'); $form.find('.form-item[rel=keyboardType]').css('display', 'inline-block'); + + $form.find('.form-item[rel=xenserverToolsVersion61plus]').hide(); + } else if ($(this).val() == "XenServer") { + $form.find('.form-item[rel=rootDiskControllerType]').hide(); + $form.find('.form-item[rel=nicAdapterType]').hide(); + $form.find('.form-item[rel=keyboardType]').hide(); + + if (isAdmin()) + $form.find('.form-item[rel=xenserverToolsVersion61plus]').css('display', 'inline-block'); } else { $form.find('.form-item[rel=rootDiskControllerType]').hide(); $form.find('.form-item[rel=nicAdapterType]').hide(); $form.find('.form-item[rel=keyboardType]').hide(); + + $form.find('.form-item[rel=xenserverToolsVersion61plus]').hide(); } }); @@ -222,6 +236,30 @@ } }, + xenserverToolsVersion61plus: { + label: 'XenServer Tools Version 6.1+', + isBoolean: true, + isChecked: function (args) { + var b = false; + if (isAdmin()) { + $.ajax({ + url: createURL('listConfigurations'), + data: { + name: 'xen.pvdriver.version' + }, + async: false, + success: function (json) { + if (json.listconfigurationsresponse.configuration != null && 
json.listconfigurationsresponse.configuration[0].value == 'xenserver61') { + b = true; + } + } + }); + } + return b; + }, + isHidden: true + }, + //fields for hypervisor == "VMware" (starts here) rootDiskControllerType: { label: 'label.root.disk.controller', @@ -341,6 +379,11 @@ id: 'TAR', description: 'TAR' }); + } else if (args.hypervisor == "Hyperv") { + items.push({ + id: 'VHD', + description: 'VHD' + }); } args.response.success({ data: items @@ -436,6 +479,16 @@ }); } + + //XenServer only (starts here) + if (args.$form.find('.form-item[rel=xenserverToolsVersion61plus]').css("display") != "none") { + $.extend(data, { + 'details[0].hypervisortoolsversion': (args.data.xenserverToolsVersion61plus == "on") ? "xenserver61" : "xenserver56" + }); + } + //XenServer only (ends here) + + //VMware only (starts here) if (args.$form.find('.form-item[rel=rootDiskControllerType]').css("display") != "none" && args.data.rootDiskControllerType != "") { $.extend(data, { @@ -547,7 +600,7 @@ //***** updateTemplate ***** var data = { id: args.context.templates[0].id, - zoneid: args.context.templates[0].zoneid, + //zoneid: args.context.templates[0].zoneid, //can't update template/ISO in only one zone. It always get updated in all zones. name: args.data.name, displaytext: args.data.displaytext, ostypeid: args.data.ostypeid, @@ -567,7 +620,7 @@ //***** updateTemplatePermissions ***** var data = { id: args.context.templates[0].id, - zoneid: args.context.templates[0].zoneid + //zoneid: args.context.templates[0].zoneid //can't update template/ISO in only one zone. It always get updated in all zones. }; //if args.data.ispublic is undefined, do not pass ispublic to API call. 
@@ -609,6 +662,53 @@ } }); + + //***** addResourceDetail ***** + //XenServer only (starts here) + if(args.$detailView.find('form').find('div .detail-group').find('.xenserverToolsVersion61plus').length > 0) { + $.ajax({ + url: createURL('addResourceDetail'), + data: { + resourceType: 'template', + resourceId: args.context.templates[0].id, + 'details[0].key': 'hypervisortoolsversion', + 'details[0].value': (args.data.xenserverToolsVersion61plus == "on") ? 'xenserver61' : 'xenserver56' + }, + success: function(json) { + var jobId = json.addResourceDetailresponse.jobid; + var addResourceDetailIntervalID = setInterval(function() { + $.ajax({ + url: createURL("queryAsyncJobResult&jobid=" + jobId), + dataType: "json", + success: function(json) { + var result = json.queryasyncjobresultresponse; + + if (result.jobstatus == 0) { + return; //Job has not completed + } else { + clearInterval(addResourceDetailIntervalID); + + if (result.jobstatus == 1) { + //do nothing + } else if (result.jobstatus == 2) { + cloudStack.dialog.notice({ + message: "Failed to update XenServer Tools Version 6.1+ field. Error: " + _s(result.jobresult.errortext) + }); + } + } + }, + error: function(XMLHttpResponse) { + cloudStack.dialog.notice({ + message: "Failed to update XenServer Tools Version 6.1+ field. 
Error: " + parseXMLHttpResponse(XMLHttpResponse) + }); + } + }); + }, g_queryAsyncJobResultInterval); + } + }); + } + //XenServer only (ends here) + //***** listTemplates ***** //So, we call listTemplates API to get a complete template object @@ -797,8 +897,13 @@ if (isAdmin()) { hiddenFields = []; } else { - hiddenFields = ["hypervisor"]; + hiddenFields = ["hypervisor", 'xenserverToolsVersion61plus']; } + + if ('templates' in args.context && args.context.templates[0].hypervisor != 'XenServer') { + hiddenFields.push('xenserverToolsVersion61plus'); + } + return hiddenFields; }, @@ -830,6 +935,17 @@ hypervisor: { label: 'label.hypervisor' }, + xenserverToolsVersion61plus: { + label: 'XenServer Tools Version 6.1+', + isBoolean: true, + isEditable: function () { + if (isAdmin()) + return true; + else + return false; + }, + converter: cloudStack.converters.toBooleanText + }, templatetype: { label: 'label.type' }, @@ -956,13 +1072,21 @@ url: createURL(apiCmd), dataType: "json", success: function(json) { - args.response.success({ - actionFilter: templateActionfilter, - data: json.listtemplatesresponse.template[0] - }); + var jsonObj = json.listtemplatesresponse.template[0]; + + if ('details' in jsonObj && 'hypervisortoolsversion' in jsonObj.details) { + if (jsonObj.details.hypervisortoolsversion == 'xenserver61') + jsonObj.xenserverToolsVersion61plus = true; + else + jsonObj.xenserverToolsVersion61plus = false; + } + + args.response.success({ + actionFilter: templateActionfilter, + data: jsonObj + }); } }); - } } } @@ -1295,7 +1419,7 @@ //***** updateIso ***** var data = { id: args.context.isos[0].id, - zoneid: args.context.isos[0].zoneid, + //zoneid: args.context.isos[0].zoneid, //can't update template/ISO in only one zone. It always get updated in all zones. 
name: args.data.name, displaytext: args.data.displaytext, ostypeid: args.data.ostypeid @@ -1313,7 +1437,7 @@ //***** updateIsoPermissions ***** var data = { id: args.context.isos[0].id, - zoneid: args.context.isos[0].zoneid, + //zoneid: args.context.isos[0].zoneid //can't update template/ISO in only one zone. It always get updated in all zones. }; //if args.data.ispublic is undefined, do not pass ispublic to API call. if (args.data.ispublic == "on") { diff --git a/ui/scripts/ui-custom/granularSettings.js b/ui/scripts/ui-custom/granularSettings.js index 06aa82a20dd..5ab60b7af97 100644 --- a/ui/scripts/ui-custom/granularSettings.js +++ b/ui/scripts/ui-custom/granularSettings.js @@ -29,6 +29,9 @@ name: { label: 'label.name' }, + description: { + label: 'label.description' + }, value: { label: 'label.value', editable: true diff --git a/ui/scripts/ui-custom/instanceWizard.js b/ui/scripts/ui-custom/instanceWizard.js index f4d4560b60f..4368ec47ac3 100644 --- a/ui/scripts/ui-custom/instanceWizard.js +++ b/ui/scripts/ui-custom/instanceWizard.js @@ -267,6 +267,8 @@ }, 'select-iso': function($step, formData) { + $step.find('.section.custom-size').hide(); + var originalValues = function(formData) { var $inputs = $step.find('.wizard-step-conditional:visible') .find('input[type=radio]'); @@ -288,7 +290,7 @@ return { response: { - success: function(args) { + success: function(args) { if (formData['select-template']) { $step.find('.wizard-step-conditional').filter(function() { return $(this).hasClass(formData['select-template']); @@ -380,6 +382,21 @@ }); originalValues(formData); + + var custom = args.customHidden({ + context: context, + data: args.data + }); + + $step.find('.custom-size-label').remove(); + + if (!custom) { + $step.find('.section.custom-size').show(); + $step.addClass('custom-disk-size'); + } else { + $step.find('.section.custom-size').hide(); + $step.removeClass('custom-disk-size'); + } } } }; @@ -407,7 +424,27 @@ }, { 'wizard-field': 'service-offering' }) - ); + 
); + + $step.find('input[type=radio]').bind('change', function() { + var $target = $(this); + var val = $target.val(); + var item = $.grep(args.data.serviceOfferings, function(elem) { + return elem.id == val; + })[0]; + + if (!item) return true; + + var custom = item[args.customFlag]; + + if (custom) { + $step.addClass('custom-size'); + } else { + $step.removeClass('custom-size'); + } + + return true; + }); originalValues(formData); } @@ -736,16 +773,23 @@ filterNetworkList(-1); // Security groups (alt. page) - $step.find('.security-groups .select-container').append( - makeSelects('security-groups', args.data.securityGroups, { - name: 'name', - desc: 'description', - id: 'id' - }, { - type: 'checkbox', - 'wizard-field': 'security-groups' - }) - ); + var $sgSelects = makeSelects('security-groups', args.data.securityGroups, { + name: 'name', + desc: 'description', + id: 'id' + }, { + type: 'checkbox', + 'wizard-field': 'security-groups' + }); + $step.find('.security-groups .select-container').append($sgSelects); + + //If there is only one security group and the only one is 'default', make it selected by default + if ($sgSelects.length == 1) { + var $firstCheckbox = $sgSelects.eq(0); + if ($firstCheckbox.find('div .name').text() == 'default') { + $firstCheckbox.find('input:checkbox').click(); + } + } originalValues(formData); checkShowAddNetwork($newNetwork); @@ -1007,20 +1051,24 @@ // Setup tabs and slider $wizard.find('.section.custom-size .size.max span').html(maxCustomDiskSize); $wizard.find('.tab-view').tabs(); - $wizard.find('.slider').slider({ - min: 1, - max: maxCustomDiskSize, - start: function(event) { - $wizard.find('div.data-disk-offering div.custom-size input[type=radio]').click(); - }, - slide: function(event, ui) { - $wizard.find('div.data-disk-offering div.custom-size input[type=text]').val( - ui.value - ); - $wizard.find('div.data-disk-offering span.custom-disk-size').html( - ui.value - ); - } + $wizard.find('.slider').each(function() { + var $slider 
= $(this); + + $slider.slider({ + min: 1, + max: maxCustomDiskSize, + start: function(event) { + $slider.closest('.section.custom-size').find('input[type=radio]').click(); + }, + slide: function(event, ui) { + $slider.closest('.section.custom-size').find('input[type=text]').val( + ui.value + ); + $slider.closest('.step').find('span.custom-disk-size').html( + ui.value + ); + } + }); }); $wizard.find('div.data-disk-offering div.custom-size input[type=text]').bind('change', function() { diff --git a/ui/scripts/ui-custom/physicalResources.js b/ui/scripts/ui-custom/physicalResources.js index f1492eb2405..fcc2f6ab5bd 100644 --- a/ui/scripts/ui-custom/physicalResources.js +++ b/ui/scripts/ui-custom/physicalResources.js @@ -38,6 +38,30 @@ var $elem = $dashboard.find('[data-item=' + key + ']'); $elem.hide().html(value).fadeIn(); }); + + // Socket info + var $socketInfo = $dashboard.find('.socket-info ul'); + $socketInfo.find('li').remove(); // Clean up + $(args.data.socketInfo).each(function() { + var item = this; + var name = item.name; + var hosts = item.hosts; + var sockets = item.sockets; + + var $li = $('
  • ').append( + $('
    ').addClass('name').html(name), + $('
    ').addClass('hosts').append( + $('
    ').addClass('title').html(_l('label.hosts')), + $('
    ').addClass('value').html(hosts) + ), + $('
    ').addClass('sockets').append( + $('
    ').addClass('title').html(_l('label.sockets')), + $('
    ').addClass('value').html(sockets) + ) + ); + + $li.appendTo($socketInfo); + }); } } }); diff --git a/ui/scripts/ui-custom/zoneWizard.js b/ui/scripts/ui-custom/zoneWizard.js index cf52107ed33..28df1933fb2 100644 --- a/ui/scripts/ui-custom/zoneWizard.js +++ b/ui/scripts/ui-custom/zoneWizard.js @@ -715,10 +715,12 @@ $('