Merge branch 'master' into javelin

Alex Huang 2012-09-06 16:31:32 -07:00
commit a1bd6d7eb7
30 changed files with 629 additions and 6062 deletions

LICENSE

@ -375,7 +375,7 @@ Within the deps/awsapi-lib directory
cloud-gson.jar http://code.google.com/p/google-gson/
licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above)
$license.CopyrightNotice
from Json.simple Project http://code.google.com/p/json-simple/
json_simple-1.1.jar http://code.google.com/p/json-simple/source/checkout
@ -2368,7 +2368,8 @@ Within the target/jar directory
cloud-backport-util-concurrent-3.0.jar
licensed under the Apache License, Version 1.1 http://www.apache.org/licenses/LICENSE-1.1 (as follows)
Copyright © 2012 The Apache Software Foundation
/* ====================================================================
* The Apache Software License, Version 1.1
@ -2462,11 +2463,42 @@ Within the target/jar directory
cloud-google-gson-1.7.1.jar http://code.google.com/p/google-gson/
licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above)
$license.CopyrightNotice
from Jetty Committers http://jetty.codehaus.org/jetty/
jetty-6.1.26.jar http://repo1.maven.org/maven2/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26-sources.jar
jetty-util-6.1.26.jar http://repo1.maven.org/maven2/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26-sources.jar
licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows)
Copyright © 2009, Caringo, Inc.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
from Caringo, Inc. http://www.caringo.com/
CAStorSDK.jar http://www.castor.org/download.html
licensed under the BSD (3-clause) http://www.opensource.org/licenses/BSD-3-Clause (as follows)
Copyright © 2002-2011 Atsuhiko Yamanaka, JCraft,Inc.
@ -4118,7 +4150,8 @@ Within the ui/lib/jquery-ui directory
Within the ui/lib/qunit directory
licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows)
Copyright © 2012 John Resig, Jörn Zaefferer
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the


@ -1,370 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.agent.vmdata;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.File;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.codec.binary.Base64;
import org.apache.log4j.Logger;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Handler;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.handler.DefaultHandler;
import org.mortbay.jetty.handler.HandlerList;
import org.mortbay.jetty.handler.ResourceHandler;
import org.mortbay.jetty.nio.SelectChannelConnector;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.ServletHolder;
import org.mortbay.thread.QueuedThreadPool;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.routing.VmDataCommand;
import com.cloud.agent.api.to.NicTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.network.Networks.TrafficType;
import com.cloud.storage.JavaStorageLayer;
import com.cloud.storage.StorageLayer;
import com.cloud.utils.net.NetUtils;
import com.cloud.utils.script.Script;
/**
* Serves vm data using embedded Jetty server
*
*/
@Local(value = { VmDataServer.class })
public class JettyVmDataServer implements VmDataServer {
private static final Logger s_logger = Logger
.getLogger(JettyVmDataServer.class);
public static final String USER_DATA = "user-data";
public static final String META_DATA = "meta-data";
protected String _vmDataDir;
protected Server _jetty;
protected String _hostIp;
protected Map<String, String> _ipVmMap = new HashMap<String, String>();
protected StorageLayer _fs = new JavaStorageLayer();
public class VmDataServlet extends HttpServlet {
private static final long serialVersionUID = -1640031398971742349L;
JettyVmDataServer _vmDataServer;
String _dataType; // userdata or meta-data
public VmDataServlet(JettyVmDataServer dataServer, String dataType) {
this._vmDataServer = dataServer;
this._dataType = dataType;
}
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
int port = req.getServerPort();
if (port != 80 && port != 8000) {
resp.sendError(HttpServletResponse.SC_NOT_FOUND,
"Request not understood");
return;
}
if (_dataType.equalsIgnoreCase(USER_DATA)) {
handleUserData(req, resp);
} else if (_dataType.equalsIgnoreCase(META_DATA)) {
handleMetaData(req, resp);
}
}
protected void handleUserData(HttpServletRequest req,
HttpServletResponse resp) throws ServletException, IOException {
String metadataItem = req.getPathInfo();
String requester = req.getRemoteAddr();
resp.setContentType("text/html");
resp.setStatus(HttpServletResponse.SC_OK);
String data = null;
if (metadataItem != null) {
String[] path = metadataItem.split("/");
if (path.length > 1) {
metadataItem = path[1];
}
}
if (metadataItem != null)
data = _vmDataServer.getVmDataItem(requester, metadataItem);
if (data != null) {
resp.getWriter().print(data);
} else {
resp.setStatus(HttpServletResponse.SC_NOT_FOUND);
resp.sendError(HttpServletResponse.SC_NOT_FOUND,
"Request not found");
}
}
protected void handleMetaData(HttpServletRequest req,
HttpServletResponse resp) throws ServletException, IOException {
String metadataItem = req.getPathInfo();
String requester = req.getRemoteAddr();
resp.setContentType("text/html");
resp.setStatus(HttpServletResponse.SC_OK);
String metaData = _vmDataServer.getVmDataItem(requester,
metadataItem);
if (metaData != null) {
resp.getWriter().print(
_vmDataServer.getVmDataItem(requester, metadataItem));
} else {
resp.sendError(HttpServletResponse.SC_NOT_FOUND,
"Request not found");
}
}
}
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
boolean success = true;
try {
int vmDataPort = 80;
int fileservingPort = 8000;
_vmDataDir = (String) params.get("vm.data.dir");
String port = (String) params.get("vm.data.port");
if (port != null) {
vmDataPort = Integer.parseInt(port);
}
port = (String) params.get("file.server.port");
if (port != null) {
fileservingPort = Integer.parseInt(port);
}
_hostIp = (String) params.get("host.ip");
if (_vmDataDir == null) {
_vmDataDir = "/var/www/html";
}
success = _fs.mkdirs(_vmDataDir);
success = success && buildIpVmMap();
if (success) {
setupJetty(vmDataPort, fileservingPort);
}
} catch (Exception e) {
s_logger.warn("Failed to configure jetty", e);
throw new ConfigurationException("Failed to configure jetty!!");
}
return success;
}
protected boolean buildIpVmMap() {
String[] dirs = _fs.listFiles(_vmDataDir);
for (String dir : dirs) {
String[] path = dir.split("/");
String vm = path[path.length - 1];
if (vm.startsWith("i-")) {
String[] dataFiles = _fs.listFiles(dir);
for (String dfile : dataFiles) {
String path2[] = dfile.split("/");
String ipv4file = path2[path2.length - 1];
if (ipv4file.equalsIgnoreCase("local-ipv4")) {
try {
BufferedReader input = new BufferedReader(
new FileReader(dfile));
String line = null;
while ((line = input.readLine()) != null) {
if (NetUtils.isValidIp(line)) {
_ipVmMap.put(line, vm);
s_logger.info("Found ip " + line
+ " for vm " + vm);
} else {
s_logger.info("Invalid ip " + line
+ " for vm " + vm);
}
}
} catch (FileNotFoundException e) {
s_logger.warn("Failed to find file " + dfile);
} catch (IOException e) {
s_logger.warn("Failed to get ip address of " + vm);
}
}
}
}
}
return true;
}
public String getVmDataItem(String requester, String dataItem) {
String vmName = _ipVmMap.get(requester);
if (vmName == null) {
return null;
}
String vmDataFile = _vmDataDir + File.separator + vmName
+ File.separator + dataItem;
try {
BufferedReader input = new BufferedReader(
new FileReader(vmDataFile));
StringBuilder result = new StringBuilder();
String line = null;
while ((line = input.readLine()) != null) {
result.append(line);
}
input.close();
return result.toString();
} catch (FileNotFoundException e) {
s_logger.warn("Failed to find requested file " + vmDataFile);
return null;
} catch (IOException e) {
s_logger.warn("Failed to read requested file " + vmDataFile);
return null;
}
}
private void setupJetty(int vmDataPort, int fileservingPort)
throws Exception {
_jetty = new Server();
SelectChannelConnector connector0 = new SelectChannelConnector();
connector0.setHost(_hostIp);
connector0.setPort(fileservingPort);
connector0.setMaxIdleTime(30000);
connector0.setRequestBufferSize(8192);
SelectChannelConnector connector1 = new SelectChannelConnector();
connector1.setHost(_hostIp);
connector1.setPort(vmDataPort);
connector1.setThreadPool(new QueuedThreadPool(5));
connector1.setMaxIdleTime(30000);
connector1.setRequestBufferSize(8192);
_jetty.setConnectors(new Connector[] { connector0, connector1 });
Context root = new Context(_jetty, "/latest", Context.SESSIONS);
root.setResourceBase(_vmDataDir);
root.addServlet(new ServletHolder(new VmDataServlet(this, USER_DATA)),
"/*");
ResourceHandler resource_handler = new ResourceHandler();
resource_handler.setResourceBase("/var/lib/images/");
HandlerList handlers = new HandlerList();
handlers.setHandlers(new Handler[] { root, resource_handler,
new DefaultHandler() });
_jetty.setHandler(handlers);
_jetty.start();
// _jetty.join();
}
@Override
public boolean start() {
// TODO Auto-generated method stub
return false;
}
@Override
public boolean stop() {
return true;
}
@Override
public String getName() {
return "JettyVmDataServer";
}
@Override
public Answer handleVmDataCommand(VmDataCommand cmd) {
String vmDataDir = _vmDataDir + File.separator + cmd.getVmName();
Script.runSimpleBashScript("rm -rf " + vmDataDir);
_fs.mkdirs(vmDataDir);
for (String[] item : cmd.getVmData()) {
try {
_fs.create(vmDataDir, item[1]);
String vmDataFile = vmDataDir + File.separator + item[1];
byte[] data;
if (item[2] != null) {
if (item[1].equals("user-data")) {
data = Base64.decodeBase64(item[2]);
} else {
data = item[2].getBytes();
}
if (data != null && data.length > 0) {
FileOutputStream writer = new FileOutputStream(
vmDataFile);
writer.write(data);
writer.close();
}
}
} catch (IOException e) {
s_logger.warn("Failed to write vm data item " + item[1], e);
return new Answer(cmd, false, "Failed to write vm data item "
+ item[1]);
}
}
return new Answer(cmd);
}
@Override
public void handleVmStarted(VirtualMachineTO vm) {
for (NicTO nic : vm.getNics()) {
if (nic.getType() == TrafficType.Guest) {
if (nic.getIp() != null) {
String ipv4File = _vmDataDir + File.separator
+ vm.getName() + File.separator + "local-ipv4";
try {
_fs.create(_vmDataDir + File.separator + vm.getName(),
"local-ipv4");
BufferedWriter writer = new BufferedWriter(
new FileWriter(ipv4File));
writer.write(nic.getIp());
_ipVmMap.put(nic.getIp(), vm.getName());
writer.close();
} catch (IOException e) {
s_logger.warn(
"Failed to create or write to local-ipv4 file "
+ ipv4File, e);
}
}
}
}
}
@Override
public void handleVmStopped(String vmName) {
String vmDataDir = _vmDataDir + File.separator + vmName;
Script.runSimpleBashScript("rm -rf " + vmDataDir);
}
}
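
For context on the file removed above: JettyVmDataServer answered plain HTTP GETs for user-data and meta-data under the /latest context on port 80 (or 8000), resolving the data item from the caller's source IP. The client sketch below is illustrative only and is not part of this commit; the address 10.1.1.1 is a placeholder for the agent's configured "host.ip", and the class name is invented for the example.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

// Illustrative client only; assumes the vm data server above is reachable on port 80.
public class VmDataClientSketch {
    public static void main(String[] args) throws Exception {
        // 10.1.1.1 is a placeholder for the agent's configured "host.ip".
        URL url = new URL("http://10.1.1.1/latest/user-data");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("GET");
        // The servlet looks up the data item by the caller's source IP (see getVmDataItem above).
        BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream()));
        StringBuilder body = new StringBuilder();
        String line;
        while ((line = in.readLine()) != null) {
            body.append(line).append('\n');
        }
        in.close();
        System.out.print(body);
    }
}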


@ -94,6 +94,11 @@
<artifactId>jasypt</artifactId>
<version>${cs.jasypt.version}</version>
</dependency>
<dependency>
<groupId>com.caringo.client</groupId>
<artifactId>CAStorSDK</artifactId>
<version>1.3.1-CS40</version>
</dependency>
</dependencies>
<build>
<defaultGoal>install</defaultGoal>

File diff suppressed because it is too large.


@ -1,4 +1,20 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- This file was auto-generated from WSDL -->
<!-- by the Apache Axis2 version: 1.5 Built on : Apr 30, 2009 (06:07:24 EDT) -->
<serviceGroup>


@ -0,0 +1,479 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.bridge.io;
import java.util.Arrays;
import java.util.HashSet;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import javax.activation.DataHandler;
import javax.activation.DataSource;
import org.apache.log4j.Logger;
import com.cloud.bridge.service.core.s3.S3BucketAdapter;
import com.cloud.bridge.service.core.s3.S3MultipartPart;
import com.cloud.bridge.service.exception.ConfigurationException;
import com.cloud.bridge.service.exception.FileNotExistException;
import com.cloud.bridge.service.exception.InternalErrorException;
import com.cloud.bridge.service.exception.OutOfStorageException;
import com.cloud.bridge.service.exception.UnsupportedException;
import com.cloud.bridge.util.StringHelper;
import com.cloud.bridge.util.OrderedPair;
import com.caringo.client.locate.Locator;
import com.caringo.client.locate.StaticLocator;
import com.caringo.client.locate.ZeroconfLocator;
import com.caringo.client.ResettableFileInputStream;
import com.caringo.client.ScspClient;
import com.caringo.client.ScspExecutionException;
import com.caringo.client.ScspHeaders;
import com.caringo.client.ScspQueryArgs;
import com.caringo.client.ScspResponse;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.Header;
/**
* Creates an SCSP client to a CAStor cluster, configured in "storage.root",
* and uses CAStor as the back-end storage instead of a file system.
*/
public class S3CAStorBucketAdapter implements S3BucketAdapter {
protected final static Logger s_logger = Logger.getLogger(S3CAStorBucketAdapter.class);
private static final int HTTP_OK = 200;
private static final int HTTP_CREATED = 201;
private static final int HTTP_UNSUCCESSFUL = 300;
private static final int HTTP_PRECONDITION_FAILED = 412;
// For ScspClient
private static final int DEFAULT_SCSP_PORT = 80;
private static final int DEFAULT_MAX_POOL_SIZE = 50;
private static final int DEFAULT_MAX_RETRIES = 5;
private static final int CONNECTION_TIMEOUT = 60 * 1000; // Request activity timeout - 1 minute
private static final int CM_IDLE_TIMEOUT = 60 * 1000; // HttpConnectionManager idle timeout - 1 minute
private static final int LOCATOR_RETRY_TIMEOUT = 0; // StaticLocator pool retry timeout
private ScspClient _scspClient; // talks to CAStor cluster
private Locator _locator; // maintains list of CAStor nodes
private String _domain; // domain where all CloudStack streams will live
private synchronized ScspClient myClient(String mountedRoot) {
if (_scspClient!=null) {
return _scspClient;
}
// The castor cluster is specified either by listing the ip addresses of some nodes, or
// by specifying "zeroconf=" and the cluster's mdns name -- this is "cluster" in castor's node.cfg.
// The "domain" to store streams can be specified. If not specified, streams will be written
// without a "domain" query arg, so they will go into the castor default domain.
// The port is optional and must be at the end of the config string, defaults to 80.
// Examples: "castor 172.16.78.130 172.16.78.131 80", "castor 172.16.78.130 domain=mycluster.example.com",
// "castor zeroconf=mycluster.example.com domain=mycluster.example.com 80"
String[] cfg = mountedRoot.split(" ");
int numIPs = cfg.length-1;
String possiblePort = cfg[cfg.length-1];
int castorPort = DEFAULT_SCSP_PORT;
try {
castorPort = Integer.parseInt(possiblePort);
--numIPs;
} catch (NumberFormatException nfe) {
// okay, it's an ip address, not a port number
}
if (numIPs <= 0) {
throw new ConfigurationException("No CAStor nodes specified in '" + mountedRoot + "'");
}
HashSet<String> ips = new HashSet<String>();
String clusterName = null;
for ( int i = 0; i < numIPs; ++i ) {
String option = cfg[i+1]; // ip address or zeroconf=mycluster.example.com or domain=mydomain.example.com
if (option.toLowerCase().startsWith("zeroconf=")) {
String[] confStr = option.split("=");
if (confStr.length != 2) {
throw new ConfigurationException("Could not parse cluster name from '" + option + "'");
}
clusterName = confStr[1];
} else if (option.toLowerCase().startsWith("domain=")) {
String[] confStr = option.split("=");
if (confStr.length != 2) {
throw new ConfigurationException("Could not parse domain name from '" + option + "'");
}
_domain = confStr[1];
} else {
ips.add(option);
}
}
if (clusterName == null && ips.isEmpty()) {
throw new ConfigurationException("No CAStor nodes specified in '" + mountedRoot + "'");
}
String[] castorNodes = ips.toArray(new String[0]); // list of configured nodes
if (clusterName == null) {
try {
_locator = new StaticLocator(castorNodes, castorPort, LOCATOR_RETRY_TIMEOUT);
_locator.start();
} catch (IOException e) {
throw new ConfigurationException("Could not create CAStor static locator for '" +
Arrays.toString(castorNodes) + "'");
}
} else {
try {
clusterName = clusterName.replace(".", "_"); // workaround needed for CAStorSDK 1.3.1
_locator = new ZeroconfLocator(clusterName);
_locator.start();
} catch (IOException e) {
throw new ConfigurationException("Could not create CAStor zeroconf locator for '" + clusterName + "'");
}
}
try {
s_logger.info("CAStor client starting: " + (_domain==null ? "default domain" : "domain " + _domain) + " " + (clusterName==null ? Arrays.toString(castorNodes) : clusterName) + " :" + castorPort);
_scspClient = new ScspClient(_locator, castorPort, DEFAULT_MAX_POOL_SIZE, DEFAULT_MAX_RETRIES, CONNECTION_TIMEOUT, CM_IDLE_TIMEOUT);
_scspClient.start();
} catch (Exception e) {
s_logger.error("Unable to create CAStor client for '" + mountedRoot + "': " + e.getMessage(), e);
throw new ConfigurationException("Unable to create CAStor client for '" + mountedRoot + "': " + e);
}
return _scspClient;
}
private String castorURL(String mountedRoot, String bucket, String fileName) {
// TODO: Replace this method with access to ScspClient's Locator,
// or add read method that returns the body as an unread
// InputStream for use by loadObject() and loadObjectRange().
myClient(mountedRoot); // make sure castorNodes and castorPort initialized
InetSocketAddress nodeAddr = _locator.locate();
if (nodeAddr == null) {
throw new ConfigurationException("Unable to locate CAStor node with locator " + _locator);
}
InetAddress nodeInetAddr = nodeAddr.getAddress();
if (nodeInetAddr == null) {
_locator.foundDead(nodeAddr);
throw new ConfigurationException("Unable to resolve CAStor node name '" + nodeAddr.getHostName() +
"' to IP address");
}
return "http://" + nodeInetAddr.getHostAddress() + ":" + nodeAddr.getPort() + "/" + bucket + "/" + fileName +
(_domain==null ? "" : "?domain=" + _domain);
}
private ScspQueryArgs domainQueryArg() {
ScspQueryArgs qa = new ScspQueryArgs();
if (this._domain != null)
qa.setValue("domain", this._domain);
return qa;
}
public S3CAStorBucketAdapter() {
// TODO: is there any way to initialize CAStor client here, can it
// get to config?
}
@Override
public void createContainer(String mountedRoot, String bucket) {
try {
ScspResponse bwResponse = myClient(mountedRoot).write(bucket, new ByteArrayInputStream("".getBytes()), 0, domainQueryArg(), new ScspHeaders());
if (bwResponse.getHttpStatusCode() != HTTP_CREATED) {
if (bwResponse.getHttpStatusCode() == HTTP_PRECONDITION_FAILED)
s_logger.error("CAStor unable to create bucket " + bucket + " because domain " +
(this._domain==null ? "(default)" : this._domain) + " does not exist");
else
s_logger.error("CAStor unable to create bucket " + bucket + ": " + bwResponse.getHttpStatusCode());
throw new OutOfStorageException("CAStor unable to create bucket " + bucket + ": " +
bwResponse.getHttpStatusCode());
}
} catch (ScspExecutionException e) {
s_logger.error("CAStor unable to create bucket " + bucket, e);
throw new OutOfStorageException("CAStor unable to create bucket " + bucket + ": " + e.getMessage());
}
}
@Override
public void deleteContainer(String mountedRoot, String bucket) {
try {
ScspResponse bwResponse = myClient(mountedRoot).delete("", bucket, domainQueryArg(), new ScspHeaders());
if (bwResponse.getHttpStatusCode() >= HTTP_UNSUCCESSFUL) {
s_logger.error("CAStor unable to delete bucket " + bucket + ": " + bwResponse.getHttpStatusCode());
throw new OutOfStorageException("CAStor unable to delete bucket " + bucket + ": " +
bwResponse.getHttpStatusCode());
}
} catch (ScspExecutionException e) {
s_logger.error("CAStor unable to delete bucket " + bucket, e);
throw new OutOfStorageException("CAStor unable to delete bucket " + bucket + ": " + e.getMessage());
}
}
@Override
public String saveObject(InputStream is, String mountedRoot, String bucket, String fileName)
{
// TODO: Currently this writes the object to a temporary file,
// so that the MD5 can be computed and so that we have the
// stream length needed by this version of CAStor SDK. Will
// change to calculate MD5 while streaming to CAStor and to
// either pass Content-length to this method or use newer SDK
// that doesn't require it.
FileOutputStream fos = null;
MessageDigest md5 = null;
try {
md5 = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
s_logger.error("Unexpected exception " + e.getMessage(), e);
throw new InternalErrorException("Unable to get MD5 MessageDigest", e);
}
File spoolFile = null;
try {
spoolFile = File.createTempFile("castor", null);
} catch (IOException e) {
s_logger.error("Unexpected exception creating temporary CAStor spool file: " + e.getMessage(), e);
throw new InternalErrorException("Unable to create temporary CAStor spool file", e);
}
try {
String retVal;
int streamLen = 0;
try {
fos = new FileOutputStream(spoolFile);
byte[] buffer = new byte[4096];
int len = 0;
while( (len = is.read(buffer)) > 0) {
fos.write(buffer, 0, len);
streamLen = streamLen + len;
md5.update(buffer, 0, len);
}
//Convert MD5 digest to (lowercase) hex String
retVal = StringHelper.toHexString(md5.digest());
} catch(IOException e) {
s_logger.error("Unexpected exception " + e.getMessage(), e);
throw new OutOfStorageException(e);
} finally {
try {
if (null != fos)
fos.close();
} catch( Exception e ) {
s_logger.error("Can't close CAStor spool file " +
spoolFile.getAbsolutePath() + ": " + e.getMessage(), e);
throw new OutOfStorageException("Unable to close CAStor spool file: " + e.getMessage(), e);
}
}
try {
ScspResponse bwResponse =
myClient(mountedRoot).write(bucket + "/" + fileName,
new ResettableFileInputStream(spoolFile), streamLen,
domainQueryArg(), new ScspHeaders());
if (bwResponse.getHttpStatusCode() >= HTTP_UNSUCCESSFUL) {
s_logger.error("CAStor write responded with error " + bwResponse.getHttpStatusCode());
throw new OutOfStorageException("Unable to write object to CAStor " +
bucket + "/" + fileName + ": " + bwResponse.getHttpStatusCode());
}
} catch (ScspExecutionException e) {
s_logger.error("Unable to write object to CAStor " + bucket + "/" + fileName, e);
throw new OutOfStorageException("Unable to write object to CAStor " + bucket + "/" + fileName + ": " +
e.getMessage());
} catch (IOException ie) {
s_logger.error("Unable to write object to CAStor " + bucket + "/" + fileName, ie);
throw new OutOfStorageException("Unable to write object to CAStor " + bucket + "/" + fileName + ": " +
ie.getMessage());
}
return retVal;
} finally {
try {
if (!spoolFile.delete()) {
s_logger.error("Failed to delete CAStor spool file " + spoolFile.getAbsolutePath());
}
} catch (SecurityException e) {
s_logger.error("Unable to delete CAStor spool file " + spoolFile.getAbsolutePath(), e);
}
}
}
/**
* From a list of files (each being one part of the multipart upload), concatenate all files into a single
* object that can be accessed by normal S3 calls. This function could take a long time, since a multipart
* upload is allowed to have up to 10,000 parts (each up to 5 GiB). Amazon specifies that while this operation
* is in progress, whitespace is sent back to the client in order to keep the HTTP connection alive.
*
* @param mountedRoot - where both the source and dest buckets are located
* @param destBucket - resulting location of the concatenated objects
* @param fileName - resulting file name of the concatenated objects
* @param sourceBucket - special bucket used to save uploaded file parts
* @param parts - an array of file names in the sourceBucket
* @param client - if not null, then keep the servlet connection alive while this potentially long concatenation takes place
* @return OrderedPair with the first value the MD5 of the final object, and the second value the length of the final object
*/
@Override
public OrderedPair<String,Long> concatentateObjects(String mountedRoot, String destBucket, String fileName, String sourceBucket, S3MultipartPart[] parts, OutputStream client)
{
// TODO
throw new UnsupportedException("Multipart upload support not yet implemented in CAStor plugin");
/*
MessageDigest md5;
long totalLength = 0;
try {
md5 = MessageDigest.getInstance("MD5");
} catch (NoSuchAlgorithmException e) {
s_logger.error("Unexpected exception " + e.getMessage(), e);
throw new InternalErrorException("Unable to get MD5 MessageDigest", e);
}
File file = new File(getBucketFolderDir(mountedRoot, destBucket) + File.separatorChar + fileName);
try {
// -> when versioning is off we need to rewrite the file contents
file.delete();
file.createNewFile();
final FileOutputStream fos = new FileOutputStream(file);
byte[] buffer = new byte[4096];
// -> get the input stream for the next file part
for( int i=0; i < parts.length; i++ )
{
DataHandler nextPart = loadObject( mountedRoot, sourceBucket, parts[i].getPath());
InputStream is = nextPart.getInputStream();
int len = 0;
while( (len = is.read(buffer)) > 0) {
fos.write(buffer, 0, len);
md5.update(buffer, 0, len);
totalLength += len;
}
is.close();
// -> after each file write tell the client we are still here to keep connection alive
if (null != client) {
client.write( new String(" ").getBytes());
client.flush();
}
}
fos.close();
return new OrderedPair<String, Long>(StringHelper.toHexString(md5.digest()), new Long(totalLength));
//Create an ordered pair whose first element is the MD4 digest as a (lowercase) hex String
}
catch(IOException e) {
s_logger.error("concatentateObjects unexpected exception " + e.getMessage(), e);
throw new OutOfStorageException(e);
}
*/
}
@Override
public DataHandler loadObject(String mountedRoot, String bucket, String fileName) {
try {
return new DataHandler(new URL(castorURL(mountedRoot, bucket, fileName)));
} catch (MalformedURLException e) {
s_logger.error("Failed to loadObject from CAStor", e);
throw new FileNotExistException("Unable to load object from CAStor: " + e.getMessage());
}
}
@Override
public void deleteObject(String mountedRoot, String bucket, String fileName) {
String filePath = bucket + "/" + fileName;
try {
ScspResponse bwResponse = myClient(mountedRoot).delete("", filePath, domainQueryArg(), new ScspHeaders());
if (bwResponse.getHttpStatusCode() != HTTP_OK) {
s_logger.error("CAStor delete object responded with error " + bwResponse.getHttpStatusCode());
throw new OutOfStorageException("CAStor unable to delete object " + filePath + ": " +
bwResponse.getHttpStatusCode());
}
} catch (ScspExecutionException e) {
s_logger.error("CAStor unable to delete object " + filePath, e);
throw new OutOfStorageException("CAStor unable to delete object " + filePath + ": " + e.getMessage());
}
}
public class ScspDataSource implements DataSource {
GetMethod method;
public ScspDataSource(GetMethod m) {
method = m;
}
@Override
public String getContentType() {
Header h = method.getResponseHeader("Content-type");
return h==null ? null : h.getValue();
}
@Override
public InputStream getInputStream() throws IOException {
try {
return method.getResponseBodyAsStream();
} catch (Exception e) {
s_logger.error("CAStor loadObjectRange getInputStream error", e);
return null;
}
}
@Override
public String getName() {
assert(false);
return null;
}
@Override
public OutputStream getOutputStream() throws IOException {
assert(false);
return null;
}
}
@Override
public DataHandler loadObjectRange(String mountedRoot, String bucket, String fileName, long startPos, long endPos) {
try {
HttpClient httpClient = new HttpClient();
// Create a method instance.
GetMethod method = new GetMethod(castorURL(mountedRoot, bucket, fileName));
method.addRequestHeader("Range", "bytes=" + startPos + "-" + endPos);
int statusCode = httpClient.executeMethod(method);
if (statusCode < HTTP_OK || statusCode >= HTTP_UNSUCCESSFUL) {
s_logger.error("CAStor loadObjectRange response: "+ statusCode);
throw new FileNotExistException("CAStor loadObjectRange response: " + statusCode);
}
return new DataHandler(new ScspDataSource(method));
} catch (Exception e) {
s_logger.error("CAStor loadObjectRange failure", e);
throw new FileNotExistException("CAStor loadObjectRange failure: " + e);
}
}
@Override
public String getBucketFolderDir(String mountedRoot, String bucket) {
// This method shouldn't be needed and doesn't need to use
// mountedRoot (which is CAStor config values here), right?
String bucketFolder = getBucketFolderName(bucket);
return bucketFolder;
}
private String getBucketFolderName(String bucket) {
// temporary
String name = bucket.replace(' ', '_');
name = name.replace('\\', '-');
name = name.replace('/', '-');
return name;
}
}
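
A minimal caller sketch for the adapter added above, illustrative only and not part of this commit. It assumes a reachable CAStor cluster and the CAStorSDK dependency added earlier in this commit; the "storage.root" value reuses an example from the comment in myClient(), and the bucket and object names are placeholders.

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import javax.activation.DataHandler;
import com.cloud.bridge.io.S3CAStorBucketAdapter;

// Illustrative caller only; bucket/object names are placeholders.
public class CAStorAdapterSketch {
    public static void main(String[] args) throws Exception {
        // CAStor-style "storage.root" value, as documented in myClient() above:
        // node IP addresses plus an optional trailing SCSP port.
        String mountedRoot = "castor 172.16.78.130 172.16.78.131 80";
        S3CAStorBucketAdapter adapter = new S3CAStorBucketAdapter();

        // Create a bucket, write an object, read it back, then clean up.
        adapter.createContainer(mountedRoot, "demo-bucket");
        InputStream payload = new ByteArrayInputStream("hello".getBytes());
        String md5Hex = adapter.saveObject(payload, mountedRoot, "demo-bucket", "greeting.txt");
        System.out.println("stored object, MD5 = " + md5Hex);

        DataHandler dh = adapter.loadObject(mountedRoot, "demo-bucket", "greeting.txt");
        System.out.println("content type: " + dh.getContentType());

        adapter.deleteObject(mountedRoot, "demo-bucket", "greeting.txt");
        adapter.deleteContainer(mountedRoot, "demo-bucket");
    }
}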


@ -24,9 +24,11 @@ public interface SHost {
public static final int STORAGE_HOST_TYPE_LOCAL = 0;
public static final int STORAGE_HOST_TYPE_NFS = 1;
public static final int STORAGE_HOST_TYPE_CASTOR = 2;
public static enum StorageHostType {
STORAGE_HOST_TYPE_LOCAL, //0
STORAGE_HOST_TYPE_NFS //1
STORAGE_HOST_TYPE_NFS, //1
STORAGE_HOST_TYPE_CASTOR //2
}
/* private Long id;


@ -243,7 +243,13 @@ public class ServiceProvider {
//PersistContext.flush();
String localStorageRoot = properties.getProperty("storage.root");
if (localStorageRoot != null) setupLocalStorage(localStorageRoot);
if (localStorageRoot != null) {
if (localStorageRoot.toLowerCase().startsWith("castor")) {
setupCAStorStorage(localStorageRoot);
} else {
setupLocalStorage(localStorageRoot);
}
}
multipartDir = properties.getProperty("storage.multipartDir");
@ -318,7 +324,20 @@ public class ServiceProvider {
}
}
public void shutdown() {
private void setupCAStorStorage(String storageRoot) {
SHostVO shost = shostDao.getLocalStorageHost(mhost.getId(), storageRoot);
if(shost == null) {
shost = new SHostVO();
shost.setMhost(mhost);
shost.setMhostid(mhost.getId());
shost.setHostType(SHost.STORAGE_HOST_TYPE_CASTOR);
shost.setHost(NetHelper.getHostName());
shost.setExportRoot(storageRoot);
shostDao.persist(shost);
}
}
public void shutdown() {
timer.cancel();
if(logger.isInfoEnabled())


@ -39,6 +39,7 @@ import org.apache.log4j.Logger;
import org.json.simple.parser.ParseException;
import com.cloud.bridge.io.S3FileSystemBucketAdapter;
import com.cloud.bridge.io.S3CAStorBucketAdapter;
import com.cloud.bridge.model.BucketPolicyVO;
import com.cloud.bridge.model.MHostMountVO;
import com.cloud.bridge.model.MHostVO;
@ -115,6 +116,7 @@ public class S3Engine {
public S3Engine() {
bucketAdapters.put(SHost.STORAGE_HOST_TYPE_LOCAL, new S3FileSystemBucketAdapter());
bucketAdapters.put(SHost.STORAGE_HOST_TYPE_CASTOR, new S3CAStorBucketAdapter());
}
@ -1398,6 +1400,10 @@ public class S3Engine {
return new OrderedPair<SHostVO, String>(shost, shost.getExportRoot());
}
if(shost.getHostType() == SHost.STORAGE_HOST_TYPE_CASTOR ) {
return new OrderedPair<SHostVO, String>(shost, shost.getExportRoot());
}
MHostMountVO mount = mountDao.getHostMount(ServiceProvider.getInstance().getManagementHostId(), shost.getId());
if(mount != null) {
return new OrderedPair<SHostVO, String>(shost, mount.getMountPath());


@ -157,7 +157,6 @@
<jar jarfile="${dist.files.dir}/cloud-ec2.aar" basedir="${target.dir}/classes/cloud-awsapi.jar" excludes="**/*">
<metainf dir="${base.dir}/awsapi/resource/AmazonEC2">
<include name="services.xml" />
<include name="AmazonEC2.wsdl" />
</metainf>
</jar>
</target>


@ -180,9 +180,9 @@
</path>
<target name="compile-plugins" description="Compile all of the jars corresponding to plugins" depends="compile-utils, compile-api, compile-core, compile-server, compile-hypervisors, compile-deployment-planners, compile-host-allocators, compile-network-elements, compile-user-authenticators, compile-storage-allocators, compile-file-systems "/>
<target name="compile-plugins" description="Compile all of the jars corresponding to plugins" depends="compile-utils, compile-api, compile-core, compile-server, compile-hypervisors, compile-deployment-planners, compile-host-allocators, compile-network-elements, compile-user-authenticators, compile-storage-allocators"/>
<target name="build-plugins" depends="build-hypervisors, build-network-elements, build-deployment-planners, build-host-allocators, build-storage-allocators, build-user-authenticators, build-file-systems" description="Builds all jar's for the plug-in's"/>
<target name="build-plugins" depends="build-hypervisors, build-network-elements, build-deployment-planners, build-host-allocators, build-storage-allocators, build-user-authenticators" description="Builds all jar's for the plug-in's"/>
<target name="clean-plugins" description="Clean all of the generated files by the plugins">
<delete file="${build.log}" />
@ -194,8 +194,8 @@
<!-- ===================== Hypervisors ========================= -->
<target name="compile-hypervisors" depends="compile-kvm, compile-ovm, compile-xen, compile-vmware" description="Compile all hypervisors"/>
<target name="build-hypervisors" depends="build-kvm, build-ovm, build-xen, build-vmware" description="Builds all hypervisors"/>
<target name="compile-hypervisors" depends="compile-kvm, compile-ovm, compile-xen" description="Compile all hypervisors"/>
<target name="build-hypervisors" depends="build-kvm, build-ovm, build-xen " description="Builds all hypervisors"/>
<target name="compile-kvm" depends="-init, compile-core, compile-agent" description="Compile KVM">
<ant antfile="${base.dir}/plugins/hypervisors/kvm/build.xml" target="build"/>
@ -280,8 +280,8 @@
<!-- ===================== Network Elements ===================== -->
<target name="compile-network-elements" depends="compile-netscaler, compile-f5, compile-srx, compile-ovs, compile-elb, compile-nicira-nvp" description="Compile all network elements"/>
<target name="build-network-elements" depends="build-netscaler, build-f5, build-srx, build-ovs, build-elb, build-nicira-nvp" description="build all network elements"/>
<target name="compile-network-elements" depends="compile-ovs, compile-elb, compile-nicira-nvp" description="Compile all network elements"/>
<target name="build-network-elements" depends="build-ovs, build-elb, build-nicira-nvp" description="build all network elements"/>
<target name="compile-netscaler" depends="-init, compile-server" description="Compile NetScaler plugin">
<ant antfile="${base.dir}/plugins/network-elements/netscaler/build.xml" target="build"/>


@ -204,7 +204,7 @@
<path refid="deps.classpath" />
<path refid="dist.classpath" />
</path>
<target name="compile-core" depends="-init, compile-utils, compile-api, compile-vmware-base" description="Compile the core business logic.">
<target name="compile-core" depends="-init, compile-utils, compile-api" description="Compile the core business logic.">
<compile-java jar.name="${core.jar}" top.dir="${core.dir}" classpath="core.classpath" />
</target>


@ -73,6 +73,11 @@
<artifactId>cloud-plugin-hypervisor-ovm</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-hypervisor-kvm</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-allocator-random</artifactId>
@ -99,43 +104,6 @@
<version>5.1.21</version>
<scope>runtime</scope>
</dependency>
<!-- Non-OSS deps -->
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-hypervisor-vmware</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-srx</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-hypervisor-kvm</artifactId>
<version>${project.version}</version>
<exclusions>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-netapp</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-f5</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-netscaler</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<defaultGoal>install</defaultGoal>


@ -114,7 +114,6 @@ under the License.
</adapters>
<adapters key="com.cloud.ha.Investigator">
<adapter name="SimpleInvestigator" class="com.cloud.ha.CheckOnAgentInvestigator"/>
<adapter name="VmwareInvestigator" class="com.cloud.ha.VmwareInvestigator"/>
<adapter name="XenServerInvestigator" class="com.cloud.ha.XenServerInvestigator"/>
<adapter name="PingInvestigator" class="com.cloud.ha.UserVmDomRInvestigator"/>
<adapter name="ManagementIPSysVMInvestigator" class="com.cloud.ha.ManagementIPSystemVMInvestigator"/>
@ -122,7 +121,6 @@ under the License.
<adapters key="com.cloud.ha.FenceBuilder">
<adapter name="XenServerFenceBuilder" class="com.cloud.ha.XenServerFencer"/>
<adapter name="KVMFenceBuilder" class="com.cloud.ha.KVMFencer"/>
<adapter name="VmwareFenceBuilder" class="com.cloud.ha.VmwareFencer"/>
<adapter name="OvmFenceBuilder" class="com.cloud.ovm.hypervisor.OvmFencer"/>
</adapters>
<adapters key="com.cloud.hypervisor.HypervisorGuru">
@ -133,7 +131,6 @@ under the License.
<adapter name="XCP Agent" class="com.cloud.hypervisor.xen.discoverer.XcpServerDiscoverer"/>
<adapter name="SecondaryStorage" class="com.cloud.storage.secondary.SecondaryStorageDiscoverer"/>
<adapter name="KVM Agent" class="com.cloud.hypervisor.kvm.discoverer.KvmServerDiscoverer"/>
<adapter name="VShpereServer" class="com.cloud.hypervisor.vmware.VmwareServerDiscoverer"/>
<adapter name="Bare Metal Agent" class="com.cloud.baremetal.BareMetalDiscoverer"/>
<adapter name="SCVMMServer" class="com.cloud.hypervisor.hyperv.HypervServerDiscoverer"/>
<adapter name="Ovm Discover" class="com.cloud.ovm.hypervisor.OvmDiscoverer" />
@ -153,15 +150,11 @@ under the License.
<adapter name="DomainChecker" class="com.cloud.acl.DomainChecker"/>
</adapters>
<adapters key="com.cloud.network.element.NetworkElement">
<adapter name="JuniperSRX" class="com.cloud.network.element.JuniperSRXExternalFirewallElement"/>
<adapter name="Netscaler" class="com.cloud.network.element.NetscalerElement"/>
<adapter name="F5BigIp" class="com.cloud.network.element.F5ExternalLoadBalancerElement"/>
<adapter name="VirtualRouter" class="com.cloud.network.element.VirtualRouterElement"/>
<adapter name="Ovs" class="com.cloud.network.element.OvsElement"/>
<adapter name="ExternalDhcpServer" class="com.cloud.network.element.ExternalDhcpElement"/>
<adapter name="BareMetal" class="com.cloud.network.element.BareMetalElement"/>
<adapter name="SecurityGroupProvider" class="com.cloud.network.element.SecurityGroupElement"/>
<adapter name="CiscoNexus1000vVSM" class="com.cloud.network.element.CiscoNexusVSMElement"/>
<adapter name="VpcVirtualRouter" class="com.cloud.network.element.VpcVirtualRouterElement"/>
<adapter name="NiciraNvp" class="com.cloud.network.element.NiciraNvpElement"/>
</adapters>
@ -171,7 +164,6 @@ under the License.
<adapters key="com.cloud.hypervisor.HypervisorGuru">
<adapter name="XenServerGuru" class="com.cloud.hypervisor.XenServerGuru"/>
<adapter name="KVMGuru" class="com.cloud.hypervisor.KVMGuru"/>
<adapter name="VMwareGuru" class="com.cloud.hypervisor.guru.VMwareGuru"/>
<adapter name="BareMetalGuru" class="com.cloud.baremetal.BareMetalGuru"/>
<adapter name="HypervGuru" class="com.cloud.hypervisor.guru.HypervGuru"/>
<adapter name="OvmGuru" class="com.cloud.ovm.hypervisor.OvmGuru" />
@ -179,25 +171,14 @@ under the License.
<adapters key="com.cloud.agent.StartupCommandProcessor">
<adapter name="BasicAgentAuthorizer" class="com.cloud.agent.manager.authn.impl.BasicAgentAuthManager"/>
</adapters>
<manager name="VmwareManager" key="com.cloud.hypervisor.vmware.manager.VmwareManager" class="com.cloud.hypervisor.vmware.manager.VmwareManagerImpl"/>
<manager name="OvsTunnelManager" key="com.cloud.network.ovs.OvsTunnelManager" class="com.cloud.network.ovs.OvsTunnelManagerImpl"/>
<manager name="ElasticLoadBalancerManager" key="com.cloud.network.lb.ElasticLoadBalancerManager" class="com.cloud.network.lb.ElasticLoadBalancerManagerImpl"/>
<pluggableservice name="VirtualRouterElementService" key="com.cloud.network.element.VirtualRouterElementService" class="com.cloud.network.element.VirtualRouterElement"/>
<pluggableservice name="NetscalerExternalLoadBalancerElementService" key="com.cloud.network.element.NetscalerLoadBalancerElementService" class="com.cloud.network.element.NetscalerElement"/>
<pluggableservice name="F5ExternalLoadBalancerElementService" key="com.cloud.network.element.F5ExternalLoadBalancerElementService" class="com.cloud.network.element.F5ExternalLoadBalancerElement"/>
<pluggableservice name="JuniperSRXFirewallElementService" key="com.cloud.network.element.JuniperSRXFirewallElementService" class="com.cloud.network.element.JuniperSRXExternalFirewallElement"/>
<pluggableservice name="CiscoNexusVSMElementService" key="com.cloud.network.element.CiscoNexusVSMElementService" class="com.cloud.network.element.CiscoNexusVSMElement"/>
<pluggableservice name="NiciraNvpElementService" key="com.cloud.network.element.NiciraNvpElementService" class="com.cloud.network.element.NiciraNvpElement"/>
<dao name="NetScalerPodDao" class="com.cloud.network.dao.NetScalerPodDaoImpl" singleton="false"/>
<dao name="CiscoNexusVSMDeviceDao" class="com.cloud.network.dao.CiscoNexusVSMDeviceDaoImpl" singleton="false"/>
<dao name="OvsTunnelInterfaceDao" class="com.cloud.network.ovs.dao.OvsTunnelInterfaceDaoImpl" singleton="false"/>
<dao name="OvsTunnelAccountDao" class="com.cloud.network.ovs.dao.OvsTunnelNetworkDaoImpl" singleton="false"/>
<dao name="NiciraNvpDao" class="com.cloud.network.dao.NiciraNvpDaoImpl" singleton="false"/>
<dao name="NiciraNvpNicMappingDao" class="com.cloud.network.dao.NiciraNvpNicMappingDaoImpl" singleton="false"/>
<dao name="NetappPool" class="com.cloud.netapp.dao.PoolDaoImpl" singleton="false"/>
<dao name="NetappVolume" class="com.cloud.netapp.dao.VolumeDaoImpl" singleton="false"/>
<dao name="NetappLun" class="com.cloud.netapp.dao.LunDaoImpl" singleton="false"/>
<manager name="NetappManager" key="com.cloud.netapp.NetappManager" class="com.cloud.netapp.NetappManagerImpl"/>
<dao name="ElasticLbVmMapDao" class="com.cloud.network.lb.dao.ElasticLbVmMapDaoImpl" singleton="false"/>
</management-server>
@ -253,4 +234,4 @@ under the License.
<dao name="UserCredentialsDao" class="com.cloud.bridge.persist.dao.UserCredentialsDaoImpl" singleton="false"/>
</awsapi-s3server>
</components.xml>
</components.xml>


@ -433,24 +433,18 @@ fi
%files server
%defattr(0644,root,root,0755)
%{_javadir}/%{name}-server.jar
%{_javadir}/%{name}-vmware-base.jar
%{_javadir}/%{name}-ovm.jar
%{_javadir}/%{name}-dp-user-concentrated-pod.jar
%{_javadir}/%{name}-dp-user-dispersing.jar
%{_javadir}/%{name}-host-allocator-random.jar
%{_javadir}/%{name}-plugin-f5.jar
%{_javadir}/%{name}-plugin-netscaler.jar
%{_javadir}/%{name}-plugin-ovs.jar
%{_javadir}/%{name}-plugin-srx.jar
%{_javadir}/%{name}-storage-allocator-random.jar
%{_javadir}/%{name}-user-authenticator-ldap.jar
%{_javadir}/%{name}-user-authenticator-md5.jar
%{_javadir}/%{name}-user-authenticator-plaintext.jar
%{_javadir}/%{name}-vmware.jar
%{_javadir}/%{name}-xen.jar
%{_javadir}/%{name}-plugin-nicira-nvp.jar
%{_javadir}/%{name}-plugin-elb.jar
%{_javadir}/%{name}-plugin-netapp.jar
%{_javadir}/%{name}-plugin-nicira-nvp.jar
%config(noreplace) %{_sysconfdir}/%{name}/server/*
%files agent-scripts
@ -467,8 +461,6 @@ fi
%{_javadir}/commons-dbcp-1.4.jar
%{_javadir}/commons-pool-1.6.jar
%{_javadir}/gson-1.7.1.jar
%{_javadir}/netscaler-1.0.jar
%{_javadir}/netscaler-sdx-1.0.jar
%{_javadir}/backport-util-concurrent-3.1.jar
%{_javadir}/ehcache-1.5.0.jar
%{_javadir}/httpcore-4.0.jar
@ -481,19 +473,15 @@ fi
%{_javadir}/hibernate-commons-annotations-3.2.0.Final.jar
%{_javadir}/hibernate-annotations-3.5.1-Final.jar
%{_javadir}/asm-3.1.jar
%{_javadir}/xapi-5.6.100-1-20120825.123319-1.jar
%{_javadir}/xapi-5.6.100-1-SNAPSHOT.jar
%{_javadir}/log4j-*.jar
%{_javadir}/trilead-ssh2-build213-svnkit-1.3-patch.jar
%{_javadir}/cglib-2.2.jar
%{_javadir}/xmlrpc-common-3.*.jar
%{_javadir}/xmlrpc-client-3.*.jar
%{_javadir}/axis-1.4.jar
%{_javadir}/wsdl4j-1.6.2.jar
%{_javadir}/bcprov-jdk16-1.46.jar
%{_javadir}/jsch-0.1.42.jar
%{_javadir}/icontrol-1.0.jar
%{_javadir}/manageontap-1.0.jar
%{_javadir}/vmware*.jar
%{_javadir}/jasypt-1.*.jar
%{_javadir}/commons-configuration-1.8.jar
%{_javadir}/commons-lang-2.6.jar
@ -551,6 +539,7 @@ fi
%defattr(0644,root,root,0755)
%{_javadir}/%{name}-agent.jar
%{_javadir}/%{name}-plugin-hypervisor-kvm.jar
%{_javadir}/libvirt-0.4.8.jar
%files agent
%defattr(0644,root,root,0755)


@ -48,11 +48,6 @@
<artifactId>cloud-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-vmware-base</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-agent</artifactId>


@ -4,7 +4,6 @@ mvn install:install-file -Dfile=cloud-iControl.jar -DgroupId=com.cloud.com.
mvn install:install-file -Dfile=cloud-netscaler.jar -DgroupId=com.cloud.com.citrix -DartifactId=netscaler -Dversion=1.0 -Dpackaging=jar
mvn install:install-file -Dfile=cloud-netscaler-sdx.jar -DgroupId=com.cloud.com.citrix -DartifactId=netscaler-sdx -Dversion=1.0 -Dpackaging=jar
mvn install:install-file -Dfile=cloud-manageontap.jar -DgroupId=com.cloud.com.netapp -DartifactId=manageontap -Dversion=1.0 -Dpackaging=jar
mvn install:install-file -Dfile=libvirt-0.4.8.jar -DgroupId=org.libvirt -DartifactId=libvirt -Dversion=0.4.8 -Dpackaging=jar
mvn install:install-file -Dfile=vmware-vim.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim -Dversion=1.0 -Dpackaging=jar
mvn install:install-file -Dfile=vmware-vim25.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-vim25 -Dversion=1.0 -Dpackaging=jar
mvn install:install-file -Dfile=vmware-apputils.jar -DgroupId=com.cloud.com.vmware -DartifactId=vmware-apputils -Dversion=1.0 -Dpackaging=jar

deps/pom.xml

@ -73,6 +73,11 @@
<artifactId>cloud-plugin-hypervisor-ovm</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-hypervisor-kvm</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-storage-allocator-random</artifactId>
@ -105,36 +110,6 @@
<scope>runtime</scope>
</dependency>
<!-- Non-OSS deps -->
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-hypervisor-vmware</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-srx</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-hypervisor-kvm</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-netapp</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-f5</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-netscaler</artifactId>
<version>${project.version}</version>
</dependency>
<!-- for awsapi build -->
<dependency>
<groupId>org.apache.axis2</groupId>


@ -48,11 +48,6 @@
<artifactId>cloud-core</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-vmware-base</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-agent</artifactId>


@ -542,10 +542,10 @@ echo "*************CONFIGURING VPN********************"
vpn_config
echo "*************FIX DHCP ISSUE********************"
dhcp_fix
#dhcp_fix
echo "*************INSTALL XS TOOLS********************"
install_xs_tool
#install_xs_tool
echo "*************CLEANING UP********************"
cleanup


@ -27,6 +27,12 @@
<version>4.0.0-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<repositories>
<repository>
<id>libvirt-org</id>
<url>http://libvirt.org/maven2</url>
</repository>
</repositories>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>


@ -1,641 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.hypervisor.kvm.resource;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.InetAddress;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import javax.ejb.Local;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.AttachIsoCommand;
import com.cloud.agent.api.AttachVolumeCommand;
import com.cloud.agent.api.CheckHealthAnswer;
import com.cloud.agent.api.CheckHealthCommand;
import com.cloud.agent.api.CheckStateAnswer;
import com.cloud.agent.api.CheckStateCommand;
import com.cloud.agent.api.CheckVirtualMachineAnswer;
import com.cloud.agent.api.CheckVirtualMachineCommand;
import com.cloud.agent.api.CleanupNetworkRulesCmd;
import com.cloud.agent.api.Command;
import com.cloud.agent.api.GetHostStatsAnswer;
import com.cloud.agent.api.GetHostStatsCommand;
import com.cloud.agent.api.GetStorageStatsAnswer;
import com.cloud.agent.api.GetStorageStatsCommand;
import com.cloud.agent.api.GetVmStatsAnswer;
import com.cloud.agent.api.GetVmStatsCommand;
import com.cloud.agent.api.ModifySshKeysCommand;
import com.cloud.agent.api.ModifyStoragePoolAnswer;
import com.cloud.agent.api.ModifyStoragePoolCommand;
import com.cloud.agent.api.PingCommand;
import com.cloud.agent.api.PingRoutingCommand;
import com.cloud.agent.api.PingTestCommand;
import com.cloud.agent.api.ReadyAnswer;
import com.cloud.agent.api.ReadyCommand;
import com.cloud.agent.api.RebootAnswer;
import com.cloud.agent.api.RebootCommand;
import com.cloud.agent.api.SecurityGroupRuleAnswer;
import com.cloud.agent.api.SecurityGroupRulesCmd;
import com.cloud.agent.api.StartAnswer;
import com.cloud.agent.api.StartCommand;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.agent.api.StartupRoutingCommand.VmState;
import com.cloud.agent.api.StartupStorageCommand;
import com.cloud.agent.api.StopAnswer;
import com.cloud.agent.api.StopCommand;
import com.cloud.agent.api.StoragePoolInfo;
import com.cloud.agent.api.routing.SavePasswordCommand;
import com.cloud.agent.api.routing.VmDataCommand;
import com.cloud.agent.api.storage.CreateAnswer;
import com.cloud.agent.api.storage.CreateCommand;
import com.cloud.agent.api.storage.DestroyCommand;
import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer;
import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand;
import com.cloud.agent.api.to.NicTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.agent.api.to.VolumeTO;
import com.cloud.agent.dhcp.DhcpSnooper;
import com.cloud.agent.dhcp.FakeDhcpSnooper;
import com.cloud.agent.mockvm.MockVm;
import com.cloud.agent.mockvm.MockVmMgr;
import com.cloud.agent.mockvm.VmMgr;
import com.cloud.agent.vmdata.JettyVmDataServer;
import com.cloud.agent.vmdata.VmDataServer;
import com.cloud.host.Host.Type;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.network.Networks.RouterPrivateIpStrategy;
import com.cloud.network.Networks.TrafficType;
import com.cloud.resource.ServerResource;
import com.cloud.resource.ServerResourceBase;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.Volume;
import com.cloud.storage.template.TemplateInfo;
import com.cloud.utils.PropertiesUtil;
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;
import com.cloud.vm.VirtualMachine.State;
/**
 * Pretends to be a computing resource: a fake KVM host backed by the mock VM
 * manager, used by the agent simulator in place of a real hypervisor.
 */
@Local(value = { ServerResource.class })
public class FakeComputingResource extends ServerResourceBase implements
ServerResource {
private static final Logger s_logger = Logger
.getLogger(FakeComputingResource.class);
private Map<String, Object> _params;
private VmMgr _vmManager = new MockVmMgr();
protected HashMap<String, State> _vms = new HashMap<String, State>(20);
protected DhcpSnooper _dhcpSnooper = new FakeDhcpSnooper();
protected VmDataServer _vmDataServer = new JettyVmDataServer();
@Override
public Type getType() {
return Type.Routing;
}
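// Advertises the fake host to the management server: a StartupRoutingCommand built
// from the configured CPU/memory figures plus a StartupStorageCommand describing
// the simulated local storage pool.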
@Override
public StartupCommand[] initialize() {
Map<String, VmState> changes = null;
final List<Object> info = getHostInfo();
final StartupRoutingCommand cmd = new StartupRoutingCommand(
(Integer) info.get(0), (Long) info.get(1), (Long) info.get(2),
(Long) info.get(4), (String) info.get(3), HypervisorType.KVM,
RouterPrivateIpStrategy.HostLocal, changes);
fillNetworkInformation(cmd);
cmd.getHostDetails().putAll(getVersionStrings());
cmd.setCluster(getConfiguredProperty("cluster", "1"));
StoragePoolInfo pi = initializeLocalStorage();
StartupStorageCommand sscmd = new StartupStorageCommand();
sscmd.setPoolInfo(pi);
sscmd.setGuid(pi.getUuid());
sscmd.setDataCenter((String) _params.get("zone"));
sscmd.setResourceType(Storage.StorageResourceType.STORAGE_POOL);
return new StartupCommand[] { cmd, sscmd };
}
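// Host OS details default to a Fedora 14 host unless overridden in the configured properties.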
private Map<String, String> getVersionStrings() {
Map<String, String> result = new HashMap<String, String>();
String hostOs = (String) _params.get("Host.OS");
String hostOsVer = (String) _params.get("Host.OS.Version");
String hostOsKernVer = (String) _params.get("Host.OS.Kernel.Version");
result.put("Host.OS", hostOs == null ? "Fedora" : hostOs);
result.put("Host.OS.Version", hostOsVer == null ? "14" : hostOsVer);
result.put("Host.OS.Kernel.Version",
hostOsKernVer == null ? "2.6.35.6-45.fc14.x86_64"
: hostOsKernVer);
return result;
}
protected void fillNetworkInformation(final StartupCommand cmd) {
cmd.setPrivateIpAddress((String) _params.get("private.ip.address"));
cmd.setPrivateMacAddress((String) _params.get("private.mac.address"));
cmd.setPrivateNetmask((String) _params.get("private.ip.netmask"));
cmd.setStorageIpAddress((String) _params.get("private.ip.address"));
cmd.setStorageMacAddress((String) _params.get("private.mac.address"));
cmd.setStorageNetmask((String) _params.get("private.ip.netmask"));
cmd.setGatewayIpAddress((String) _params.get("gateway.ip.address"));
}
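// Describes the simulated local storage pool; the pool UUID is derived deterministically
// from the host IP plus the local storage path, so it stays stable across agent restarts.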
protected StoragePoolInfo initializeLocalStorage() {
String hostIp = (String) _params.get("private.ip.address");
String localStoragePath = (String) _params.get("local.storage.path");
String lh = hostIp + localStoragePath;
String uuid = UUID.nameUUIDFromBytes(lh.getBytes()).toString();
String capacity = (String) _params.get("local.storage.capacity");
String available = (String) _params.get("local.storage.avail");
return new StoragePoolInfo(uuid, hostIp, localStoragePath,
localStoragePath, StoragePoolType.Filesystem,
Long.parseLong(capacity), Long.parseLong(available));
}
@Override
public PingCommand getCurrentStatus(long id) {
final HashMap<String, State> newStates = new HashMap<String, State>();
_dhcpSnooper.syncIpAddr();
return new PingRoutingCommand(com.cloud.host.Host.Type.Routing, id,
newStates);
}
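// Dispatches incoming agent commands to the fake handlers below; anything not
// handled explicitly gets an unsupported-command answer.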
@Override
public Answer executeRequest(Command cmd) {
try {
if (cmd instanceof ReadyCommand) {
return execute((ReadyCommand) cmd);
} else if (cmd instanceof ModifySshKeysCommand) {
return execute((ModifySshKeysCommand) cmd);// TODO: remove
} else if (cmd instanceof GetHostStatsCommand) {
return execute((GetHostStatsCommand) cmd);
} else if (cmd instanceof PrimaryStorageDownloadCommand) {
return execute((PrimaryStorageDownloadCommand) cmd);
} else if (cmd instanceof StopCommand) {
return execute((StopCommand) cmd);
} else if (cmd instanceof GetVmStatsCommand) {
return execute((GetVmStatsCommand) cmd);
} else if (cmd instanceof RebootCommand) {
return execute((RebootCommand) cmd);
} else if (cmd instanceof CheckStateCommand) {
return execute((CheckStateCommand) cmd);
} else if (cmd instanceof CheckHealthCommand) {
return execute((CheckHealthCommand) cmd);
} else if (cmd instanceof PingTestCommand) {
return execute((PingTestCommand) cmd);
} else if (cmd instanceof CheckVirtualMachineCommand) {
return execute((CheckVirtualMachineCommand) cmd);
} else if (cmd instanceof CreateCommand) {
return execute((CreateCommand) cmd);
} else if (cmd instanceof DestroyCommand) {
return execute((DestroyCommand) cmd);
} else if (cmd instanceof GetStorageStatsCommand) {
return execute((GetStorageStatsCommand) cmd);
} else if (cmd instanceof ModifyStoragePoolCommand) {
return execute((ModifyStoragePoolCommand) cmd);
} else if (cmd instanceof SecurityGroupRulesCmd) {
return execute((SecurityGroupRulesCmd) cmd);
} else if (cmd instanceof StartCommand) {
return execute((StartCommand) cmd);
} else if (cmd instanceof CleanupNetworkRulesCmd) {
return execute((CleanupNetworkRulesCmd) cmd);
} else if (cmd instanceof SavePasswordCommand) {
return execute((SavePasswordCommand) cmd);
} else if (cmd instanceof VmDataCommand) {
return execute((VmDataCommand) cmd);
} else {
s_logger.warn("Unsupported command ");
return Answer.createUnsupportedCommandAnswer(cmd);
}
} catch (final IllegalArgumentException e) {
return new Answer(cmd, false, e.getMessage());
}
}
private Answer execute(CleanupNetworkRulesCmd cmd) {
return new Answer(cmd);
}
private Answer execute(SecurityGroupRulesCmd cmd) {
s_logger.info("Programmed network rules for vm " + cmd.getVmName()
+ " guestIp=" + cmd.getGuestIp() + ",ingress numrules="
+ cmd.getIngressRuleSet().length + ",egress numrules="
+ cmd.getEgressRuleSet().length);
return new SecurityGroupRuleAnswer(cmd);
}
private Answer execute(ModifyStoragePoolCommand cmd) {
long capacity = getConfiguredProperty("local.storage.capacity",
10000000000L);
long used = 10000000L;
long available = capacity - used;
if (cmd.getAdd()) {
ModifyStoragePoolAnswer answer = new ModifyStoragePoolAnswer(cmd,
capacity, used, new HashMap<String, TemplateInfo>());
if (s_logger.isInfoEnabled())
s_logger.info("Sending ModifyStoragePoolCommand answer with capacity: "
+ capacity
+ ", used: "
+ used
+ ", available: "
+ available);
return answer;
} else {
if (s_logger.isInfoEnabled())
s_logger.info("ModifyNetfsStoragePoolCmd is not add command, cmd: "
+ cmd.toString());
return new Answer(cmd);
}
}
private Answer execute(GetStorageStatsCommand cmd) {
return new GetStorageStatsAnswer(cmd, getConfiguredProperty(
"local.storage.capacity", 100000000000L), 0L);
}
protected synchronized ReadyAnswer execute(ReadyCommand cmd) {
return new ReadyAnswer(cmd);
}
private Answer execute(PrimaryStorageDownloadCommand cmd) {
return new PrimaryStorageDownloadAnswer(cmd.getLocalPath(), 16000000L);
}
private Answer execute(ModifySshKeysCommand cmd) {
return new Answer(cmd, true, null);
}
@Override
protected String getDefaultScriptsDir() {
return null;
}
protected String getConfiguredProperty(String key, String defaultValue) {
String val = (String) _params.get(key);
return val == null ? defaultValue : val;
}
protected Long getConfiguredProperty(String key, Long defaultValue) {
String val = (String) _params.get(key);
if (val != null) {
Long result = Long.parseLong(val);
return result;
}
return defaultValue;
}
protected List<Object> getHostInfo() {
final ArrayList<Object> info = new ArrayList<Object>();
long speed = getConfiguredProperty("cpuspeed", 4000L);
long cpus = getConfiguredProperty("cpus", 4L);
long ram = getConfiguredProperty("memory", 16000L * 1024L * 1024L);
long dom0ram = Math.min(ram / 10, 768 * 1024 * 1024L);
String cap = getConfiguredProperty("capabilities", "hvm");
info.add((int) cpus);
info.add(speed);
info.add(ram);
info.add(cap);
info.add(dom0ram);
return info;
}
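// Loads simulator.properties (located via PropertiesUtil.findConfigFile) so the
// simulated host characteristics can be tuned without code changes.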
private Map<String, Object> getSimulatorProperties()
throws ConfigurationException {
final File file = PropertiesUtil.findConfigFile("simulator.properties");
if (file == null) {
throw new ConfigurationException(
"Unable to find simulator.properties.");
}
s_logger.info("simulator.properties found at " + file.getAbsolutePath());
Properties properties = new Properties();
try {
properties.load(new FileInputStream(file));
final Map<String, Object> params = PropertiesUtil.toMap(properties);
return params;
} catch (final FileNotFoundException ex) {
throw new CloudRuntimeException("Cannot find the file: "
+ file.getAbsolutePath(), ex);
} catch (final IOException ex) {
throw new CloudRuntimeException("IOException in reading "
+ file.getAbsolutePath(), ex);
}
}
@Override
public boolean configure(String name, Map<String, Object> params)
throws ConfigurationException {
Map<String, Object> simProps = getSimulatorProperties();
params.putAll(simProps);
setParams(params);
_vmManager.configure(params);
_dhcpSnooper.configure(name, params);
_vmDataServer.configure(name, params);
return true;
}
public void setParams(Map<String, Object> _params) {
this._params = _params;
}
public Map<String, Object> getParams() {
return _params;
}
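// Starts a mock VM: creates it from the VirtualMachineTO spec, attaches fake disks
// and nics, and hands guest nics an IP address from the fake DHCP snooper.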
protected synchronized StartAnswer execute(StartCommand cmd) {
VmMgr vmMgr = getVmManager();
VirtualMachineTO vmSpec = cmd.getVirtualMachine();
String vmName = vmSpec.getName();
State state = State.Stopped;
try {
if (!_vms.containsKey(vmName)) {
synchronized (_vms) {
_vms.put(vmName, State.Starting);
}
MockVm vm = vmMgr.createVmFromSpec(vmSpec);
vmMgr.createVbd(vmSpec, vmName, vm);
vmMgr.createVif(vmSpec, vmName, vm);
state = State.Running;
for (NicTO nic : cmd.getVirtualMachine().getNics()) {
if (nic.getType() == TrafficType.Guest) {
InetAddress addr = _dhcpSnooper.getIPAddr(nic.getMac(),
vmName);
nic.setIp(addr.getHostAddress());
}
}
_vmDataServer.handleVmStarted(cmd.getVirtualMachine());
return new StartAnswer(cmd);
} else {
String msg = "There is already a VM having the same name "
+ vmName;
s_logger.warn(msg);
return new StartAnswer(cmd, msg);
}
} catch (Exception ex) {
s_logger.warn("Exception while starting VM " + vmName, ex);
} finally {
synchronized (_vms) {
_vms.put(vmName, state);
}
}
return new StartAnswer(cmd);
}
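// Stops a mock VM, retrying with a forced stop on timeout, then cleans up the
// vnet and the fake DHCP entry for the VM.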
protected synchronized StopAnswer execute(StopCommand cmd) {
VmMgr vmMgr = getVmManager();
StopAnswer answer = null;
String vmName = cmd.getVmName();
Integer port = vmMgr.getVncPort(vmName);
State state = null;
synchronized (_vms) {
state = _vms.get(vmName);
_vms.put(vmName, State.Stopping);
}
try {
String result = vmMgr.stopVM(vmName, false);
if (result != null) {
s_logger.info("Trying destroy on " + vmName);
if (Script.ERR_TIMEOUT.equals(result)) {
result = vmMgr.stopVM(vmName, true);
}
s_logger.warn("Couldn't stop " + vmName);
if (result != null) {
return new StopAnswer(cmd, result, false);
}
}
answer = new StopAnswer(cmd, null, port, true);
String result2 = vmMgr.cleanupVnet(cmd.getVnet());
if (result2 != null) {
result = result2 + (result != null ? ("\n" + result) : "");
answer = new StopAnswer(cmd, result, port, true);
}
_dhcpSnooper.cleanup(vmName, null);
return answer;
} finally {
if (answer == null || !answer.getResult()) {
synchronized (_vms) {
_vms.put(vmName, state);
}
}
}
}
protected Answer execute(final VmDataCommand cmd) {
return _vmDataServer.handleVmDataCommand(cmd);
}
protected Answer execute(final SavePasswordCommand cmd) {
return new Answer(cmd);
}
protected Answer execute(RebootCommand cmd) {
VmMgr vmMgr = getVmManager();
vmMgr.rebootVM(cmd.getVmName());
return new RebootAnswer(cmd, "success", true);
}
private Answer execute(PingTestCommand cmd) {
return new Answer(cmd);
}
protected GetVmStatsAnswer execute(GetVmStatsCommand cmd) {
return null;
}
private VmMgr getVmManager() {
return _vmManager;
}
protected Answer execute(GetHostStatsCommand cmd) {
VmMgr vmMgr = getVmManager();
return new GetHostStatsAnswer(cmd, vmMgr.getHostCpuUtilization(),
vmMgr.getHostFreeMemory(), vmMgr.getHostTotalMemory(), 0, 0,
"SimulatedHost");
}
protected CheckStateAnswer execute(CheckStateCommand cmd) {
State state = getVmManager().checkVmState(cmd.getVmName());
return new CheckStateAnswer(cmd, state);
}
protected CheckHealthAnswer execute(CheckHealthCommand cmd) {
return new CheckHealthAnswer(cmd, true);
}
protected CheckVirtualMachineAnswer execute(
final CheckVirtualMachineCommand cmd) {
VmMgr vmMgr = getVmManager();
final String vmName = cmd.getVmName();
final State state = vmMgr.checkVmState(vmName);
Integer vncPort = null;
if (state == State.Running) {
vncPort = vmMgr.getVncPort(vmName);
synchronized (_vms) {
_vms.put(vmName, State.Running);
}
}
return new CheckVirtualMachineAnswer(cmd, state, vncPort);
}
protected Answer execute(final AttachVolumeCommand cmd) {
return new Answer(cmd);
}
protected Answer execute(final AttachIsoCommand cmd) {
return new Answer(cmd);
}
protected CreateAnswer execute(final CreateCommand cmd) {
try {
VolumeTO vol = new VolumeTO(cmd.getVolumeId(), Volume.Type.ROOT,
com.cloud.storage.Storage.StoragePoolType.LVM, cmd
.getPool().getUuid(), "dummy", "/mountpoint",
"dummyPath", 1000L, null);
return new CreateAnswer(cmd, vol);
} catch (Throwable th) {
return new CreateAnswer(cmd, new Exception("Unexpected exception"));
}
}
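// Computes the delta between the mock VM manager's view of VM states and the
// cached _vms map, mirroring the state-sync logic of a real hypervisor resource.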
protected HashMap<String, State> sync() {
Map<String, State> newStates;
Map<String, State> oldStates = null;
HashMap<String, State> changes = new HashMap<String, State>();
synchronized (_vms) {
newStates = getVmManager().getVmStates();
oldStates = new HashMap<String, State>(_vms.size());
oldStates.putAll(_vms);
for (Map.Entry<String, State> entry : newStates.entrySet()) {
String vm = entry.getKey();
State newState = entry.getValue();
State oldState = oldStates.remove(vm);
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm + ": xen has state " + newState
+ " and we have state "
+ (oldState != null ? oldState.toString() : "null"));
}
if (oldState == null) {
_vms.put(vm, newState);
changes.put(vm, newState);
} else if (oldState == State.Starting) {
if (newState == State.Running) {
_vms.put(vm, newState);
} else if (newState == State.Stopped) {
s_logger.debug("Ignoring vm " + vm
+ " because of a lag in starting the vm.");
}
} else if (oldState == State.Stopping) {
if (newState == State.Stopped) {
_vms.put(vm, newState);
} else if (newState == State.Running) {
s_logger.debug("Ignoring vm " + vm
+ " because of a lag in stopping the vm. ");
}
} else if (oldState != newState) {
_vms.put(vm, newState);
changes.put(vm, newState);
}
}
for (Map.Entry<String, State> entry : oldStates.entrySet()) {
String vm = entry.getKey();
State oldState = entry.getValue();
if (s_logger.isTraceEnabled()) {
s_logger.trace("VM " + vm
+ " is now missing from xen so reporting stopped");
}
if (oldState == State.Stopping) {
s_logger.debug("Ignoring VM " + vm
+ " in transition state stopping.");
_vms.remove(vm);
} else if (oldState == State.Starting) {
s_logger.debug("Ignoring VM " + vm
+ " in transition state starting.");
} else if (oldState == State.Stopped) {
_vms.remove(vm);
} else {
changes.put(entry.getKey(), State.Stopped);
}
}
}
return changes;
}
protected Answer execute(DestroyCommand cmd) {
return new Answer(cmd, true, null);
}
}

View File

@ -37,6 +37,7 @@
<module>host-allocators/random</module>
<module>hypervisors/ovm</module>
<module>hypervisors/xen</module>
<module>hypervisors/kvm</module>
<module>network-elements/elastic-loadbalancer</module>
<module>network-elements/ovs</module>
<module>network-elements/nicira-nvp</module>

View File

@ -154,7 +154,6 @@
<module>usage</module>
<module>utils</module>
<module>deps/XenServerJava</module>
<module>vmware-base</module>
<module>plugins</module>
<module>awsapi</module>
<module>patches</module>

View File

@ -2400,6 +2400,10 @@ this distribution.
id='citrix.com'
name='Citrix Systems, Inc'
url='http://www.citrix.com/' />
<organisation
id='caringo.com'
name='Caringo, Inc.'
url='http://www.caringo.com/' />
</organisations>
<primary-license id='ApacheLicenseVersion2'>
<copyright-notice>Copyright (c) 2012 The Apache Software Foundation</copyright-notice>
@ -2516,6 +2520,7 @@ Updated November 2009 with contributions from: btburnett3, Anthony Aragues and X
</by-organisation>
</with-license>
<with-license id='MIT'>
<copyright-notice />
<by-organisation id='person:ole.laursen'>
<resource name="jquery.colorhelpers.js" notice='jquery.colorhelpers.notice' />
</by-organisation>
@ -2523,6 +2528,7 @@ Updated November 2009 with contributions from: btburnett3, Anthony Aragues and X
</within>
<within dir='ui/lib/jquery-ui'>
<with-license id='MIT'>
<copyright-notice />
<by-organisation id='jqueryui.com'>
<resource name="css/jquery-ui.css" />
<resource name="js/jquery-ui.js" />
@ -2531,10 +2537,10 @@ Updated November 2009 with contributions from: btburnett3, Anthony Aragues and X
</with-license>
</within>
<within dir='ui/lib/qunit'>
<copyright-notice>
Copyright © 2012 John Resig, Jörn Zaefferer
</copyright-notice>
<with-license id='MIT'>
<copyright-notice>
Copyright © 2012 John Resig, Jörn Zaefferer
</copyright-notice>
<by-organisation id='person:jorn.zaefferer'>
<resource name="qunit.css" source='http://docs.jquery.com/QUnit' />
<resource name="qunit.js" source='http://docs.jquery.com/QUnit' />
@ -2585,6 +2591,9 @@ Copyright © 2012 The Apache Software Foundation
</by-organisation>
</with-license>
<with-license id="ApacheLicenseVersion1.1">
<copyright-notice>
Copyright © 2012 The Apache Software Foundation
</copyright-notice>
<by-organisation id="apache.org">
<resource name="cloud-commons-discovery.jar" source="http://commons.apache.org/discovery/" />
</by-organisation>
@ -2640,6 +2649,14 @@ Copyright © 2012 The Eclipse Foundation.
</with-license>
<with-license id='BSD3ClauseGeneric'>
<copyright-notice>
Copyright © 2009, Caringo, Inc.
</copyright-notice>
<by-organisation id='caringo.com'>
<resource name='CAStorSDK.jar' source='http://www.castor.org/download.html' />
</by-organisation>
</with-license>
<with-license id='BSD3ClauseGeneric'>
<copyright-notice>
Copyright © 2002-2011 Atsuhiko Yamanaka, JCraft,Inc.
</copyright-notice>
<by-organisation id='jcraft.com'>
@ -2656,11 +2673,13 @@ Copyright © IBM Corp 2006
</by-organisation>
</with-license>
<with-license id='xstream.bsd'>
<copyright-notice />
<by-organisation id='xstream.codehaus.com'>
<resource name='cloud-xstream-1.3.1.jar' source='http://xstream.codehaus.org/repository.html' />
</by-organisation>
</with-license>
<with-license id='bouncy.mit'>
<copyright-notice />
<by-organisation id='bouncy.castle'>
<resource name='cloud-bcprov-jdk16-1.45.jar' source='http://repo1.maven.org/maven2/org/bouncycastle/bcprov-jdk16/1.45/bcprov-jdk16-1.45-sources.jar' />
</by-organisation>
@ -2675,12 +2694,14 @@ All rights reserved.
</by-organisation>
</with-license>
<with-license id='ApacheLicenseVersion2'>
<copyright-notice />
<by-organisation id='jetty.codehaus.com'>
<resource name='jetty-6.1.26.jar' source='http://repo1.maven.org/maven2/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26-sources.jar' />
<resource name='jetty-util-6.1.26.jar' source='http://repo1.maven.org/maven2/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26-sources.jar' />
</by-organisation>
</with-license>
<with-license id='CPL1'>
<copyright-notice />
<by-organisation id='junit.org'>
<resource name='cloud-junit.jar' source='http://kentbeck.github.com/junit/' />
</by-organisation>
@ -2731,6 +2752,7 @@ Copyright © 2004-2008 The Apache Software Foundation
</by-organisation>
</with-license>
<with-license id='antlr2'>
<copyright-notice />
<by-organisation id='antlr2.org'>
<resource name='antlr-2.7.6.jar' source='http://repo1.maven.org/maven2/antlr/antlr/2.7.6/antlr-2.7.6-sources.jar' />
</by-organisation>
@ -2788,6 +2810,7 @@ Copyright © 2009 Google Inc.
</by-organisation>
</with-license>
<with-license id='dom4j.license'>
<copyright-notice />
<by-organisation id='dom4j.sourceforge.net'>
<!-- TODO: Need to have a review of this license! -->
<resource name='dom4j-1.6.1.jar' source='http://dom4j.sourceforge.net/source-repository.html' />
@ -2820,6 +2843,7 @@ Copyright © 2002-2011 Atsuhiko Yamanaka, JCraft,Inc.
</by-organisation>
</with-license>
<with-license id='ApacheLicenseVersion2'>
<copyright-notice />
<by-organisation id='json-simple'>
<resource name='json_simple-1.1.jar' source='http://code.google.com/p/json-simple/source/checkout' />
</by-organisation>
@ -2833,11 +2857,13 @@ Copyright © 1997-2010 Oracle and/or its affiliates. All rights reserved.
</by-organisation>
</with-license>
<with-license id='CPL1'>
<copyright-notice />
<by-organisation id='junit.org'>
<resource name='junit-4.8.1.jar' source='http://kentbeck.github.com/junit/' />
</by-organisation>
</with-license>
<with-license id='MPL1'>
<copyright-notice />
<license-parameters>
<parameter><name>PROJECT</name><value>Javassist</value></parameter>
<parameter><name>INITIAL_DEVELOPER</name><value>Shigeru Chiba</value></parameter>

View File

@ -636,6 +636,7 @@ def rpm(context):
shutil.move(tarball,_join(sourcedir,tarball))
specfile = "%s.spec"%APPNAME
Utils.exec_command("mvn install -P deps")
checkdeps = lambda: c(["rpmbuild","--define","_topdir %s"%outputdir,"--nobuild",specfile]+packagever+releasever)
dorpm = lambda: c(["rpmbuild","--define","_topdir %s"%outputdir,"-bb",specfile]+buildnumber+prerelease+packagever+releasever)
try: checkdeps()

View File

@ -162,9 +162,9 @@ def build_dependences ():
start_path = bld.path.find_dir ("deps")
bld.install_files('${JAVADIR}',start_path.ant_glob(["commons-codec-1.6.jar", "ejb-api-3.0.jar", "xmlrpc-client-3.1.3.jar", "commons-dbcp-1.4.jar", "commons-pool-1.6.jar", "gson-1.7.1.jar",
bld.install_files('${JAVADIR}',start_path.ant_glob(["libvirt-0.4.8.jar", "commons-codec-1.6.jar", "ejb-api-3.0.jar", "xmlrpc-client-3.1.3.jar", "commons-dbcp-1.4.jar", "commons-pool-1.6.jar", "gson-1.7.1.jar",
"netscaler-1.0.jar", "netscaler-sdx-1.0.jar", "backport-util-concurrent-3.1.jar", "ehcache-1.5.0.jar", "httpcore-4.0.jar", "log4j-1.2.16.jar", "trilead-ssh2-build213-svnkit-1.3-patch.jar", "cglib-2.2.jar", "xmlrpc-common-3.*.jar",
"xmlrpc-client-3.*.jar", "axis-1.4.jar", "wsdl4j-1.6.2.jar", "bcprov-jdk16-1.46.jar", "jsch-0.1.42.jar", "icontrol-1.0.jar", "manageontap-1.0.jar", "vmware-vim-1.0.jar", "vmware-vim25-1.0.jar", "jasypt-1.9.0.jar", "commons-configuration-1.8.jar", "commons-lang-2.6.jar", "mail-1.4.jar", "activation-1.1.jar", "mysql-connector-java-5.1.21.jar", "hibernate-jpa-2.0-api-1.0.0.Final.jar", "hibernate-entitymanager-3.5.1-Final.jar", "hibernate-core-3.5.1-Final.jar", "hibernate-commons-annotations-3.2.0.Final.jar", "hibernate-annotations-3.5.1-Final.jar", "asm-3.1.jar", "xapi-5.6.100-1-20120825.123319-1.jar"], excl = excludes), cwd=start_path)
"xmlrpc-client-3.*.jar", "axis-1.4.jar", "wsdl4j-1.6.2.jar", "bcprov-jdk16-1.46.jar", "jsch-0.1.42.jar", "jasypt-1.9.0.jar", "commons-configuration-1.8.jar", "commons-lang-2.6.jar", "mail-1.4.jar", "activation-1.1.jar", "mysql-connector-java-5.1.21.jar", "hibernate-jpa-2.0-api-1.0.0.Final.jar", "hibernate-entitymanager-3.5.1-Final.jar", "hibernate-core-3.5.1-Final.jar", "hibernate-commons-annotations-3.2.0.Final.jar", "hibernate-annotations-3.5.1-Final.jar", "asm-3.1.jar", "xapi-5.6.100-1-SNAPSHOT.jar"], excl = excludes), cwd=start_path)
#def build_console_proxy ():
# binary unsubstitutable files: