mirror of
https://github.com/apache/cloudstack.git
synced 2025-10-26 08:42:29 +01:00
Merge branch 'master' into vpc
Conflicts: server/src/com/cloud/network/rules/RulesManagerImpl.java server/src/com/cloud/vm/UserVmManagerImpl.java server/src/com/cloud/vm/VirtualMachineGuru.java
This commit is contained in:
commit
a39fd61249
@ -352,12 +352,12 @@ public class MockVmManagerImpl implements MockVmManager {
|
||||
_mockAgentMgr.handleSystemVMStop(vm.getId());
|
||||
}
|
||||
|
||||
return new StopAnswer(cmd, null, new Integer(0), new Long(100), new Long(200));
|
||||
return new StopAnswer(cmd, null, new Integer(0), true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Answer rebootVM(RebootCommand cmd) {
|
||||
return new RebootAnswer(cmd, "Rebooted "+cmd.getVmName());
|
||||
return new RebootAnswer(cmd, "Rebooted "+cmd.getVmName(), false);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
||||
@ -233,10 +233,10 @@ public class AgentRoutingResource extends AgentStorageResource {
|
||||
Answer result = _simMgr.simulate(cmd, hostGuid);
|
||||
|
||||
if (!result.getResult()) {
|
||||
return new StopAnswer(cmd, result.getDetails());
|
||||
return new StopAnswer(cmd, result.getDetails(), false);
|
||||
}
|
||||
|
||||
answer = new StopAnswer(cmd, null, 0, new Long(100), new Long(200));
|
||||
answer = new StopAnswer(cmd, null, 0, true);
|
||||
Pair<Long, Long> data = _runningVms.get(vmName);
|
||||
if (data != null) {
|
||||
this.usedCpu -= data.first();
|
||||
|
||||
@ -1,12 +1,17 @@
|
||||
#!/bin/bash
|
||||
|
||||
### BEGIN INIT INFO
|
||||
# Provides: cloud agent
|
||||
# Required-Start: $network
|
||||
# Required-Stop: $network
|
||||
# Required-Start: $network $local_fs
|
||||
# Required-Stop: $network $local_fs
|
||||
# Default-Start: 3 4 5
|
||||
# Default-Stop: 0 1 2 6
|
||||
# X-Interactive: true
|
||||
# Short-Description: Start/stop apache2 web server
|
||||
# Short-Description: Start/stop Apache CloudStack Agent
|
||||
# Description: This scripts Starts/Stops the Apache CloudStack agent
|
||||
## The CloudStack Agent is a part of the Apache CloudStack project and is used
|
||||
## for managing KVM-based Hypervisors and performing secondary storage tasks inside
|
||||
## the Secondary Storage System Virtual Machine.
|
||||
## JSVC (Java daemonizing) is used for starting and stopping the agent
|
||||
### END INIT INFO
|
||||
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
@ -26,21 +31,17 @@
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# WARNING: if this script is changed, then all other initscripts MUST BE changed to match it as well
|
||||
|
||||
. /lib/lsb/init-functions
|
||||
. /etc/default/rcS
|
||||
|
||||
whatami=cloud-agent
|
||||
|
||||
# set environment variables
|
||||
|
||||
SHORTNAME="$whatami"
|
||||
PIDFILE=@PIDDIR@/"$whatami".pid
|
||||
SHORTNAME="cloud-agent"
|
||||
PIDFILE=@PIDDIR@/"$SHORTNAME".pid
|
||||
LOCKFILE=@LOCKDIR@/"$SHORTNAME"
|
||||
LOGFILE=@AGENTLOG@
|
||||
PROGNAME="Cloud Agent"
|
||||
PROGNAME="CloudStack Agent"
|
||||
CLASS="com.cloud.agent.AgentShell"
|
||||
PROG="jsvc"
|
||||
DAEMON="/usr/bin/jsvc"
|
||||
SHUTDOWN_WAIT="30"
|
||||
|
||||
unset OPTIONS
|
||||
[ -r @SYSCONFDIR@/default/"$SHORTNAME" ] && source @SYSCONFDIR@/default/"$SHORTNAME"
|
||||
@ -81,53 +82,53 @@ wait_for_network() {
|
||||
}
|
||||
|
||||
start() {
|
||||
log_daemon_msg $"Starting $PROGNAME" "$SHORTNAME"
|
||||
if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
|
||||
log_progress_msg "apparently already running"
|
||||
log_end_msg 0
|
||||
exit 0
|
||||
fi
|
||||
if hostname --fqdn >/dev/null 2>&1 ; then
|
||||
true
|
||||
else
|
||||
log_failure_msg "The host name does not resolve properly to an IP address. Cannot start $PROGNAME"
|
||||
log_end_msg 1
|
||||
exit 1
|
||||
fi
|
||||
if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
|
||||
log_daemon_msg "$PROGNAME apparently already running"
|
||||
log_end_msg 0
|
||||
exit 0
|
||||
fi
|
||||
|
||||
wait_for_network
|
||||
log_daemon_msg "Starting $PROGNAME" "$SHORTNAME"
|
||||
if hostname --fqdn >/dev/null 2>&1 ; then
|
||||
true
|
||||
else
|
||||
log_failure_msg "The host name does not resolve properly to an IP address. Cannot start $PROGNAME"
|
||||
log_end_msg 1
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if jsvc -cp "$CLASSPATH" -pidfile "$PIDFILE" $CLASS
|
||||
RETVAL=$?
|
||||
then
|
||||
rc=0
|
||||
sleep 1
|
||||
if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
|
||||
log_failure_msg "$PROG failed to start"
|
||||
rc=1
|
||||
fi
|
||||
else
|
||||
rc=1
|
||||
fi
|
||||
wait_for_network
|
||||
|
||||
if [ $rc -eq 0 ]; then
|
||||
log_end_msg 0
|
||||
else
|
||||
log_end_msg 1
|
||||
rm -f "$PIDFILE"
|
||||
fi
|
||||
if start_daemon -p $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" $CLASS
|
||||
RETVAL=$?
|
||||
then
|
||||
rc=0
|
||||
sleep 1
|
||||
if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
|
||||
log_failure_msg "$PROG failed to start"
|
||||
rc=1
|
||||
fi
|
||||
else
|
||||
rc=1
|
||||
fi
|
||||
|
||||
if [ $rc -eq 0 ]; then
|
||||
log_end_msg 0
|
||||
else
|
||||
log_end_msg 1
|
||||
rm -f "$PIDFILE"
|
||||
fi
|
||||
}
|
||||
|
||||
stop() {
|
||||
SHUTDOWN_WAIT="30"
|
||||
count="0"
|
||||
|
||||
echo -n $"Stopping $PROGNAME" "$SHORTNAME"
|
||||
jsvc -pidfile "$PIDFILE" -stop $CLASS
|
||||
log_daemon_msg "Stopping $PROGNAME" "$SHORTNAME"
|
||||
killproc -p $PIDFILE $DAEMON
|
||||
|
||||
until [ "$count" -gt "$SHUTDOWN_WAIT" ]
|
||||
do
|
||||
agentPid=`ps aux|grep [j]svc|grep cloud-agent`
|
||||
agentPid=$(ps aux|grep [j]svc|grep $SHORTNAME)
|
||||
if [ "$?" -gt "0" ];then
|
||||
break
|
||||
fi
|
||||
@ -135,40 +136,38 @@ stop() {
|
||||
let count="${count}+1"
|
||||
done
|
||||
|
||||
agentPid=`ps aux|grep [j]svc|grep cloud-agent`
|
||||
agentPid=$(ps aux|grep [j]svc|grep $SHORTNAME)
|
||||
if [ "$?" -eq "0" ]; then
|
||||
agentPid=`ps aux|grep [j]svc|awk '{print $2}'`
|
||||
if [ "$agentPid" != "" ]; then
|
||||
kill -9 $agentPid
|
||||
fi
|
||||
agentPid=$(ps aux|grep [j]svc|awk '{print $2}')
|
||||
if [ "$agentPid" != "" ]; then
|
||||
log_warning_msg "$PROG still running, forcing kill"
|
||||
kill -9 $agentPid
|
||||
fi
|
||||
fi
|
||||
|
||||
log_end_msg $?
|
||||
rm -f "$PIDFILE"
|
||||
}
|
||||
|
||||
|
||||
# See how we were called.
|
||||
case "$1" in
|
||||
start)
|
||||
start
|
||||
;;
|
||||
stop)
|
||||
stop
|
||||
;;
|
||||
status)
|
||||
start)
|
||||
start
|
||||
;;
|
||||
stop)
|
||||
stop
|
||||
;;
|
||||
status)
|
||||
status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME"
|
||||
RETVAL=$?
|
||||
;;
|
||||
restart)
|
||||
stop
|
||||
sleep 3
|
||||
start
|
||||
;;
|
||||
*)
|
||||
echo $"Usage: $whatami {start|stop|restart|status|help}"
|
||||
RETVAL=3
|
||||
RETVAL=$?
|
||||
;;
|
||||
restart | force-reload)
|
||||
stop
|
||||
sleep 3
|
||||
start
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|force-reload|status}"
|
||||
RETVAL=3
|
||||
esac
|
||||
|
||||
exit $RETVAL
|
||||
|
||||
|
||||
@ -1,32 +0,0 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
description "Stop CloudStack VMs on shutdown"
|
||||
author "Manuel Amador (Rudd-O) <manuel@vmops.com>"
|
||||
|
||||
start on stopping libvirt-bin
|
||||
|
||||
task
|
||||
script
|
||||
curr_runlevel=`runlevel | tail -c 2`
|
||||
if [ "$curr_runlevel" = "6" -o "$curr_runlevel" = "0" ] ; then
|
||||
for a in `virsh list | awk ' /^ +[0-9]+ [vri]-([0-9]+?)-/ { print $2 } '` ; do
|
||||
echo Destroying CloudStack VM $a
|
||||
virsh destroy $a
|
||||
done
|
||||
fi
|
||||
end script
|
||||
@ -443,8 +443,6 @@ public class FakeComputingResource extends ServerResourceBase implements
|
||||
String vmName = cmd.getVmName();
|
||||
|
||||
Integer port = vmMgr.getVncPort(vmName);
|
||||
Long bytesReceived = null;
|
||||
Long bytesSent = null;
|
||||
|
||||
State state = null;
|
||||
synchronized (_vms) {
|
||||
@ -462,17 +460,16 @@ public class FakeComputingResource extends ServerResourceBase implements
|
||||
s_logger.warn("Couldn't stop " + vmName);
|
||||
|
||||
if (result != null) {
|
||||
return new StopAnswer(cmd, result);
|
||||
return new StopAnswer(cmd, result, false);
|
||||
}
|
||||
}
|
||||
|
||||
answer = new StopAnswer(cmd, null, port, bytesSent, bytesReceived);
|
||||
answer = new StopAnswer(cmd, null, port, true);
|
||||
|
||||
String result2 = vmMgr.cleanupVnet(cmd.getVnet());
|
||||
if (result2 != null) {
|
||||
result = result2 + (result != null ? ("\n" + result) : "");
|
||||
answer = new StopAnswer(cmd, result, port, bytesSent,
|
||||
bytesReceived);
|
||||
answer = new StopAnswer(cmd, result, port, true);
|
||||
}
|
||||
|
||||
_dhcpSnooper.cleanup(vmName, null);
|
||||
@ -498,7 +495,7 @@ public class FakeComputingResource extends ServerResourceBase implements
|
||||
protected Answer execute(RebootCommand cmd) {
|
||||
VmMgr vmMgr = getVmManager();
|
||||
vmMgr.rebootVM(cmd.getVmName());
|
||||
return new RebootAnswer(cmd, "success", 0L, 0L);
|
||||
return new RebootAnswer(cmd, "success", true);
|
||||
}
|
||||
|
||||
private Answer execute(PingTestCommand cmd) {
|
||||
|
||||
@ -53,7 +53,7 @@ public class KVMHAChecker extends KVMHABase implements Callable<Boolean> {
|
||||
cmd.add("-h", _hostIP);
|
||||
cmd.add("-r");
|
||||
cmd.add("-t",
|
||||
String.valueOf((_heartBeatUpdateFreq + _heartBeatUpdateTimeout) / 1000 * 2));
|
||||
String.valueOf(_heartBeatUpdateFreq/1000));
|
||||
OutputInterpreter.OneLineParser parser = new OutputInterpreter.OneLineParser();
|
||||
String result = cmd.execute(parser);
|
||||
s_logger.debug("pool: " + pool._poolIp);
|
||||
|
||||
@ -30,12 +30,15 @@ import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.net.URL;
|
||||
import java.net.URLConnection;
|
||||
import java.text.DateFormat;
|
||||
import java.text.MessageFormat;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Calendar;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.Date;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
@ -154,6 +157,7 @@ import com.cloud.agent.resource.computing.KVMHABase.NfsStoragePool;
|
||||
import com.cloud.agent.resource.computing.LibvirtVMDef.ConsoleDef;
|
||||
import com.cloud.agent.resource.computing.LibvirtVMDef.DevicesDef;
|
||||
import com.cloud.agent.resource.computing.LibvirtVMDef.DiskDef;
|
||||
import com.cloud.agent.resource.computing.LibvirtVMDef.DiskDef.diskProtocol;
|
||||
import com.cloud.agent.resource.computing.LibvirtVMDef.FeaturesDef;
|
||||
import com.cloud.agent.resource.computing.LibvirtVMDef.GraphicDef;
|
||||
import com.cloud.agent.resource.computing.LibvirtVMDef.GuestDef;
|
||||
@ -1298,6 +1302,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
|
||||
KVMStoragePool primaryPool = _storagePoolMgr.getStoragePool(cmd
|
||||
.getPool().getUuid());
|
||||
|
||||
if (primaryPool.getType() == StoragePoolType.RBD) {
|
||||
s_logger.debug("Snapshots are not supported on RBD volumes");
|
||||
return new ManageSnapshotAnswer(cmd, false,
|
||||
"Snapshots are not supported on RBD volumes");
|
||||
}
|
||||
|
||||
KVMPhysicalDisk disk = primaryPool.getPhysicalDisk(cmd
|
||||
.getVolumePath());
|
||||
if (state == DomainInfo.DomainState.VIR_DOMAIN_RUNNING
|
||||
@ -1644,6 +1655,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
+ templateInstallFolder;
|
||||
_storage.mkdirs(tmpltPath);
|
||||
|
||||
if (primary.getType() != StoragePoolType.RBD) {
|
||||
Script command = new Script(_createTmplPath, _cmdsTimeout, s_logger);
|
||||
command.add("-f", disk.getPath());
|
||||
command.add("-t", tmpltPath);
|
||||
@ -1655,6 +1667,32 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
s_logger.debug("failed to create template: " + result);
|
||||
return new CreatePrivateTemplateAnswer(cmd, false, result);
|
||||
}
|
||||
} else {
|
||||
s_logger.debug("Converting RBD disk " + disk.getPath() + " into template " + cmd.getUniqueName());
|
||||
Script.runSimpleBashScript("qemu-img convert"
|
||||
+ " -f raw -O qcow2 "
|
||||
+ KVMPhysicalDisk.RBDStringBuilder(primary.getSourceHost(),
|
||||
primary.getSourcePort(),
|
||||
primary.getAuthUserName(),
|
||||
primary.getAuthSecret(),
|
||||
disk.getPath())
|
||||
+ " " + tmpltPath + "/" + cmd.getUniqueName() + ".qcow2");
|
||||
File templateProp = new File(tmpltPath + "/template.properties");
|
||||
if (!templateProp.exists()) {
|
||||
templateProp.createNewFile();
|
||||
}
|
||||
|
||||
String templateContent = "filename=" + cmd.getUniqueName() + ".qcow2" + System.getProperty("line.separator");
|
||||
|
||||
DateFormat dateFormat = new SimpleDateFormat("MM_dd_yyyy");
|
||||
Date date = new Date();
|
||||
templateContent += "snapshot.name=" + dateFormat.format(date) + System.getProperty("line.separator");
|
||||
|
||||
FileOutputStream templFo = new FileOutputStream(templateProp);
|
||||
templFo.write(templateContent.getBytes());
|
||||
templFo.flush();
|
||||
templFo.close();
|
||||
}
|
||||
|
||||
Map<String, Object> params = new HashMap<String, Object>();
|
||||
params.put(StorageLayer.InstanceConfigKey, _storage);
|
||||
@ -1756,8 +1794,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
|
||||
protected Answer execute(ModifyStoragePoolCommand cmd) {
|
||||
KVMStoragePool storagepool = _storagePoolMgr.createStoragePool(cmd
|
||||
.getPool().getUuid(), cmd.getPool().getHost(), cmd.getPool()
|
||||
.getPath(), cmd.getPool().getType());
|
||||
.getPool().getUuid(), cmd.getPool().getHost(), cmd.getPool().getPort(),
|
||||
cmd.getPool().getPath(), cmd.getPool().getUserInfo(), cmd.getPool().getType());
|
||||
if (storagepool == null) {
|
||||
return new Answer(cmd, false, " Failed to create storage pool");
|
||||
}
|
||||
@ -2234,8 +2272,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
}
|
||||
|
||||
private Answer execute(RebootCommand cmd) {
|
||||
Long bytesReceived = null;
|
||||
Long bytesSent = null;
|
||||
|
||||
synchronized (_vms) {
|
||||
_vms.put(cmd.getVmName(), State.Starting);
|
||||
@ -2252,13 +2288,12 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
|
||||
}
|
||||
get_rule_logs_for_vms();
|
||||
return new RebootAnswer(cmd, null, bytesSent, bytesReceived,
|
||||
vncPort);
|
||||
return new RebootAnswer(cmd, null, vncPort);
|
||||
} else {
|
||||
return new RebootAnswer(cmd, result);
|
||||
return new RebootAnswer(cmd, result, false);
|
||||
}
|
||||
} catch (LibvirtException e) {
|
||||
return new RebootAnswer(cmd, e.getMessage());
|
||||
return new RebootAnswer(cmd, e.getMessage(), false);
|
||||
} finally {
|
||||
synchronized (_vms) {
|
||||
_vms.put(cmd.getVmName(), State.Running);
|
||||
@ -2267,16 +2302,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
}
|
||||
|
||||
protected Answer execute(RebootRouterCommand cmd) {
|
||||
Long bytesSent = 0L;
|
||||
Long bytesRcvd = 0L;
|
||||
if (VirtualMachineName.isValidRouterName(cmd.getVmName())) {
|
||||
long[] stats = getNetworkStats(cmd.getPrivateIpAddress());
|
||||
bytesSent = stats[0];
|
||||
bytesRcvd = stats[1];
|
||||
}
|
||||
RebootAnswer answer = (RebootAnswer) execute((RebootCommand) cmd);
|
||||
answer.setBytesSent(bytesSent);
|
||||
answer.setBytesReceived(bytesRcvd);
|
||||
String result = _virtRouterResource.connect(cmd.getPrivateIpAddress());
|
||||
if (result == null) {
|
||||
networkUsage(cmd.getPrivateIpAddress(), "create", null);
|
||||
@ -2309,9 +2335,6 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
protected Answer execute(StopCommand cmd) {
|
||||
final String vmName = cmd.getVmName();
|
||||
|
||||
Long bytesReceived = new Long(0);
|
||||
Long bytesSent = new Long(0);
|
||||
|
||||
State state = null;
|
||||
synchronized (_vms) {
|
||||
state = _vms.get(vmName);
|
||||
@ -2337,9 +2360,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
result = result2 + result;
|
||||
}
|
||||
state = State.Stopped;
|
||||
return new StopAnswer(cmd, result, 0, bytesSent, bytesReceived);
|
||||
return new StopAnswer(cmd, result, 0, true);
|
||||
} catch (LibvirtException e) {
|
||||
return new StopAnswer(cmd, e.getMessage());
|
||||
return new StopAnswer(cmd, e.getMessage(), false);
|
||||
} finally {
|
||||
synchronized (_vms) {
|
||||
if (state != null) {
|
||||
@ -2626,7 +2649,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
} else {
|
||||
int devId = (int) volume.getDeviceId();
|
||||
|
||||
if (volume.getType() == Volume.Type.DATADISK) {
|
||||
if (pool.getType() == StoragePoolType.RBD) {
|
||||
/*
|
||||
For RBD pools we use the secret mechanism in libvirt.
|
||||
We store the secret under the UUID of the pool, that's why
|
||||
we pass the pool's UUID as the authSecret
|
||||
*/
|
||||
disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""), pool.getSourceHost(), pool.getSourcePort(),
|
||||
pool.getAuthUserName(), pool.getUuid(),
|
||||
devId, diskBusType, diskProtocol.RBD);
|
||||
} else if (volume.getType() == Volume.Type.DATADISK) {
|
||||
disk.defFileBasedDisk(physicalDisk.getPath(), devId,
|
||||
DiskDef.diskBus.VIRTIO,
|
||||
DiskDef.diskFmtType.QCOW2);
|
||||
@ -2984,8 +3016,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
try {
|
||||
|
||||
KVMStoragePool localStoragePool = _storagePoolMgr
|
||||
.createStoragePool(_localStorageUUID, "localhost",
|
||||
_localStoragePath, StoragePoolType.Filesystem);
|
||||
.createStoragePool(_localStorageUUID, "localhost", -1,
|
||||
_localStoragePath, "", StoragePoolType.Filesystem);
|
||||
com.cloud.agent.api.StoragePoolInfo pi = new com.cloud.agent.api.StoragePoolInfo(
|
||||
localStoragePool.getUuid(), cmd.getPrivateIpAddress(),
|
||||
_localStoragePath, _localStoragePath,
|
||||
@ -4085,5 +4117,4 @@ public class LibvirtComputingResource extends ServerResourceBase implements
|
||||
|
||||
return new Answer(cmd, success, "");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -0,0 +1,106 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
package com.cloud.agent.resource.computing;
|
||||
|
||||
public class LibvirtSecretDef {
|
||||
|
||||
public enum usage {
|
||||
VOLUME("volume"), CEPH("ceph");
|
||||
String _usage;
|
||||
|
||||
usage(String usage) {
|
||||
_usage = usage;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return _usage;
|
||||
}
|
||||
}
|
||||
|
||||
private usage _usage;
|
||||
private boolean _ephemeral;
|
||||
private boolean _private;
|
||||
private String _uuid;
|
||||
private String _description;
|
||||
private String _cephName;
|
||||
private String _volumeVolume;
|
||||
|
||||
public LibvirtSecretDef (usage usage, String uuid) {
|
||||
_usage = usage;
|
||||
_uuid = uuid;
|
||||
}
|
||||
|
||||
public LibvirtSecretDef (usage usage, String uuid, String description) {
|
||||
_usage = usage;
|
||||
_uuid = uuid;
|
||||
_description = description;
|
||||
}
|
||||
|
||||
public boolean getEphemeral() {
|
||||
return _ephemeral;
|
||||
}
|
||||
|
||||
public boolean getPrivate() {
|
||||
return _private;
|
||||
}
|
||||
|
||||
public String getUuid() {
|
||||
return _uuid;
|
||||
}
|
||||
|
||||
public String getDescription() {
|
||||
return _description;
|
||||
}
|
||||
|
||||
public String getVolumeVolume() {
|
||||
return _volumeVolume;
|
||||
}
|
||||
|
||||
public String getCephName() {
|
||||
return _cephName;
|
||||
}
|
||||
|
||||
public void setVolumeVolume(String volume) {
|
||||
_volumeVolume = volume;
|
||||
}
|
||||
|
||||
public void setCephName(String name) {
|
||||
_cephName = name;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder secretBuilder = new StringBuilder();
|
||||
secretBuilder.append("<secret ephemeral='" + (_ephemeral ? "yes" : "no") + "' private='" + (_private ? "yes" : "no") + "'>\n");
|
||||
secretBuilder.append("<uuid>" + _uuid + "</uuid>\n");
|
||||
if (_description != null) {
|
||||
secretBuilder.append("<description>" + _description + "</description>\n");
|
||||
}
|
||||
secretBuilder.append("<usage type='" + _usage + "'>\n");
|
||||
if (_usage == _usage.VOLUME) {
|
||||
secretBuilder.append("<volume>" + _volumeVolume + "</volume>\n");
|
||||
}
|
||||
if (_usage == _usage.CEPH) {
|
||||
secretBuilder.append("<name>" + _cephName + "</name>\n");
|
||||
}
|
||||
secretBuilder.append("</usage>\n");
|
||||
secretBuilder.append("</secret>\n");
|
||||
return secretBuilder.toString();
|
||||
}
|
||||
|
||||
}
|
||||
@ -18,7 +18,7 @@ package com.cloud.agent.resource.computing;
|
||||
|
||||
public class LibvirtStoragePoolDef {
|
||||
public enum poolType {
|
||||
ISCSI("iscsi"), NETFS("netfs"), LOGICAL("logical"), DIR("dir");
|
||||
ISCSI("iscsi"), NETFS("netfs"), LOGICAL("logical"), DIR("dir"), RBD("rbd");
|
||||
String _poolType;
|
||||
|
||||
poolType(String poolType) {
|
||||
@ -31,12 +31,41 @@ public class LibvirtStoragePoolDef {
|
||||
}
|
||||
}
|
||||
|
||||
public enum authType {
|
||||
CHAP("chap"), CEPH("ceph");
|
||||
String _authType;
|
||||
|
||||
authType(String authType) {
|
||||
_authType = authType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return _authType;
|
||||
}
|
||||
}
|
||||
|
||||
private poolType _poolType;
|
||||
private String _poolName;
|
||||
private String _uuid;
|
||||
private String _sourceHost;
|
||||
private int _sourcePort;
|
||||
private String _sourceDir;
|
||||
private String _targetPath;
|
||||
private String _authUsername;
|
||||
private authType _authType;
|
||||
private String _secretUuid;
|
||||
|
||||
public LibvirtStoragePoolDef(poolType type, String poolName, String uuid,
|
||||
String host, int port, String dir, String targetPath) {
|
||||
_poolType = type;
|
||||
_poolName = poolName;
|
||||
_uuid = uuid;
|
||||
_sourceHost = host;
|
||||
_sourcePort = port;
|
||||
_sourceDir = dir;
|
||||
_targetPath = targetPath;
|
||||
}
|
||||
|
||||
public LibvirtStoragePoolDef(poolType type, String poolName, String uuid,
|
||||
String host, String dir, String targetPath) {
|
||||
@ -48,6 +77,20 @@ public class LibvirtStoragePoolDef {
|
||||
_targetPath = targetPath;
|
||||
}
|
||||
|
||||
public LibvirtStoragePoolDef(poolType type, String poolName, String uuid,
|
||||
String sourceHost, int sourcePort, String dir, String authUsername,
|
||||
authType authType, String secretUuid) {
|
||||
_poolType = type;
|
||||
_poolName = poolName;
|
||||
_uuid = uuid;
|
||||
_sourceHost = sourceHost;
|
||||
_sourcePort = sourcePort;
|
||||
_sourceDir = dir;
|
||||
_authUsername = authUsername;
|
||||
_authType = authType;
|
||||
_secretUuid = secretUuid;
|
||||
}
|
||||
|
||||
public String getPoolName() {
|
||||
return _poolName;
|
||||
}
|
||||
@ -60,6 +103,10 @@ public class LibvirtStoragePoolDef {
|
||||
return _sourceHost;
|
||||
}
|
||||
|
||||
public int getSourcePort() {
|
||||
return _sourcePort;
|
||||
}
|
||||
|
||||
public String getSourceDir() {
|
||||
return _sourceDir;
|
||||
}
|
||||
@ -68,6 +115,18 @@ public class LibvirtStoragePoolDef {
|
||||
return _targetPath;
|
||||
}
|
||||
|
||||
public String getAuthUserName() {
|
||||
return _authUsername;
|
||||
}
|
||||
|
||||
public String getSecretUUID() {
|
||||
return _secretUuid;
|
||||
}
|
||||
|
||||
public authType getAuthType() {
|
||||
return _authType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder storagePoolBuilder = new StringBuilder();
|
||||
@ -81,9 +140,22 @@ public class LibvirtStoragePoolDef {
|
||||
storagePoolBuilder.append("<dir path='" + _sourceDir + "'/>\n");
|
||||
storagePoolBuilder.append("</source>\n");
|
||||
}
|
||||
storagePoolBuilder.append("<target>\n");
|
||||
storagePoolBuilder.append("<path>" + _targetPath + "</path>\n");
|
||||
storagePoolBuilder.append("</target>\n");
|
||||
if (_poolType == poolType.RBD) {
|
||||
storagePoolBuilder.append("<source>\n");
|
||||
storagePoolBuilder.append("<host name='" + _sourceHost + "' port='" + _sourcePort + "'/>\n");
|
||||
storagePoolBuilder.append("<name>" + _sourceDir + "</name>\n");
|
||||
if (_authUsername != null) {
|
||||
storagePoolBuilder.append("<auth username='" + _authUsername + "' type='" + _authType + "'>\n");
|
||||
storagePoolBuilder.append("<secret uuid='" + _secretUuid + "'/>\n");
|
||||
storagePoolBuilder.append("</auth>\n");
|
||||
}
|
||||
storagePoolBuilder.append("</source>\n");
|
||||
}
|
||||
if (_poolType != poolType.RBD) {
|
||||
storagePoolBuilder.append("<target>\n");
|
||||
storagePoolBuilder.append("<path>" + _targetPath + "</path>\n");
|
||||
storagePoolBuilder.append("</target>\n");
|
||||
}
|
||||
storagePoolBuilder.append("</pool>\n");
|
||||
return storagePoolBuilder.toString();
|
||||
}
|
||||
|
||||
@ -51,15 +51,34 @@ public class LibvirtStoragePoolXMLParser {
|
||||
Element source = (Element) rootElement.getElementsByTagName(
|
||||
"source").item(0);
|
||||
String host = getAttrValue("host", "name", source);
|
||||
String path = getAttrValue("dir", "path", source);
|
||||
|
||||
Element target = (Element) rootElement.getElementsByTagName(
|
||||
"target").item(0);
|
||||
String targetPath = getTagValue("path", target);
|
||||
if (type.equalsIgnoreCase("rbd")) {
|
||||
int port = Integer.parseInt(getAttrValue("host", "port", source));
|
||||
String pool = getTagValue("name", source);
|
||||
|
||||
return new LibvirtStoragePoolDef(
|
||||
LibvirtStoragePoolDef.poolType.valueOf(type.toUpperCase()),
|
||||
poolName, uuid, host, path, targetPath);
|
||||
Element auth = (Element) source.getElementsByTagName(
|
||||
"auth").item(0);
|
||||
|
||||
if (auth != null) {
|
||||
String authUsername = auth.getAttribute("username");
|
||||
String authType = auth.getAttribute("type");
|
||||
return new LibvirtStoragePoolDef(LibvirtStoragePoolDef.poolType.valueOf(type.toUpperCase()),
|
||||
poolName, uuid, host, port, pool, authUsername, LibvirtStoragePoolDef.authType.valueOf(authType.toUpperCase()), uuid);
|
||||
} else {
|
||||
return new LibvirtStoragePoolDef(LibvirtStoragePoolDef.poolType.valueOf(type.toUpperCase()),
|
||||
poolName, uuid, host, port, pool, "");
|
||||
}
|
||||
} else {
|
||||
String path = getAttrValue("dir", "path", source);
|
||||
|
||||
Element target = (Element) rootElement.getElementsByTagName(
|
||||
"target").item(0);
|
||||
String targetPath = getTagValue("path", target);
|
||||
|
||||
return new LibvirtStoragePoolDef(
|
||||
LibvirtStoragePoolDef.poolType.valueOf(type.toUpperCase()),
|
||||
poolName, uuid, host, path, targetPath);
|
||||
}
|
||||
} catch (ParserConfigurationException e) {
|
||||
s_logger.debug(e.toString());
|
||||
} catch (SAXException e) {
|
||||
|
||||
@ -338,7 +338,7 @@ public class LibvirtVMDef {
|
||||
}
|
||||
|
||||
enum diskType {
|
||||
FILE("file"), BLOCK("block"), DIRECTROY("dir");
|
||||
FILE("file"), BLOCK("block"), DIRECTROY("dir"), NETWORK("network");
|
||||
String _diskType;
|
||||
|
||||
diskType(String type) {
|
||||
@ -351,6 +351,20 @@ public class LibvirtVMDef {
|
||||
}
|
||||
}
|
||||
|
||||
enum diskProtocol {
|
||||
RBD("rbd"), SHEEPDOG("sheepdog");
|
||||
String _diskProtocol;
|
||||
|
||||
diskProtocol(String protocol) {
|
||||
_diskProtocol = protocol;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return _diskProtocol;
|
||||
}
|
||||
}
|
||||
|
||||
enum diskBus {
|
||||
IDE("ide"), SCSI("scsi"), VIRTIO("virtio"), XEN("xen"), USB("usb"), UML(
|
||||
"uml"), FDC("fdc");
|
||||
@ -382,7 +396,12 @@ public class LibvirtVMDef {
|
||||
|
||||
private deviceType _deviceType; /* floppy, disk, cdrom */
|
||||
private diskType _diskType;
|
||||
private diskProtocol _diskProtocol;
|
||||
private String _sourcePath;
|
||||
private String _sourceHost;
|
||||
private int _sourcePort;
|
||||
private String _authUserName;
|
||||
private String _authSecretUUID;
|
||||
private String _diskLabel;
|
||||
private diskBus _bus;
|
||||
private diskFmtType _diskFmtType; /* qcow2, raw etc. */
|
||||
@ -461,6 +480,38 @@ public class LibvirtVMDef {
|
||||
_bus = bus;
|
||||
}
|
||||
|
||||
public void defNetworkBasedDisk(String diskName, String sourceHost, int sourcePort,
|
||||
String authUserName, String authSecretUUID,
|
||||
int devId, diskBus bus, diskProtocol protocol) {
|
||||
_diskType = diskType.NETWORK;
|
||||
_deviceType = deviceType.DISK;
|
||||
_diskFmtType = diskFmtType.RAW;
|
||||
_sourcePath = diskName;
|
||||
_sourceHost = sourceHost;
|
||||
_sourcePort = sourcePort;
|
||||
_authUserName = authUserName;
|
||||
_authSecretUUID = authSecretUUID;
|
||||
_diskLabel = getDevLabel(devId, bus);
|
||||
_bus = bus;
|
||||
_diskProtocol = protocol;
|
||||
}
|
||||
|
||||
public void defNetworkBasedDisk(String diskName, String sourceHost, int sourcePort,
|
||||
String authUserName, String authSecretUUID,
|
||||
String diskLabel, diskBus bus, diskProtocol protocol) {
|
||||
_diskType = diskType.NETWORK;
|
||||
_deviceType = deviceType.DISK;
|
||||
_diskFmtType = diskFmtType.RAW;
|
||||
_sourcePath = diskName;
|
||||
_sourceHost = sourceHost;
|
||||
_sourcePort = sourcePort;
|
||||
_authUserName = authUserName;
|
||||
_authSecretUUID = authSecretUUID;
|
||||
_diskLabel = diskLabel;
|
||||
_bus = bus;
|
||||
_diskProtocol = protocol;
|
||||
}
|
||||
|
||||
public void setReadonly() {
|
||||
_readonly = true;
|
||||
}
|
||||
@ -527,6 +578,18 @@ public class LibvirtVMDef {
|
||||
diskBuilder.append(" dev='" + _sourcePath + "'");
|
||||
}
|
||||
diskBuilder.append("/>\n");
|
||||
} else if (_diskType == diskType.NETWORK) {
|
||||
diskBuilder.append("<source ");
|
||||
diskBuilder.append(" protocol='" + _diskProtocol + "'");
|
||||
diskBuilder.append(" name='" + _sourcePath + "'");
|
||||
diskBuilder.append(">\n");
|
||||
diskBuilder.append("<host name='" + _sourceHost + "' port='" + _sourcePort + "'/>\n");
|
||||
diskBuilder.append("</source>\n");
|
||||
if (_authUserName != null) {
|
||||
diskBuilder.append("<auth username='" + _authUserName + "'>\n");
|
||||
diskBuilder.append("<secret type='ceph' uuid='" + _authSecretUUID + "'/>\n");
|
||||
diskBuilder.append("</auth>\n");
|
||||
}
|
||||
}
|
||||
diskBuilder.append("<target dev='" + _diskLabel + "'");
|
||||
if (_bus != null) {
|
||||
@ -898,5 +961,4 @@ public class LibvirtVMDef {
|
||||
|
||||
System.out.println(vm.toString());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -34,6 +34,22 @@ public class KVMPhysicalDisk {
|
||||
}
|
||||
}
|
||||
|
||||
public static String RBDStringBuilder(String monHost, int monPort,
|
||||
String authUserName, String authSecret, String image) {
|
||||
String rbdOpts;
|
||||
|
||||
rbdOpts = "rbd:" + image;
|
||||
rbdOpts += ":mon_host=" + monHost + "\\\\:" + monPort;
|
||||
if (authUserName == null) {
|
||||
rbdOpts += ":auth_supported=none";
|
||||
} else {
|
||||
rbdOpts += ":auth_supported=cephx";
|
||||
rbdOpts += ":id=" + authUserName;
|
||||
rbdOpts += ":key=" + authSecret;
|
||||
}
|
||||
return rbdOpts;
|
||||
}
|
||||
|
||||
private PhysicalDiskFormat format;
|
||||
private long size;
|
||||
private long virtualSize;
|
||||
|
||||
@ -45,6 +45,16 @@ public interface KVMStoragePool {
|
||||
|
||||
public String getLocalPath();
|
||||
|
||||
public String getSourceHost();
|
||||
|
||||
public String getSourceDir();
|
||||
|
||||
public int getSourcePort();
|
||||
|
||||
public String getAuthUserName();
|
||||
|
||||
public String getAuthSecret();
|
||||
|
||||
public StoragePoolType getType();
|
||||
|
||||
public boolean delete();
|
||||
|
||||
@ -52,10 +52,10 @@ public class KVMStoragePoolManager {
|
||||
return this._storageAdaptor.getStoragePoolByUri(uri);
|
||||
}
|
||||
|
||||
public KVMStoragePool createStoragePool(String name, String host,
|
||||
String path, StoragePoolType type) {
|
||||
public KVMStoragePool createStoragePool(String name, String host, int port, String path,
|
||||
String userInfo, StoragePoolType type) {
|
||||
KVMStoragePool pool = this._storageAdaptor.createStoragePool(name,
|
||||
host, path, type);
|
||||
host, port, path, userInfo, type);
|
||||
if (type == StoragePoolType.NetworkFilesystem) {
|
||||
KVMHABase.NfsStoragePool nfspool = new KVMHABase.NfsStoragePool(
|
||||
pool.getUuid(), host, path, pool.getLocalPath(),
|
||||
@ -73,11 +73,16 @@ public class KVMStoragePoolManager {
|
||||
return true;
|
||||
}
|
||||
|
||||
public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
|
||||
String name, KVMStoragePool destPool) {
|
||||
return this._storageAdaptor.createDiskFromTemplate(template, name,
|
||||
KVMPhysicalDisk.PhysicalDiskFormat.QCOW2,
|
||||
template.getSize(), destPool);
|
||||
public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template, String name,
|
||||
KVMStoragePool destPool) {
|
||||
if (destPool.getType() == StoragePoolType.RBD) {
|
||||
return this._storageAdaptor.createDiskFromTemplate(template, name,
|
||||
KVMPhysicalDisk.PhysicalDiskFormat.RAW, template.getSize(), destPool);
|
||||
} else {
|
||||
return this._storageAdaptor.createDiskFromTemplate(template, name,
|
||||
KVMPhysicalDisk.PhysicalDiskFormat.QCOW2,
|
||||
template.getSize(), destPool);
|
||||
}
|
||||
}
|
||||
|
||||
public KVMPhysicalDisk createTemplateFromDisk(KVMPhysicalDisk disk,
|
||||
|
||||
@ -23,8 +23,10 @@ import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.apache.commons.codec.binary.Base64;
|
||||
import org.libvirt.Connect;
|
||||
import org.libvirt.LibvirtException;
|
||||
import org.libvirt.Secret;
|
||||
import org.libvirt.StoragePool;
|
||||
import org.libvirt.StoragePoolInfo;
|
||||
import org.libvirt.StorageVol;
|
||||
@ -32,10 +34,13 @@ import org.libvirt.StoragePoolInfo.StoragePoolState;
|
||||
|
||||
import com.cloud.agent.api.ManageSnapshotCommand;
|
||||
import com.cloud.agent.resource.computing.LibvirtConnection;
|
||||
import com.cloud.agent.resource.computing.LibvirtSecretDef;
|
||||
import com.cloud.agent.resource.computing.LibvirtSecretDef.usage;
|
||||
import com.cloud.agent.resource.computing.LibvirtStoragePoolDef;
|
||||
import com.cloud.agent.resource.computing.LibvirtStoragePoolXMLParser;
|
||||
import com.cloud.agent.resource.computing.LibvirtStorageVolumeDef;
|
||||
import com.cloud.agent.resource.computing.LibvirtStoragePoolDef.poolType;
|
||||
import com.cloud.agent.resource.computing.LibvirtStoragePoolDef.authType;
|
||||
import com.cloud.agent.resource.computing.LibvirtStorageVolumeDef.volFormat;
|
||||
import com.cloud.agent.resource.computing.LibvirtStorageVolumeXMLParser;
|
||||
import com.cloud.agent.storage.KVMPhysicalDisk.PhysicalDiskFormat;
|
||||
@ -143,7 +148,6 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
|
||||
synchronized (getStoragePool(uuid)) {
|
||||
sp = conn.storagePoolDefineXML(spd.toString(), 0);
|
||||
|
||||
if (sp == null) {
|
||||
s_logger.debug("Failed to define storage pool");
|
||||
return null;
|
||||
@ -270,6 +274,60 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
|
||||
}
|
||||
|
||||
private StoragePool createRBDStoragePool(Connect conn, String uuid,
|
||||
String host, int port, String userInfo, String path) {
|
||||
|
||||
LibvirtStoragePoolDef spd;
|
||||
StoragePool sp = null;
|
||||
|
||||
String[] userInfoTemp = userInfo.split(":");
|
||||
if (userInfoTemp.length == 2) {
|
||||
s_logger.debug("libvirt secret information found. id: " + userInfoTemp[0] + " secret: " + userInfoTemp[1]);
|
||||
LibvirtSecretDef sd = new LibvirtSecretDef(usage.CEPH, uuid);
|
||||
|
||||
Secret s = null;
|
||||
|
||||
sd.setCephName(userInfoTemp[0]);
|
||||
|
||||
try {
|
||||
s_logger.debug(sd.toString());
|
||||
s = conn.secretDefineXML(sd.toString());
|
||||
s.setValue(Base64.decodeBase64(userInfoTemp[1]));
|
||||
} catch (LibvirtException e) {
|
||||
s_logger.debug(e.toString());
|
||||
if (s != null) {
|
||||
try {
|
||||
s.undefine();
|
||||
s.free();
|
||||
} catch (LibvirtException l) {
|
||||
s_logger.debug("Failed to define secret with: " + l.toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
spd = new LibvirtStoragePoolDef(poolType.RBD, uuid, uuid, host, port, path, userInfoTemp[0], authType.CEPH, uuid);
|
||||
} else {
|
||||
spd = new LibvirtStoragePoolDef(poolType.RBD, uuid, uuid, host, port, path, "");
|
||||
}
|
||||
|
||||
try {
|
||||
s_logger.debug(spd.toString());
|
||||
sp = conn.storagePoolDefineXML(spd.toString(), 0);
|
||||
sp.create(0);
|
||||
return sp;
|
||||
} catch (LibvirtException e) {
|
||||
s_logger.debug(e.toString());
|
||||
if (sp != null) {
|
||||
try {
|
||||
sp.undefine();
|
||||
sp.free();
|
||||
} catch (LibvirtException l) {
|
||||
s_logger.debug("Failed to define RBD storage pool with: " + l.toString());
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
public StorageVol copyVolume(StoragePool destPool,
|
||||
LibvirtStorageVolumeDef destVol, StorageVol srcVol, int timeout)
|
||||
throws LibvirtException {
|
||||
@ -422,11 +480,36 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
if (spd.getPoolType() == LibvirtStoragePoolDef.poolType.NETFS
|
||||
|| spd.getPoolType() == LibvirtStoragePoolDef.poolType.DIR) {
|
||||
type = StoragePoolType.Filesystem;
|
||||
} else if (spd.getPoolType() == LibvirtStoragePoolDef.poolType.RBD) {
|
||||
type = StoragePoolType.RBD;
|
||||
}
|
||||
LibvirtStoragePool pool = new LibvirtStoragePool(uuid,
|
||||
storage.getName(), type, this, storage);
|
||||
pool.setLocalPath(spd.getTargetPath());
|
||||
getStats(pool);
|
||||
|
||||
LibvirtStoragePool pool = new LibvirtStoragePool(uuid, storage.getName(),
|
||||
type, this, storage);
|
||||
|
||||
if (pool.getType() != StoragePoolType.RBD) {
|
||||
pool.setLocalPath(spd.getTargetPath());
|
||||
} else {
|
||||
pool.setLocalPath("");
|
||||
pool.setSourceHost(spd.getSourceHost());
|
||||
pool.setSourcePort(spd.getSourcePort());
|
||||
pool.setSourceDir(spd.getSourceDir());
|
||||
String authUsername = spd.getAuthUserName();
|
||||
if (authUsername != null) {
|
||||
Secret secret = conn.secretLookupByUUIDString(spd.getSecretUUID());
|
||||
String secretValue = new String(Base64.encodeBase64(secret.getByteValue()));
|
||||
pool.setAuthUsername(authUsername);
|
||||
pool.setAuthSecret(secretValue);
|
||||
}
|
||||
}
|
||||
|
||||
if (pool.getType() == StoragePoolType.RBD) {
|
||||
pool.setCapacity(storage.getInfo().capacity);
|
||||
pool.setUsed(storage.getInfo().allocation);
|
||||
} else {
|
||||
getStats(pool);
|
||||
}
|
||||
|
||||
return pool;
|
||||
} catch (LibvirtException e) {
|
||||
throw new CloudRuntimeException(e.toString());
|
||||
@ -448,6 +531,8 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
disk.setVirtualSize(vol.getInfo().capacity);
|
||||
if (voldef.getFormat() == null) {
|
||||
disk.setFormat(pool.getDefaultFormat());
|
||||
} else if (pool.getType() == StoragePoolType.RBD) {
|
||||
disk.setFormat(KVMPhysicalDisk.PhysicalDiskFormat.RAW);
|
||||
} else if (voldef.getFormat() == LibvirtStorageVolumeDef.volFormat.QCOW2) {
|
||||
disk.setFormat(KVMPhysicalDisk.PhysicalDiskFormat.QCOW2);
|
||||
} else if (voldef.getFormat() == LibvirtStorageVolumeDef.volFormat.RAW) {
|
||||
@ -461,8 +546,8 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
}
|
||||
|
||||
@Override
|
||||
public KVMStoragePool createStoragePool(String name, String host,
|
||||
String path, StoragePoolType type) {
|
||||
public KVMStoragePool createStoragePool(String name, String host, int port,
|
||||
String path, String userInfo, StoragePoolType type) {
|
||||
StoragePool sp = null;
|
||||
Connect conn = null;
|
||||
try {
|
||||
@ -487,6 +572,8 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
} else if (type == StoragePoolType.SharedMountPoint
|
||||
|| type == StoragePoolType.Filesystem) {
|
||||
sp = CreateSharedStoragePool(conn, name, host, path);
|
||||
} else if (type == StoragePoolType.RBD) {
|
||||
sp = createRBDStoragePool(conn, name, host, port, userInfo, path);
|
||||
}
|
||||
}
|
||||
|
||||
@ -499,15 +586,23 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
LibvirtStoragePoolDef spd = getStoragePoolDef(conn, sp);
|
||||
LibvirtStoragePool pool = new LibvirtStoragePool(name,
|
||||
sp.getName(), type, this, sp);
|
||||
pool.setLocalPath(spd.getTargetPath());
|
||||
|
||||
getStats(pool);
|
||||
if (pool.getType() != StoragePoolType.RBD) {
|
||||
pool.setLocalPath(spd.getTargetPath());
|
||||
} else {
|
||||
pool.setLocalPath("");
|
||||
}
|
||||
|
||||
if (pool.getType() == StoragePoolType.RBD) {
|
||||
pool.setCapacity(sp.getInfo().capacity);
|
||||
pool.setUsed(sp.getInfo().allocation);
|
||||
} else {
|
||||
getStats(pool);
|
||||
}
|
||||
return pool;
|
||||
} catch (LibvirtException e) {
|
||||
throw new CloudRuntimeException(e.toString());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -520,6 +615,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
}
|
||||
|
||||
StoragePool sp = null;
|
||||
Secret s = null;
|
||||
|
||||
try {
|
||||
sp = conn.storagePoolLookupByUUIDString(uuid);
|
||||
@ -527,10 +623,23 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Some storage pools, like RBD also have 'secret' information stored in libvirt
|
||||
* Destroy them if they exist
|
||||
*/
|
||||
try {
|
||||
s = conn.secretLookupByUUIDString(uuid);
|
||||
} catch (LibvirtException e) {
|
||||
}
|
||||
|
||||
try {
|
||||
sp.destroy();
|
||||
sp.undefine();
|
||||
sp.free();
|
||||
if (s != null) {
|
||||
s.undefine();
|
||||
s.free();
|
||||
}
|
||||
return true;
|
||||
} catch (LibvirtException e) {
|
||||
throw new CloudRuntimeException(e.toString());
|
||||
@ -543,6 +652,11 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
LibvirtStoragePool libvirtPool = (LibvirtStoragePool) pool;
|
||||
StoragePool virtPool = libvirtPool.getPool();
|
||||
LibvirtStorageVolumeDef.volFormat libvirtformat = null;
|
||||
|
||||
if (pool.getType() == StoragePoolType.RBD) {
|
||||
format = PhysicalDiskFormat.RAW;
|
||||
}
|
||||
|
||||
if (format == PhysicalDiskFormat.QCOW2) {
|
||||
libvirtformat = LibvirtStorageVolumeDef.volFormat.QCOW2;
|
||||
} else if (format == PhysicalDiskFormat.RAW) {
|
||||
@ -580,19 +694,58 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
|
||||
@Override
|
||||
public KVMPhysicalDisk createDiskFromTemplate(KVMPhysicalDisk template,
|
||||
String name, PhysicalDiskFormat format, long size,
|
||||
KVMStoragePool destPool) {
|
||||
KVMPhysicalDisk disk = destPool.createPhysicalDisk(UUID.randomUUID()
|
||||
.toString(), format, template.getVirtualSize());
|
||||
String name, PhysicalDiskFormat format, long size, KVMStoragePool destPool) {
|
||||
|
||||
String newUuid = UUID.randomUUID().toString();
|
||||
KVMStoragePool srcPool = template.getPool();
|
||||
KVMPhysicalDisk disk = null;
|
||||
|
||||
/*
|
||||
With RBD you can't run qemu-img convert with an existing RBD image as destination
|
||||
qemu-img will exit with the error that the destination already exists.
|
||||
So for RBD we don't create the image, but let qemu-img do that for us.
|
||||
|
||||
We then create a KVMPhysicalDisk object that we can return
|
||||
*/
|
||||
|
||||
if (destPool.getType() != StoragePoolType.RBD) {
|
||||
disk = destPool.createPhysicalDisk(newUuid, format, template.getVirtualSize());
|
||||
|
||||
if (format == PhysicalDiskFormat.QCOW2) {
|
||||
Script.runSimpleBashScript("qemu-img create -f "
|
||||
+ template.getFormat() + " -b " + template.getPath() + " "
|
||||
+ disk.getPath());
|
||||
} else if (format == PhysicalDiskFormat.RAW) {
|
||||
Script.runSimpleBashScript("qemu-img convert -f "
|
||||
+ template.getFormat() + " -O raw " + template.getPath()
|
||||
+ " " + disk.getPath());
|
||||
} else {
|
||||
disk = new KVMPhysicalDisk(destPool.getSourceDir() + "/" + newUuid, newUuid, destPool);
|
||||
disk.setFormat(format);
|
||||
disk.setSize(template.getVirtualSize());
|
||||
disk.setVirtualSize(disk.getSize());
|
||||
|
||||
if (srcPool.getType() != StoragePoolType.RBD) {
|
||||
Script.runSimpleBashScript("qemu-img convert"
|
||||
+ " -f " + template.getFormat()
|
||||
+ " -O " + format
|
||||
+ " " + template.getPath()
|
||||
+ " " + KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(),
|
||||
destPool.getSourcePort(),
|
||||
destPool.getAuthUserName(),
|
||||
destPool.getAuthSecret(),
|
||||
disk.getPath()));
|
||||
} else {
|
||||
template.setFormat(PhysicalDiskFormat.RAW);
|
||||
Script.runSimpleBashScript("qemu-img convert"
|
||||
+ " -f " + template.getFormat()
|
||||
+ " -O " + format
|
||||
+ " " + KVMPhysicalDisk.RBDStringBuilder(srcPool.getSourceHost(),
|
||||
srcPool.getSourcePort(),
|
||||
srcPool.getAuthUserName(),
|
||||
srcPool.getAuthSecret(),
|
||||
template.getPath())
|
||||
+ " " + KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(),
|
||||
destPool.getSourcePort(),
|
||||
destPool.getAuthUserName(),
|
||||
destPool.getAuthSecret(),
|
||||
disk.getPath()));
|
||||
}
|
||||
}
|
||||
return disk;
|
||||
}
|
||||
@ -625,14 +778,60 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
@Override
|
||||
public KVMPhysicalDisk copyPhysicalDisk(KVMPhysicalDisk disk, String name,
|
||||
KVMStoragePool destPool) {
|
||||
KVMPhysicalDisk newDisk = destPool.createPhysicalDisk(name,
|
||||
disk.getVirtualSize());
|
||||
String sourcePath = disk.getPath();
|
||||
String destPath = newDisk.getPath();
|
||||
|
||||
Script.runSimpleBashScript("qemu-img convert -f " + disk.getFormat()
|
||||
+ " -O " + newDisk.getFormat() + " " + sourcePath + " "
|
||||
+ destPath);
|
||||
/*
|
||||
With RBD you can't run qemu-img convert with an existing RBD image as destination
|
||||
qemu-img will exit with the error that the destination already exists.
|
||||
So for RBD we don't create the image, but let qemu-img do that for us.
|
||||
|
||||
We then create a KVMPhysicalDisk object that we can return
|
||||
*/
|
||||
|
||||
KVMPhysicalDisk newDisk;
|
||||
if (destPool.getType() != StoragePoolType.RBD) {
|
||||
newDisk = destPool.createPhysicalDisk(name, disk.getVirtualSize());
|
||||
} else {
|
||||
newDisk = new KVMPhysicalDisk(destPool.getSourceDir() + "/" + name, name, destPool);
|
||||
newDisk.setFormat(PhysicalDiskFormat.RAW);
|
||||
newDisk.setSize(disk.getVirtualSize());
|
||||
newDisk.setVirtualSize(disk.getSize());
|
||||
}
|
||||
|
||||
KVMStoragePool srcPool = disk.getPool();
|
||||
String destPath = newDisk.getPath();
|
||||
String sourcePath = disk.getPath();
|
||||
PhysicalDiskFormat sourceFormat = disk.getFormat();
|
||||
PhysicalDiskFormat destFormat = newDisk.getFormat();
|
||||
|
||||
if ((srcPool.getType() != StoragePoolType.RBD) && (destPool.getType() != StoragePoolType.RBD)) {
|
||||
Script.runSimpleBashScript("qemu-img convert -f " + sourceFormat
|
||||
+ " -O " + destFormat
|
||||
+ " " + sourcePath
|
||||
+ " " + destPath);
|
||||
} else if ((srcPool.getType() != StoragePoolType.RBD) && (destPool.getType() == StoragePoolType.RBD)) {
|
||||
Script.runSimpleBashScript("qemu-img convert -f " + sourceFormat
|
||||
+ " -O " + destFormat
|
||||
+ " " + sourcePath
|
||||
+ " " + KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(),
|
||||
destPool.getSourcePort(),
|
||||
destPool.getAuthUserName(),
|
||||
destPool.getAuthSecret(),
|
||||
destPath));
|
||||
} else {
|
||||
Script.runSimpleBashScript("qemu-img convert -f " + sourceFormat
|
||||
+ " -O " + destFormat
|
||||
+ " " + KVMPhysicalDisk.RBDStringBuilder(srcPool.getSourceHost(),
|
||||
srcPool.getSourcePort(),
|
||||
srcPool.getAuthUserName(),
|
||||
srcPool.getAuthSecret(),
|
||||
sourcePath)
|
||||
+ " " + KVMPhysicalDisk.RBDStringBuilder(destPool.getSourceHost(),
|
||||
destPool.getSourcePort(),
|
||||
destPool.getAuthUserName(),
|
||||
destPool.getAuthSecret(),
|
||||
destPath));
|
||||
}
|
||||
|
||||
return newDisk;
|
||||
}
|
||||
|
||||
@ -658,7 +857,7 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
protocal = StoragePoolType.NetworkFilesystem;
|
||||
}
|
||||
|
||||
return createStoragePool(uuid, sourceHost, sourcePath, protocal);
|
||||
return createStoragePool(uuid, sourceHost, 0, sourcePath, "", protocal);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -699,5 +898,4 @@ public class LibvirtStorageAdaptor implements StorageAdaptor {
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -34,6 +34,11 @@ public class LibvirtStoragePool implements KVMStoragePool {
|
||||
protected StoragePoolType type;
|
||||
protected StorageAdaptor _storageAdaptor;
|
||||
protected StoragePool _pool;
|
||||
protected String authUsername;
|
||||
protected String authSecret;
|
||||
protected String sourceHost;
|
||||
protected int sourcePort;
|
||||
protected String sourceDir;
|
||||
|
||||
public LibvirtStoragePool(String uuid, String name, StoragePoolType type,
|
||||
StorageAdaptor adaptor, StoragePool pool) {
|
||||
@ -137,6 +142,51 @@ public class LibvirtStoragePool implements KVMStoragePool {
|
||||
this.localPath = localPath;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getAuthUserName() {
|
||||
return this.authUsername;
|
||||
}
|
||||
|
||||
public void setAuthUsername(String authUsername) {
|
||||
this.authUsername = authUsername;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getAuthSecret() {
|
||||
return this.authSecret;
|
||||
}
|
||||
|
||||
public void setAuthSecret(String authSecret) {
|
||||
this.authSecret = authSecret;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getSourceHost() {
|
||||
return this.sourceHost;
|
||||
}
|
||||
|
||||
public void setSourceHost(String host) {
|
||||
this.sourceHost = host;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getSourcePort() {
|
||||
return this.sourcePort;
|
||||
}
|
||||
|
||||
public void setSourcePort(int port) {
|
||||
this.sourcePort = port;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getSourceDir() {
|
||||
return this.sourceDir;
|
||||
}
|
||||
|
||||
public void setSourceDir(String dir) {
|
||||
this.sourceDir = dir;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StoragePoolType getType() {
|
||||
return this.type;
|
||||
|
||||
@ -30,8 +30,8 @@ public interface StorageAdaptor {
|
||||
public KVMPhysicalDisk getPhysicalDisk(String volumeUuid,
|
||||
KVMStoragePool pool);
|
||||
|
||||
public KVMStoragePool createStoragePool(String name, String host,
|
||||
String path, StoragePoolType type);
|
||||
public KVMStoragePool createStoragePool(String name, String host, int port,
|
||||
String path, String userInfo, StoragePoolType type);
|
||||
|
||||
public boolean deleteStoragePool(String uuid);
|
||||
|
||||
|
||||
@ -17,52 +17,25 @@
|
||||
package com.cloud.agent.api;
|
||||
|
||||
public class RebootAnswer extends Answer {
|
||||
Long bytesSent;
|
||||
Long bytesReceived;
|
||||
Integer vncPort;
|
||||
|
||||
protected RebootAnswer() {
|
||||
}
|
||||
|
||||
public RebootAnswer(RebootCommand cmd, String details, Long bytesSent, Long bytesReceived, Integer vncport) {
|
||||
public RebootAnswer(RebootCommand cmd, String details, Integer vncport) {
|
||||
super(cmd, true, details);
|
||||
this.bytesReceived = bytesReceived;
|
||||
this.bytesSent = bytesSent;
|
||||
this.vncPort = vncport;
|
||||
}
|
||||
|
||||
public RebootAnswer(RebootCommand cmd, String details, Long bytesSent, Long bytesReceived) {
|
||||
super(cmd, true, details);
|
||||
this.bytesReceived = bytesReceived;
|
||||
this.bytesSent = bytesSent;
|
||||
public RebootAnswer(RebootCommand cmd, String details, boolean success) {
|
||||
super(cmd, success, details);
|
||||
this.vncPort = null;
|
||||
}
|
||||
|
||||
public RebootAnswer(RebootCommand cmd, String details) {
|
||||
super(cmd, false, details);
|
||||
bytesSent = null;
|
||||
bytesReceived = null;
|
||||
}
|
||||
|
||||
public RebootAnswer(RebootCommand cmd, Exception e) {
|
||||
super(cmd, e);
|
||||
}
|
||||
|
||||
public void setBytesReceived(Long bytesReceived) {
|
||||
this.bytesReceived = bytesReceived;
|
||||
}
|
||||
|
||||
public Long getBytesReceived() {
|
||||
return bytesReceived;
|
||||
}
|
||||
|
||||
public void setBytesSent(Long bytesSent) {
|
||||
this.bytesSent = bytesSent;
|
||||
}
|
||||
|
||||
public Long getBytesSent() {
|
||||
return bytesSent;
|
||||
}
|
||||
public Integer getVncPort() {
|
||||
return vncPort;
|
||||
}
|
||||
|
||||
@ -22,13 +22,13 @@ public class StopAnswer extends RebootAnswer {
|
||||
protected StopAnswer() {
|
||||
}
|
||||
|
||||
public StopAnswer(StopCommand cmd, String details, Integer vncPort, Long bytesSent, Long bytesReceived) {
|
||||
super(cmd, details, bytesSent, bytesReceived);
|
||||
public StopAnswer(StopCommand cmd, String details, Integer vncPort, boolean success) {
|
||||
super(cmd, details, success);
|
||||
this.vncPort = vncPort;
|
||||
}
|
||||
|
||||
public StopAnswer(StopCommand cmd, String details) {
|
||||
super(cmd, details);
|
||||
public StopAnswer(StopCommand cmd, String details, boolean success) {
|
||||
super(cmd, details, success);
|
||||
vncPort = null;
|
||||
|
||||
}
|
||||
|
||||
@ -23,7 +23,6 @@ public class StopCommand extends RebootCommand {
|
||||
private boolean isProxy=false;
|
||||
private String urlPort=null;
|
||||
private String publicConsoleProxyIpAddress=null;
|
||||
private String privateRouterIpAddress=null;
|
||||
|
||||
protected StopCommand() {
|
||||
}
|
||||
@ -45,12 +44,6 @@ public class StopCommand extends RebootCommand {
|
||||
this.vnet = vnet;
|
||||
}
|
||||
|
||||
public StopCommand(VirtualMachine vm, String vmName, String vnet, String privateRouterIpAddress) {
|
||||
super(vmName);
|
||||
this.vnet = vnet;
|
||||
this.privateRouterIpAddress = privateRouterIpAddress;
|
||||
}
|
||||
|
||||
public StopCommand(String vmName) {
|
||||
super(vmName);
|
||||
}
|
||||
@ -76,7 +69,4 @@ public class StopCommand extends RebootCommand {
|
||||
return this.publicConsoleProxyIpAddress;
|
||||
}
|
||||
|
||||
public String getPrivateRouterIpAddress() {
|
||||
return privateRouterIpAddress;
|
||||
}
|
||||
}
|
||||
|
||||
@ -24,6 +24,7 @@ public class StorageFilerTO {
|
||||
String uuid;
|
||||
String host;
|
||||
String path;
|
||||
String userInfo;
|
||||
int port;
|
||||
StoragePoolType type;
|
||||
|
||||
@ -34,6 +35,7 @@ public class StorageFilerTO {
|
||||
this.path = pool.getPath();
|
||||
this.type = pool.getPoolType();
|
||||
this.uuid = pool.getUuid();
|
||||
this.userInfo = pool.getUserInfo();
|
||||
}
|
||||
|
||||
public long getId() {
|
||||
@ -52,6 +54,10 @@ public class StorageFilerTO {
|
||||
return path;
|
||||
}
|
||||
|
||||
public String getUserInfo() {
|
||||
return userInfo;
|
||||
}
|
||||
|
||||
public int getPort() {
|
||||
return port;
|
||||
}
|
||||
|
||||
@ -52,7 +52,7 @@ public class CreateFirewallRuleCmd extends BaseAsyncCreateCmd implements Firewal
|
||||
// ///////////////////////////////////////////////////
|
||||
|
||||
@IdentityMapper(entityTableName="user_ip_address")
|
||||
@Parameter(name = ApiConstants.IP_ADDRESS_ID, type = CommandType.LONG, description = "the IP address id of the port forwarding rule")
|
||||
@Parameter(name = ApiConstants.IP_ADDRESS_ID, type = CommandType.LONG, required=true, description = "the IP address id of the port forwarding rule")
|
||||
private Long ipAddressId;
|
||||
|
||||
@Parameter(name = ApiConstants.PROTOCOL, type = CommandType.STRING, required = true, description = "the protocol for the firewall rule. Valid values are TCP/UDP/ICMP.")
|
||||
|
||||
@ -96,6 +96,7 @@ public class Storage {
|
||||
Iscsi(true), // for e.g., ZFS Comstar
|
||||
ISO(false), // for iso image
|
||||
LVM(false), // XenServer local LVM SR
|
||||
RBD(true),
|
||||
SharedMountPoint(true),
|
||||
VMFS(true), // VMware VMFS storage
|
||||
PreSetup(true), // for XenServer, Storage Pool is set up by customers.
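For context, the boolean on each StoragePoolType constant marks whether that pool type is shared across hosts, and the new RBD entry is flagged as shared. A rough paraphrase of the pattern (the isShared() accessor name is an assumption, not shown in this hunk):

    public enum StoragePoolType {
        Iscsi(true), ISO(false), LVM(false), RBD(true), SharedMountPoint(true), VMFS(true), PreSetup(true);

        private final boolean shared;                    // true: usable by every host in the cluster

        StoragePoolType(boolean shared) { this.shared = shared; }

        public boolean isShared() { return shared; }     // assumed accessor name
    }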
|
||||
|
||||
@ -85,6 +85,11 @@ public interface StoragePool {
|
||||
*/
|
||||
String getPath();
|
||||
|
||||
/**
|
||||
* @return the user information / credentials for the storage host
|
||||
*/
|
||||
String getUserInfo();
|
||||
|
||||
/**
|
||||
* @return the storage pool represents a shared storage resource
|
||||
*/
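getUserInfo() carries the credential part of the storage URI; for RBD this is typically a "user:secret" pair taken from rbd://user:secret@host/pool, as the URI handling later in this commit suggests. A hedged sketch of how a consumer might split it (the format is an assumption):

    // Hypothetical consumer of StoragePool.getUserInfo(); value assumed to be "user:secret".
    String userInfo = pool.getUserInfo();
    if (userInfo != null && !userInfo.isEmpty()) {
        String[] parts = userInfo.split(":", 2);
        String rbdUser   = parts[0];
        String rbdSecret = parts.length > 1 ? parts[1] : "";
        // hand rbdUser/rbdSecret to the hypervisor-side storage adaptor
    }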
|
||||
|
||||
@ -169,7 +169,7 @@
<pluggableservice name="F5ExternalLoadBalancerElementService" key="com.cloud.network.element.F5ExternalLoadBalancerElementService" class="com.cloud.network.element.F5ExternalLoadBalancerElement"/>
<pluggableservice name="JuniperSRXFirewallElementService" key="com.cloud.network.element.JuniperSRXFirewallElementService" class="com.cloud.network.element.JuniperSRXExternalFirewallElement"/>
<pluggableservice name="CiscoNexusVSMElementService" key="com.cloud.network.element.CiscoNexusVSMElementService" class="com.cloud.network.element.CiscoNexusVSMElement"/>
<pluggableservice name="NiciraNvpElementService" key="com.coud.network.element.NiciraNvpElementService" class="com.cloud.network.element.NiciraNvpElement"/>
<pluggableservice name="NiciraNvpElementService" key="com.cloud.network.element.NiciraNvpElementService" class="com.cloud.network.element.NiciraNvpElement"/>
<dao name="NetScalerPodDao" class="com.cloud.network.dao.NetScalerPodDaoImpl" singleton="false"/>
<dao name="CiscoNexusVSMDeviceDao" class="com.cloud.network.dao.CiscoNexusVSMDeviceDaoImpl" singleton="false"/>
<dao name="OvsTunnelInterfaceDao" class="com.cloud.network.ovs.dao.OvsTunnelInterfaceDaoImpl" singleton="false"/>
@ -1,19 +1,3 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one
|
||||
// or more contributor license agreements. See the NOTICE file
|
||||
// distributed with this work for additional information
|
||||
// regarding copyright ownership. The ASF licenses this file
|
||||
// to you under the Apache License, Version 2.0 (the
|
||||
// "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing,
|
||||
// software distributed under the License is distributed on an
|
||||
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
// KIND, either express or implied. See the License for the
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
/*
|
||||
* The contents of this file are subject to the "END USER LICENSE AGREEMENT FOR F5
|
||||
* Software Development Kit for iControl"; you may not use this file except in
|
||||
|
||||
@ -157,6 +157,9 @@ public class StoragePoolVO implements StoragePool, Identity {
|
||||
@Column(name="port")
|
||||
private int port;
|
||||
|
||||
@Column(name="user_info")
|
||||
private String userInfo;
|
||||
|
||||
@Column(name="cluster_id")
|
||||
private Long clusterId;
|
||||
|
||||
@ -180,6 +183,11 @@ public class StoragePoolVO implements StoragePool, Identity {
|
||||
return path;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getUserInfo() {
|
||||
return userInfo;
|
||||
}
|
||||
|
||||
public StoragePoolVO(long poolId, String name, String uuid, StoragePoolType type,
|
||||
long dataCenterId, Long podId, long availableBytes, long capacityBytes, String hostAddress, int port, String hostPath) {
|
||||
this.name = name;
|
||||
@ -209,6 +217,16 @@ public class StoragePoolVO implements StoragePool, Identity {
|
||||
this.uuid = UUID.randomUUID().toString();
|
||||
}
|
||||
|
||||
public StoragePoolVO(StoragePoolType type, String hostAddress, int port, String path, String userInfo) {
|
||||
this.poolType = type;
|
||||
this.hostAddress = hostAddress;
|
||||
this.port = port;
|
||||
this.path = path;
|
||||
this.userInfo = userInfo;
|
||||
this.setStatus(StoragePoolStatus.Up);
|
||||
this.uuid = UUID.randomUUID().toString();
|
||||
}
|
||||
|
||||
public void setStatus(StoragePoolStatus status)
|
||||
{
|
||||
this.status = status;
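A quick illustration of the added constructor, mirroring how the rbd:// branch of StorageManagerImpl uses it later in this commit (host, port and credentials here are invented values):

    // Sketch only: pool record for rbd://admin:secret@ceph-mon.example.org/cloudstack
    StoragePoolVO rbdPool = new StoragePoolVO(StoragePoolType.RBD,
            "ceph-mon.example.org",   // monitor host
            6789,                     // default monitor port when the URI carries none
            "cloudstack",             // pool name, i.e. the URI path with the leading '/' stripped
            "admin:secret");          // userInfo as taken from the URI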
|
||||
@ -234,6 +252,10 @@ public class StoragePoolVO implements StoragePool, Identity {
|
||||
this.path = path;
|
||||
}
|
||||
|
||||
public void setUserInfo(String userInfo) {
|
||||
this.userInfo = userInfo;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getPort() {
|
||||
return port;
|
||||
|
||||
debian/changelog (6 changes, vendored)
@ -1,3 +1,9 @@
|
||||
cloud (3.0.2) unstable; urgency=low
|
||||
|
||||
* Bumping the package version to the latest release.
|
||||
|
||||
-- Wido den Hollander <wido@widodh.nl> Wed, 25 Jul 2012 14:53:31 +0200
|
||||
|
||||
cloud (2.2.2) unstable; urgency=low
|
||||
|
||||
* Bumping version number for next CloudStack release
|
||||
|
||||
debian/cloud-agent.install (1 change, vendored)
@ -3,6 +3,5 @@
|
||||
/etc/cloud/agent/environment.properties
|
||||
/etc/cloud/agent/log4j-cloud.xml
|
||||
/etc/init.d/cloud-agent
|
||||
/usr/bin/agent-runner
|
||||
/usr/bin/cloud-setup-agent
|
||||
/var/log/cloud/agent
|
||||
|
||||
debian/cloud-usage.install (1 change, vendored)
@ -1,6 +1,5 @@
|
||||
/usr/share/java/cloud-usage.jar
|
||||
/etc/init.d/cloud-usage
|
||||
/usr/bin/usage-runner
|
||||
/var/log/cloud/usage
|
||||
/etc/cloud/usage/usage-components.xml
|
||||
/etc/cloud/usage/log4j-cloud_usage.xml
|
||||
|
||||
debian/control (28 changes, vendored)
@ -11,11 +11,11 @@ Provides: vmops-deps
|
||||
Conflicts: vmops-deps
|
||||
Replaces: vmops-deps
|
||||
Architecture: any
|
||||
Depends: openjdk-6-jre, cloud-agent-deps
|
||||
Depends: openjdk-6-jre
|
||||
Description: CloudStack library dependencies
|
||||
This package contains a number of third-party dependencies
|
||||
not shipped by distributions, required to run the CloudStack
|
||||
Cloud Stack.
|
||||
Management Server.
|
||||
|
||||
Package: cloud-agent-deps
|
||||
Provides: cloud-agent-deps
|
||||
@ -26,8 +26,7 @@ Depends: openjdk-6-jre
|
||||
Description: CloudStack agent library dependencies
|
||||
This package contains a number of third-party dependencies
|
||||
not shipped by distributions, required to run the CloudStack
|
||||
Cloud Stack.
|
||||
|
||||
Agent.
|
||||
|
||||
Package: cloud-utils
|
||||
Provides: vmops-utils
|
||||
@ -37,7 +36,7 @@ Architecture: any
|
||||
Depends: openjdk-6-jre, python
|
||||
Description: CloudStack utility library
|
||||
The CloudStack utility libraries provide a set of Java classes used
|
||||
in the CloudStack Cloud Stack.
|
||||
in the CloudStack environment.
|
||||
|
||||
Package: cloud-client-ui
|
||||
Provides: vmops-client-ui
|
||||
@ -59,7 +58,7 @@ Architecture: any
|
||||
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), libservlet2.5-java
|
||||
Description: CloudStack server library
|
||||
The CloudStack server libraries provide a set of Java classes used
|
||||
in the CloudStack Cloud Stack.
|
||||
in the CloudStack management server.
|
||||
|
||||
Package: cloud-agent-scripts
|
||||
Provides: vmops-agent-scripts, vmops-console, cloud-console, vmops-console-proxy
|
||||
@ -68,11 +67,8 @@ Replaces: vmops-agent-scripts, vmops-console, cloud-console, vmops-console-proxy
|
||||
Architecture: any
|
||||
Depends: openjdk-6-jre, python, bash, bzip2, gzip, unzip, nfs-common, openssh-client
|
||||
Description: CloudStack agent scripts
|
||||
The CloudStack agent is in charge of managing shared computing resources in
|
||||
a CloudStack Cloud Stack-powered cloud. Install this package if this computer
|
||||
will participate in your cloud -- this is a requirement for the CloudStack
|
||||
agent.
|
||||
|
||||
This package contains a number of scripts needed for the CloudStack Agent on KVM
|
||||
HyperVisor hosts. The CloudStack Agent depends on this package.
|
||||
|
||||
Package: cloud-core
|
||||
Provides: vmops-core
|
||||
@ -90,7 +86,7 @@ Provides: vmops-client
|
||||
Conflicts: vmops-client
|
||||
Replaces: vmops-client
|
||||
Architecture: any
|
||||
Depends: openjdk-6-jre, cloud-deps (= ${source:Version}), cloud-utils (= ${source:Version}), cloud-server (= ${source:Version}), cloud-client-ui (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-agent-scripts (= ${source:Version}), cloud-python (= ${source:Version}), tomcat6, libws-commons-util-java, libcommons-dbcp-java, libcommons-collections-java, libcommons-httpclient-java, sysvinit-utils, chkconfig, sudo, jsvc, python-mysqldb, python-paramiko, augeas-tools, genisoimage, cloud-system-iso
|
||||
Depends: openjdk-6-jre, cloud-deps (= ${source:Version}), cloud-utils (= ${source:Version}), cloud-server (= ${source:Version}), cloud-client-ui (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-python (= ${source:Version}), tomcat6, libws-commons-util-java, libcommons-dbcp-java, libcommons-collections-java, libcommons-httpclient-java, sysvinit-utils, chkconfig, sudo, jsvc, python-mysqldb, python-paramiko, augeas-tools, genisoimage, cloud-system-iso
|
||||
Description: CloudStack client
|
||||
The CloudStack management server is the central point of coordination,
|
||||
management, and intelligence in the CloudStack Cloud Stack. This package
|
||||
@ -123,17 +119,17 @@ Provides: vmops-agent
|
||||
Conflicts: vmops-agent
|
||||
Replaces: vmops-agent
|
||||
Architecture: any
|
||||
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version}), python, cloud-python (= ${source:Version}), cloud-agent-libs (= ${source:Version}), cloud-agent-scripts (= ${source:Version}), libcommons-httpclient-java, libcommons-collections-java, libcommons-dbcp-java, libcommons-pool-java, libcommons-logging-java, libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, libcglib-java, libcommons-httpclient-java, libservlet2.5-java, liblog4j1.2-java, libjna-java, wget, jsvc
|
||||
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version}), python, cloud-python (= ${source:Version}), cloud-agent-libs (= ${source:Version}), cloud-agent-scripts (= ${source:Version}), libcommons-httpclient-java, libcommons-collections-java, libcommons-dbcp-java, libcommons-pool-java, libcommons-logging-java, libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, libcglib-java, libcommons-httpclient-java, libservlet2.5-java, liblog4j1.2-java, libjna-java, wget, jsvc, lsb-base (>= 3.2)
|
||||
Description: CloudStack agent
|
||||
The CloudStack agent is in charge of managing shared computing resources in
|
||||
a CloudStack Cloud Stack-powered cloud. Install this package if this computer
|
||||
will participate in your cloud.
|
||||
a CloudStack powered cloud. Install this package if this computer
|
||||
will participate in your cloud as a KVM HyperVisor.
|
||||
|
||||
Package: cloud-system-iso
|
||||
Architecture: any
|
||||
Description: CloudStack system iso
|
||||
The CloudStack agent is in charge of managing shared computing resources in
|
||||
a CloudStack Cloud Stack-powered cloud. Install this package if this computer
|
||||
a CloudStack powered cloud. Install this package if this computer
|
||||
will participate in your cloud.
|
||||
|
||||
Package: cloud-usage
|
||||
|
||||
@ -105,7 +105,7 @@
|
||||
|
||||
|
||||
<target name="init" description="Initialize binaries directory">
|
||||
<mkdir dir="${classes.dir}/${user-concentrated-pod.jar}"/>
|
||||
<mkdir dir="${classes.dir}/${dp-user-concentrated-pod.jar}"/>
|
||||
<mkdir dir="${jar.dir}"/>
|
||||
</target>
|
||||
|
||||
|
||||
@ -716,7 +716,7 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
|
||||
vm = OvmVm.getDetails(_conn, vmName);
|
||||
} catch (XmlRpcException e) {
|
||||
s_logger.debug("Unable to get details of vm: " + vmName + ", treating it as stopped", e);
|
||||
return new StopAnswer(cmd, "success", 0, 0L, 0L);
|
||||
return new StopAnswer(cmd, "success", 0, true);
|
||||
}
|
||||
|
||||
deleteAllNetworkRulesForVm(vmName);
|
||||
@ -724,10 +724,10 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
|
||||
cleanup(vm);
|
||||
|
||||
state = State.Stopped;
|
||||
return new StopAnswer(cmd, "success", 0, 0L, 0L);
|
||||
return new StopAnswer(cmd, "success", 0, true);
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("Stop " + vmName + "failed", e);
|
||||
return new StopAnswer(cmd, e.getMessage());
|
||||
return new StopAnswer(cmd, e.getMessage(), false);
|
||||
} finally {
|
||||
synchronized(_vms) {
|
||||
if (state != null) {
|
||||
@ -749,10 +749,10 @@ public class OvmResourceBase implements ServerResource, HypervisorResource {
|
||||
try {
|
||||
Map<String, String> res = OvmVm.reboot(_conn, vmName);
|
||||
Integer vncPort = Integer.parseInt(res.get("vncPort"));
|
||||
return new RebootAnswer(cmd, null, null, null, vncPort);
|
||||
return new RebootAnswer(cmd, null, vncPort);
|
||||
} catch (Exception e) {
|
||||
s_logger.debug("Reboot " + vmName + " failed", e);
|
||||
return new RebootAnswer(cmd, e.getMessage());
|
||||
return new RebootAnswer(cmd, e.getMessage(), false);
|
||||
} finally {
|
||||
synchronized(_vms) {
|
||||
_vms.put(cmd.getVmName(), State.Running);
|
||||
|
||||
@ -2027,16 +2027,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
vmMo.setCustomFieldValue(CustomFieldConstants.CLOUD_NIC_MASK, "0");
|
||||
|
||||
if (getVmState(vmMo) != State.Stopped) {
|
||||
Long bytesSent = 0L;
|
||||
Long bytesRcvd = 0L;
|
||||
|
||||
if (VirtualMachineName.isValidRouterName(cmd.getVmName())) {
|
||||
if (cmd.getPrivateRouterIpAddress() != null) {
|
||||
long[] stats = getNetworkStats(cmd.getPrivateRouterIpAddress());
|
||||
bytesSent = stats[0];
|
||||
bytesRcvd = stats[1];
|
||||
}
|
||||
}
|
||||
|
||||
// before we stop VM, remove all possible snapshots on the VM to let
|
||||
// disk chain be collapsed
|
||||
@ -2044,11 +2034,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
vmMo.removeAllSnapshots();
|
||||
if (vmMo.safePowerOff(_shutdown_waitMs)) {
|
||||
state = State.Stopped;
|
||||
return new StopAnswer(cmd, "Stop VM " + cmd.getVmName() + " Succeed", 0, bytesSent, bytesRcvd);
|
||||
return new StopAnswer(cmd, "Stop VM " + cmd.getVmName() + " Succeed", 0, true);
|
||||
} else {
|
||||
String msg = "Have problem in powering off VM " + cmd.getVmName() + ", let the process continue";
|
||||
s_logger.warn(msg);
|
||||
return new StopAnswer(cmd, msg, 0, 0L, 0L);
|
||||
return new StopAnswer(cmd, msg, 0, true);
|
||||
}
|
||||
} else {
|
||||
state = State.Stopped;
|
||||
@ -2056,7 +2046,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
|
||||
String msg = "VM " + cmd.getVmName() + " is already in stopped state";
|
||||
s_logger.info(msg);
|
||||
return new StopAnswer(cmd, msg, 0, 0L, 0L);
|
||||
return new StopAnswer(cmd, msg, 0, true);
|
||||
} finally {
|
||||
synchronized (_vms) {
|
||||
_vms.put(cmd.getVmName(), state);
|
||||
@ -2069,7 +2059,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
|
||||
String msg = "VM " + cmd.getVmName() + " is no longer in vSphere";
|
||||
s_logger.info(msg);
|
||||
return new StopAnswer(cmd, msg, 0, 0L, 0L);
|
||||
return new StopAnswer(cmd, msg, 0, true);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
if (e instanceof RemoteException) {
|
||||
@ -2079,7 +2069,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
|
||||
String msg = "StopCommand failed due to " + VmwareHelper.getExceptionMessage(e);
|
||||
s_logger.error(msg);
|
||||
return new StopAnswer(cmd, msg);
|
||||
return new StopAnswer(cmd, msg, false);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2088,17 +2078,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
s_logger.info("Executing resource RebootRouterCommand: " + _gson.toJson(cmd));
|
||||
}
|
||||
|
||||
Long bytesSent = 0L;
|
||||
Long bytesRcvd = 0L;
|
||||
if (VirtualMachineName.isValidRouterName(cmd.getVmName())) {
|
||||
long[] stats = getNetworkStats(cmd.getPrivateIpAddress());
|
||||
bytesSent = stats[0];
|
||||
bytesRcvd = stats[1];
|
||||
}
|
||||
|
||||
RebootAnswer answer = (RebootAnswer) execute((RebootCommand) cmd);
|
||||
answer.setBytesSent(bytesSent);
|
||||
answer.setBytesReceived(bytesRcvd);
|
||||
|
||||
if (answer.getResult()) {
|
||||
String connectResult = connect(cmd.getVmName(), cmd.getPrivateIpAddress());
|
||||
@ -2124,7 +2104,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
if (vmMo != null) {
|
||||
try {
|
||||
vmMo.rebootGuest();
|
||||
return new RebootAnswer(cmd, "reboot succeeded", null, null);
|
||||
return new RebootAnswer(cmd, "reboot succeeded", true);
|
||||
} catch(ToolsUnavailable e) {
|
||||
s_logger.warn("VMware tools is not installed at guest OS, we will perform hard reset for reboot");
|
||||
} catch(Exception e) {
|
||||
@ -2133,16 +2113,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
|
||||
// continue to try with hard-reset
|
||||
if (vmMo.reset()) {
|
||||
return new RebootAnswer(cmd, "reboot succeeded", null, null);
|
||||
return new RebootAnswer(cmd, "reboot succeeded", true);
|
||||
}
|
||||
|
||||
String msg = "Reboot failed in vSphere. vm: " + cmd.getVmName();
|
||||
s_logger.warn(msg);
|
||||
return new RebootAnswer(cmd, msg);
|
||||
return new RebootAnswer(cmd, msg, false);
|
||||
} else {
|
||||
String msg = "Unable to find the VM in vSphere to reboot. vm: " + cmd.getVmName();
|
||||
s_logger.warn(msg);
|
||||
return new RebootAnswer(cmd, msg);
|
||||
return new RebootAnswer(cmd, msg, false);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
if (e instanceof RemoteException) {
|
||||
@ -2152,7 +2132,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
|
||||
String msg = "RebootCommand failed due to " + VmwareHelper.getExceptionMessage(e);
|
||||
s_logger.error(msg);
|
||||
return new RebootAnswer(cmd, msg);
|
||||
return new RebootAnswer(cmd, msg, false);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2786,8 +2766,18 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
|
||||
if (s_logger.isInfoEnabled()) {
|
||||
s_logger.info("Executing resource PingTestCommand: " + _gson.toJson(cmd));
|
||||
}
|
||||
|
||||
String controlIp = cmd.getRouterIp();
|
||||
String args = " -c 1 -n -q " + cmd.getPrivateIp();
|
||||
try {
|
||||
VmwareManager mgr = getServiceContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
|
||||
Pair<Boolean, String> result = SshHelper.sshExecute(controlIp, DEFAULT_DOMR_SSHPORT, "root", mgr.getSystemVMKeyFile(), null, "/bin/ping" + args);
|
||||
if(result.first())
|
||||
return new Answer(cmd);
|
||||
} catch (Exception e) {
|
||||
s_logger.error("Unable to execute ping command on DomR (" + controlIp + "), domR may not be ready yet. failure due to "
|
||||
+ VmwareHelper.getExceptionMessage(e), e);
|
||||
}
|
||||
return new Answer(cmd,false,"PingTestCommand failed");
|
||||
}
|
||||
|
||||
protected Answer execute(CheckOnHostCommand cmd) {
|
||||
|
||||
@ -216,7 +216,6 @@ import com.cloud.utils.net.NetUtils;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
import com.cloud.vm.VirtualMachine;
|
||||
import com.cloud.vm.VirtualMachine.State;
|
||||
import com.cloud.vm.VirtualMachineName;
|
||||
import com.trilead.ssh2.SCPClient;
|
||||
import com.xensource.xenapi.Bond;
|
||||
import com.xensource.xenapi.Connection;
|
||||
@ -2855,10 +2854,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
vms = VM.getByNameLabel(conn, cmd.getVmName());
|
||||
} catch (XenAPIException e0) {
|
||||
s_logger.debug("getByNameLabel failed " + e0.toString());
|
||||
return new RebootAnswer(cmd, "getByNameLabel failed " + e0.toString());
|
||||
return new RebootAnswer(cmd, "getByNameLabel failed " + e0.toString(), false);
|
||||
} catch (Exception e0) {
|
||||
s_logger.debug("getByNameLabel failed " + e0.getMessage());
|
||||
return new RebootAnswer(cmd, "getByNameLabel failed");
|
||||
return new RebootAnswer(cmd, "getByNameLabel failed", false);
|
||||
}
|
||||
for (VM vm : vms) {
|
||||
try {
|
||||
@ -2866,10 +2865,10 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
} catch (Exception e) {
|
||||
String msg = e.toString();
|
||||
s_logger.warn(msg, e);
|
||||
return new RebootAnswer(cmd, msg);
|
||||
return new RebootAnswer(cmd, msg, false);
|
||||
}
|
||||
}
|
||||
return new RebootAnswer(cmd, "reboot succeeded", null, null);
|
||||
return new RebootAnswer(cmd, "reboot succeeded", true);
|
||||
} finally {
|
||||
synchronized (_cluster.intern()) {
|
||||
s_vms.put(_cluster, _name, cmd.getVmName(), State.Running);
|
||||
@ -2880,16 +2879,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
|
||||
protected Answer execute(RebootRouterCommand cmd) {
|
||||
Connection conn = getConnection();
|
||||
Long bytesSent = 0L;
|
||||
Long bytesRcvd = 0L;
|
||||
if (VirtualMachineName.isValidRouterName(cmd.getVmName())) {
|
||||
long[] stats = getNetworkStats(conn, cmd.getPrivateIpAddress());
|
||||
bytesSent = stats[0];
|
||||
bytesRcvd = stats[1];
|
||||
}
|
||||
RebootAnswer answer = execute((RebootCommand) cmd);
|
||||
answer.setBytesSent(bytesSent);
|
||||
answer.setBytesReceived(bytesRcvd);
|
||||
if (answer.getResult()) {
|
||||
String cnct = connect(conn, cmd.getVmName(), cmd.getPrivateIpAddress());
|
||||
networkUsage(conn, cmd.getPrivateIpAddress(), "create", null);
|
||||
@ -3352,23 +3342,21 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
s_logger.info("VM does not exist on XenServer" + _host.uuid);
|
||||
s_vms.remove(_cluster, _name, vmName);
|
||||
}
|
||||
return new StopAnswer(cmd, "VM does not exist", 0 , 0L, 0L);
|
||||
return new StopAnswer(cmd, "VM does not exist", 0 , true);
|
||||
}
|
||||
Long bytesSent = 0L;
|
||||
Long bytesRcvd = 0L;
|
||||
for (VM vm : vms) {
|
||||
VM.Record vmr = vm.getRecord(conn);
|
||||
|
||||
if (vmr.isControlDomain) {
|
||||
String msg = "Tring to Shutdown control domain";
|
||||
s_logger.warn(msg);
|
||||
return new StopAnswer(cmd, msg);
|
||||
return new StopAnswer(cmd, msg, false);
|
||||
}
|
||||
|
||||
if (vmr.powerState == VmPowerState.RUNNING && !isRefNull(vmr.residentOn) && !vmr.residentOn.getUuid(conn).equals(_host.uuid)) {
|
||||
String msg = "Stop Vm " + vmName + " failed due to this vm is not running on this host: " + _host.uuid + " but host:" + vmr.residentOn.getUuid(conn);
|
||||
s_logger.warn(msg);
|
||||
return new StopAnswer(cmd, msg);
|
||||
return new StopAnswer(cmd, msg, false);
|
||||
}
|
||||
|
||||
State state = s_vms.getState(_cluster, vmName);
|
||||
@ -3382,13 +3370,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
if (vmr.powerState == VmPowerState.RUNNING) {
|
||||
/* when stop a vm, set affinity to current xenserver */
|
||||
vm.setAffinity(conn, vm.getResidentOn(conn));
|
||||
if (VirtualMachineName.isValidRouterName(vmName)) {
|
||||
if (cmd.getPrivateRouterIpAddress() != null) {
|
||||
long[] stats = getNetworkStats(conn, cmd.getPrivateRouterIpAddress());
|
||||
bytesSent = stats[0];
|
||||
bytesRcvd = stats[1];
|
||||
}
|
||||
}
|
||||
|
||||
if (_canBridgeFirewall) {
|
||||
String result = callHostPlugin(conn, "vmops", "destroy_network_rules_for_vm", "vmName", cmd
|
||||
.getVmName());
|
||||
@ -3403,7 +3385,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
} catch (Exception e) {
|
||||
String msg = "Catch exception " + e.getClass().getName() + " when stop VM:" + cmd.getVmName() + " due to " + e.toString();
|
||||
s_logger.debug(msg);
|
||||
return new StopAnswer(cmd, msg);
|
||||
return new StopAnswer(cmd, msg, false);
|
||||
} finally {
|
||||
|
||||
try {
|
||||
@ -3428,7 +3410,7 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
disableVlanNetwork(conn, network);
|
||||
}
|
||||
}
|
||||
return new StopAnswer(cmd, "Stop VM " + vmName + " Succeed", 0, bytesSent, bytesRcvd);
|
||||
return new StopAnswer(cmd, "Stop VM " + vmName + " Succeed", 0, true);
|
||||
}
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "VM destroy failed in Stop " + vmName + " Command due to " + e.toString();
|
||||
@ -3448,16 +3430,16 @@ public abstract class CitrixResourceBase implements ServerResource, HypervisorRe
|
||||
} catch (XenAPIException e) {
|
||||
String msg = "Stop Vm " + vmName + " fail due to " + e.toString();
|
||||
s_logger.warn(msg, e);
|
||||
return new StopAnswer(cmd, msg);
|
||||
return new StopAnswer(cmd, msg, false);
|
||||
} catch (XmlRpcException e) {
|
||||
String msg = "Stop Vm " + vmName + " fail due to " + e.getMessage();
|
||||
s_logger.warn(msg, e);
|
||||
return new StopAnswer(cmd, msg);
|
||||
return new StopAnswer(cmd, msg, false);
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Unable to stop " + vmName + " due to ", e);
|
||||
return new StopAnswer(cmd, e);
|
||||
}
|
||||
return new StopAnswer(cmd, "Stop VM failed");
|
||||
return new StopAnswer(cmd, "Stop VM failed", false);
|
||||
}
|
||||
|
||||
private List<VDI> getVdis(Connection conn, VM vm) {
|
||||
|
||||
@ -1032,4 +1032,9 @@ public class ElasticLoadBalancerManagerImpl implements
|
||||
// TODO Auto-generated method stub
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareStop(VirtualMachineProfile<DomainRouterVO> profile) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -16,3 +16,8 @@ announce that they have accepted.
|
||||
Being a committer enables easier contribution to the
|
||||
project since there is no need to go via the patch
|
||||
submission process. This should enable better productivity.
|
||||
|
||||
Please join me in congratulating #########
|
||||
|
||||
--#####Name####
|
||||
on behalf of the CloudStack PPMC
|
||||
|
||||
@ -133,14 +133,14 @@ write_hbLog() {
|
||||
}
|
||||
|
||||
check_hbLog() {
|
||||
oldTimeStamp=$(cat $hbFile)
|
||||
sleep $interval &> /dev/null
|
||||
newTimeStamp=$(cat $hbFile)
|
||||
if [ $newTimeStamp -gt $oldTimeStamp ]
|
||||
now=$(date +%s)
|
||||
hb=$(cat $hbFile)
|
||||
diff=`expr $now - $hb`
|
||||
if [ $diff -gt $interval ]
|
||||
then
|
||||
return 0
|
||||
return 1
|
||||
fi
|
||||
return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
if [ "$rflag" == "1" ]
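The reworked check_hbLog no longer sleeps and re-reads the heartbeat file; it simply compares the heartbeat's age against the interval, returning 0 while the heartbeat is fresh and 1 once it has gone stale. The same idea in a small Java sketch, purely illustrative (the helper and variable names are assumptions):

    // Hedged re-expression of the new check_hbLog logic (0 = healthy, 1 = stale).
    long now = System.currentTimeMillis() / 1000L;          // $(date +%s)
    long lastBeat = readTimestamp(hbFile);                   // $(cat $hbFile); helper assumed
    int rc = (now - lastBeat) > intervalSeconds ? 1 : 0;     // stale once older than one interval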
|
||||
|
||||
@ -428,10 +428,10 @@ public class BareMetalResourceBase implements ServerResource {
|
||||
|
||||
protected RebootAnswer execute(final RebootCommand cmd) {
|
||||
if (!doScript(_rebootCommand)) {
|
||||
return new RebootAnswer(cmd, "IPMI reboot failed");
|
||||
return new RebootAnswer(cmd, "IPMI reboot failed", false);
|
||||
}
|
||||
|
||||
return new RebootAnswer(cmd, "reboot succeeded", null, null);
|
||||
return new RebootAnswer(cmd, "reboot succeeded", true);
|
||||
}
|
||||
|
||||
protected StopAnswer execute(final StopCommand cmd) {
|
||||
@ -466,7 +466,7 @@ public class BareMetalResourceBase implements ServerResource {
|
||||
count++;
|
||||
}
|
||||
|
||||
return success ? new StopAnswer(cmd, "Success", null, Long.valueOf(0), Long.valueOf(0)) : new StopAnswer(cmd, "IPMI power off failed");
|
||||
return success ? new StopAnswer(cmd, "Success", null, true) : new StopAnswer(cmd, "IPMI power off failed", false);
|
||||
}
|
||||
|
||||
protected StartAnswer execute(StartCommand cmd) {
|
||||
|
||||
@ -375,4 +375,8 @@ public class AgentBasedConsoleProxyManager implements ConsoleProxyManager, Virtu
|
||||
// TODO Auto-generated method stub
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareStop(VirtualMachineProfile<ConsoleProxyVO> profile) {
|
||||
}
|
||||
}
|
||||
|
||||
@ -2032,4 +2032,8 @@ public class ConsoleProxyManagerImpl implements ConsoleProxyManager, ConsoleProx
|
||||
// TODO Auto-generated method stub
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareStop(VirtualMachineProfile<ConsoleProxyVO> profile) {
|
||||
}
|
||||
}
|
||||
|
||||
@ -17,10 +17,12 @@
|
||||
package com.cloud.deploy;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeSet;
|
||||
|
||||
import javax.ejb.Local;
|
||||
import javax.naming.ConfigurationException;
|
||||
@ -55,6 +57,7 @@ import com.cloud.org.Cluster;
|
||||
import com.cloud.org.Grouping;
|
||||
import com.cloud.resource.ResourceState;
|
||||
import com.cloud.storage.DiskOfferingVO;
|
||||
import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolHostVO;
|
||||
import com.cloud.storage.StoragePoolVO;
|
||||
@ -98,6 +101,7 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
||||
@Inject protected StoragePoolDao _storagePoolDao;
|
||||
@Inject protected CapacityDao _capacityDao;
|
||||
@Inject protected AccountManager _accountMgr;
|
||||
@Inject protected StorageManager _storageMgr;
|
||||
|
||||
@Inject(adapter=StoragePoolAllocator.class)
|
||||
protected Adapters<StoragePoolAllocator> _storagePoolAllocators;
|
||||
@ -638,25 +642,56 @@ public class FirstFitPlanner extends PlannerBase implements DeploymentPlanner {
|
||||
s_logger.debug("Trying to find a potenial host and associated storage pools from the suitable host/pool lists for this VM");
|
||||
|
||||
boolean hostCanAccessPool = false;
|
||||
boolean haveEnoughSpace = false;
|
||||
Map<Volume, StoragePool> storage = new HashMap<Volume, StoragePool>();
|
||||
TreeSet<Volume> volumesOrderBySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
|
||||
@Override
|
||||
public int compare(Volume v1, Volume v2) {
|
||||
if(v1.getSize() < v2.getSize())
|
||||
return 1;
|
||||
else
|
||||
return -1;
|
||||
}
|
||||
});
|
||||
volumesOrderBySizeDesc.addAll(suitableVolumeStoragePools.keySet());
|
||||
boolean multipleVolume = volumesOrderBySizeDesc.size() > 1;
|
||||
for(Host potentialHost : suitableHosts){
|
||||
for(Volume vol : suitableVolumeStoragePools.keySet()){
|
||||
Map<StoragePool,List<Volume>> volumeAllocationMap = new HashMap<StoragePool,List<Volume>>();
|
||||
for(Volume vol : volumesOrderBySizeDesc){
|
||||
haveEnoughSpace = false;
|
||||
s_logger.debug("Checking if host: "+potentialHost.getId() +" can access any suitable storage pool for volume: "+ vol.getVolumeType());
|
||||
List<StoragePool> volumePoolList = suitableVolumeStoragePools.get(vol);
|
||||
hostCanAccessPool = false;
|
||||
for(StoragePool potentialSPool : volumePoolList){
|
||||
if(hostCanAccessSPool(potentialHost, potentialSPool)){
|
||||
storage.put(vol, potentialSPool);
|
||||
hostCanAccessPool = true;
|
||||
if(multipleVolume){
|
||||
List<Volume> requestVolumes = null;
|
||||
if(volumeAllocationMap.containsKey(potentialSPool))
|
||||
requestVolumes = volumeAllocationMap.get(potentialSPool);
|
||||
else
|
||||
requestVolumes = new ArrayList<Volume>();
|
||||
requestVolumes.add(vol);
|
||||
|
||||
if(!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, potentialSPool))
|
||||
continue;
|
||||
volumeAllocationMap.put(potentialSPool,requestVolumes);
|
||||
}
|
||||
storage.put(vol, potentialSPool);
|
||||
haveEnoughSpace = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(!hostCanAccessPool){
|
||||
break;
|
||||
}
|
||||
if(!haveEnoughSpace) {
|
||||
s_logger.warn("insufficient capacity to allocate all volumes");
|
||||
break;
|
||||
}
|
||||
}
|
||||
if(hostCanAccessPool){
|
||||
s_logger.debug("Found a potential host " + "id: "+potentialHost.getId() + " name: " +potentialHost.getName()+ " and associated storage pools for this VM");
|
||||
if(hostCanAccessPool && haveEnoughSpace){
|
||||
s_logger.debug("Found a potential host " + "id: "+potentialHost.getId() + " name: " +potentialHost.getName() + " and associated storage pools for this VM");
|
||||
return new Pair<Host, Map<Volume, StoragePool>>(potentialHost, storage);
|
||||
}
|
||||
}
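One subtlety in the ordering used above: the TreeSet comparator never returns 0, so two volumes of equal size are both kept rather than collapsed as duplicates, and the loop visits volumes largest-first so the biggest requests are matched to pools before space runs out. A compact restatement of that ordering (names as in the hunk above, otherwise hypothetical):

    // Order volumes largest-first without dropping equal-sized ones.
    TreeSet<Volume> bySizeDesc = new TreeSet<Volume>(new Comparator<Volume>() {
        @Override
        public int compare(Volume v1, Volume v2) {
            return v1.getSize() < v2.getSize() ? 1 : -1;   // never 0, so equal sizes both survive
        }
    });
    bySizeDesc.addAll(suitableVolumeStoragePools.keySet());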
|
||||
|
||||
@ -3449,6 +3449,7 @@ public class NetworkManagerImpl implements NetworkManager, NetworkService, Manag
|
||||
}
|
||||
}
|
||||
|
||||
sc.addAnd("id", SearchCriteria.Op.SC, accountSC);
|
||||
return _networksDao.search(sc, searchFilter);
|
||||
}
|
||||
|
||||
|
||||
@ -502,34 +502,13 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian
|
||||
final UserStatisticsVO userStats = _userStatsDao.lock(router.getAccountId(), router.getDataCenterIdToDeployIn(),
|
||||
guestNtwkId, null, router.getId(), router.getType().toString());
|
||||
if (userStats != null) {
|
||||
final RebootAnswer sa = (RebootAnswer) answer;
|
||||
final Long received = sa.getBytesReceived();
|
||||
long netBytes = 0;
|
||||
if (received != null) {
|
||||
if (received.longValue() >= userStats.getCurrentBytesReceived()) {
|
||||
netBytes = received.longValue();
|
||||
} else {
|
||||
netBytes = userStats.getCurrentBytesReceived() + received;
|
||||
}
|
||||
} else {
|
||||
netBytes = userStats.getCurrentBytesReceived();
|
||||
}
|
||||
final long currentBytesRcvd = userStats.getCurrentBytesReceived();
|
||||
userStats.setCurrentBytesReceived(0);
|
||||
userStats.setNetBytesReceived(userStats.getNetBytesReceived() + netBytes);
|
||||
userStats.setNetBytesReceived(userStats.getNetBytesReceived() + currentBytesRcvd);
|
||||
|
||||
final Long sent = sa.getBytesSent();
|
||||
|
||||
if (sent != null) {
|
||||
if (sent.longValue() >= userStats.getCurrentBytesSent()) {
|
||||
netBytes = sent.longValue();
|
||||
} else {
|
||||
netBytes = userStats.getCurrentBytesSent() + sent;
|
||||
}
|
||||
} else {
|
||||
netBytes = userStats.getCurrentBytesSent();
|
||||
}
|
||||
userStats.setNetBytesSent(userStats.getNetBytesSent() + netBytes);
|
||||
final long currentBytesSent = userStats.getCurrentBytesSent();
|
||||
userStats.setCurrentBytesSent(0);
|
||||
userStats.setNetBytesSent(userStats.getNetBytesSent() + currentBytesSent);
|
||||
_userStatsDao.update(userStats.getId(), userStats);
|
||||
s_logger.debug("Successfully updated user statistics as a part of domR " + router + " reboot/stop");
|
||||
} else {
|
||||
@ -540,7 +519,7 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian
|
||||
txn.commit();
|
||||
} catch (final Exception e) {
|
||||
txn.rollback();
|
||||
throw new CloudRuntimeException("Problem getting stats after reboot/stop ", e);
|
||||
throw new CloudRuntimeException("Problem updating stats after reboot/stop ", e);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3221,4 +3200,99 @@ public class VirtualNetworkApplianceManagerImpl implements VirtualNetworkApplian
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void prepareStop(VirtualMachineProfile<DomainRouterVO> profile){
|
||||
//Collect network usage before stopping Vm
|
||||
VMInstanceVO vm = profile.getVirtualMachine();
|
||||
DomainRouterVO router = _routerDao.findById(vm.getId());
|
||||
if(router == null){
|
||||
return;
|
||||
}
|
||||
/*String privateIP = router.getPrivateIpAddress();
|
||||
|
||||
if (privateIP != null) {
|
||||
List<Long> routerGuestNtwkIds = _routerDao.getRouterNetworks(router.getId());
|
||||
|
||||
for (Long guestNtwkId : routerGuestNtwkIds) {
|
||||
boolean forVpc = router.getVpcId() != null;
|
||||
Network guestNtwk = _networkMgr.getNetwork(guestNtwkId);
|
||||
Nic guestNic = _nicDao.findByInstanceIdAndNetworkId(guestNtwk.getId(), router.getId());
|
||||
NicProfile guestNicProfile = new NicProfile(guestNic, guestNtwk, guestNic.getBroadcastUri(),
|
||||
guestNic.getIsolationUri(), _networkMgr.getNetworkRate(guestNtwk.getId(), router.getId()),
|
||||
_networkMgr.isSecurityGroupSupportedInNetwork(guestNtwk),
|
||||
_networkMgr.getNetworkTag(router.getHypervisorType(), guestNtwk));
|
||||
final NetworkUsageCommand usageCmd = new NetworkUsageCommand(privateIP, router.getHostName(),
|
||||
forVpc, _itMgr.toNicTO(guestNicProfile, router.getHypervisorType()));
|
||||
UserStatisticsVO previousStats = _statsDao.findBy(router.getAccountId(),
|
||||
router.getDataCenterIdToDeployIn(), guestNtwkId, null, router.getId(), router.getType().toString());
|
||||
NetworkUsageAnswer answer = null;
|
||||
try {
|
||||
answer = (NetworkUsageAnswer) _agentMgr.easySend(router.getHostId(), usageCmd);
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Error while collecting network stats from router: "+router.getInstanceName()+" from host: "+router.getHostId(), e);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (answer != null) {
|
||||
if (!answer.getResult()) {
|
||||
s_logger.warn("Error while collecting network stats from router: "+router.getInstanceName()+" from host: "+router.getHostId() + "; details: " + answer.getDetails());
|
||||
continue;
|
||||
}
|
||||
Transaction txn = Transaction.open(Transaction.CLOUD_DB);
|
||||
try {
|
||||
if ((answer.getBytesReceived() == 0) && (answer.getBytesSent() == 0)) {
|
||||
s_logger.debug("Recieved and Sent bytes are both 0. Not updating user_statistics");
|
||||
continue;
|
||||
}
|
||||
txn.start();
|
||||
UserStatisticsVO stats = _statsDao.lock(router.getAccountId(),
|
||||
router.getDataCenterIdToDeployIn(), guestNtwkId, null, router.getId(), router.getType().toString());
|
||||
if (stats == null) {
|
||||
s_logger.warn("unable to find stats for account: " + router.getAccountId());
|
||||
continue;
|
||||
}
|
||||
|
||||
if(previousStats != null
|
||||
&& ((previousStats.getCurrentBytesReceived() != stats.getCurrentBytesReceived())
|
||||
|| (previousStats.getCurrentBytesSent() != stats.getCurrentBytesSent()))){
|
||||
s_logger.debug("Router stats changed from the time NetworkUsageCommand was sent. " +
|
||||
"Ignoring current answer. Router: "+answer.getRouterName()+" Rcvd: " +
|
||||
answer.getBytesReceived()+ "Sent: " +answer.getBytesSent());
|
||||
continue;
|
||||
}
|
||||
|
||||
if (stats.getCurrentBytesReceived() > answer.getBytesReceived()) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Received # of bytes that's less than the last one. " +
|
||||
"Assuming something went wrong and persisting it. Router: " +
|
||||
answer.getRouterName()+" Reported: " + answer.getBytesReceived()
|
||||
+ " Stored: " + stats.getCurrentBytesReceived());
|
||||
}
|
||||
stats.setNetBytesReceived(stats.getNetBytesReceived() + stats.getCurrentBytesReceived());
|
||||
}
|
||||
stats.setCurrentBytesReceived(answer.getBytesReceived());
|
||||
if (stats.getCurrentBytesSent() > answer.getBytesSent()) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Received # of bytes that's less than the last one. " +
|
||||
"Assuming something went wrong and persisting it. Router: " +
|
||||
answer.getRouterName()+" Reported: " + answer.getBytesSent()
|
||||
+ " Stored: " + stats.getCurrentBytesSent());
|
||||
}
|
||||
stats.setNetBytesSent(stats.getNetBytesSent() + stats.getCurrentBytesSent());
|
||||
}
|
||||
stats.setCurrentBytesSent(answer.getBytesSent());
|
||||
_statsDao.update(stats.getId(), stats);
|
||||
txn.commit();
|
||||
} catch (Exception e) {
|
||||
txn.rollback();
|
||||
s_logger.warn("Unable to update user statistics for account: " + router.getAccountId()
|
||||
+ " Rx: " + answer.getBytesReceived() + "; Tx: " + answer.getBytesSent());
|
||||
} finally {
|
||||
txn.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
}*/
|
||||
}
|
||||
}
|
||||
|
||||
@ -27,7 +27,6 @@ import javax.naming.ConfigurationException;
|
||||
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.citrix.netscaler.nitro.resource.config.network.vlan;
|
||||
import com.cloud.api.commands.ListPortForwardingRulesCmd;
|
||||
import com.cloud.configuration.ConfigurationManager;
|
||||
import com.cloud.domain.dao.DomainDao;
|
||||
@ -372,10 +371,12 @@ public class RulesManagerImpl implements RulesManager, RulesService, Manager {
|
||||
}
|
||||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_ENABLE_STATIC_NAT, eventDescription = "enabling static nat")
|
||||
public boolean enableStaticNat(long ipId, long vmId, long networkId, boolean isSystemVm)
|
||||
throws NetworkRuleConflictException, ResourceUnavailableException {
|
||||
UserContext ctx = UserContext.current();
|
||||
Account caller = ctx.getCaller();
|
||||
UserContext.current().setEventDetails("Ip Id: " + ipId);
|
||||
|
||||
// Verify input parameters
|
||||
|
||||
@ -1136,6 +1137,7 @@ public class RulesManagerImpl implements RulesManager, RulesService, Manager {
|
||||
}
|
||||
|
||||
@Override
|
||||
@ActionEvent(eventType = EventTypes.EVENT_DISABLE_STATIC_NAT, eventDescription = "disabling static nat", async=true)
|
||||
public boolean disableStaticNat(long ipId) throws ResourceUnavailableException, NetworkRuleConflictException, InsufficientAddressCapacityException {
|
||||
UserContext ctx = UserContext.current();
|
||||
Account caller = ctx.getCaller();
|
||||
|
||||
@ -235,4 +235,6 @@ public interface StorageManager extends StorageService, Manager {
|
||||
String getSupportedImageFormatForCluster(Long clusterId);
|
||||
|
||||
HypervisorType getHypervisorTypeFromFormat(ImageFormat format);
|
||||
|
||||
boolean storagePoolHasEnoughSpace(List<Volume> volume, StoragePool pool);
|
||||
}
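The new interface method centralizes the used-space and allocated-space threshold checks that previously lived in the allocator. A minimal sketch of a caller (the injected manager field, volumes and pool are placeholders):

    // Hypothetical caller: ask the storage manager whether a pool can take these volumes.
    List<Volume> requestVolumes = new ArrayList<Volume>();
    requestVolumes.add(rootVolume);
    requestVolumes.add(dataVolume);
    if (!_storageMgr.storagePoolHasEnoughSpace(requestVolumes, candidatePool)) {
        // skip this pool and try the next candidate
    }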
|
||||
|
||||
@ -80,6 +80,7 @@ import com.cloud.api.commands.UpdateStoragePoolCmd;
|
||||
import com.cloud.api.commands.UploadVolumeCmd;
|
||||
import com.cloud.async.AsyncJobManager;
|
||||
import com.cloud.capacity.Capacity;
|
||||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.capacity.CapacityState;
|
||||
import com.cloud.capacity.CapacityVO;
|
||||
import com.cloud.capacity.dao.CapacityDao;
|
||||
@ -133,6 +134,7 @@ import com.cloud.resource.ResourceManager;
|
||||
import com.cloud.resource.ResourceState;
|
||||
import com.cloud.server.ManagementServer;
|
||||
import com.cloud.server.ResourceTag.TaggedResourceType;
|
||||
import com.cloud.server.StatsCollector;
|
||||
import com.cloud.service.ServiceOfferingVO;
|
||||
import com.cloud.service.dao.ServiceOfferingDao;
|
||||
import com.cloud.storage.Storage.ImageFormat;
|
||||
@ -274,6 +276,8 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag
|
||||
@Inject
|
||||
protected CapacityDao _capacityDao;
|
||||
@Inject
|
||||
protected CapacityManager _capacityMgr;
|
||||
@Inject
|
||||
protected DiskOfferingDao _diskOfferingDao;
|
||||
@Inject
|
||||
protected AccountDao _accountDao;
|
||||
@ -352,6 +356,9 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag
|
||||
private StateMachine2<Volume.State, Volume.Event, Volume> _volStateMachine;
|
||||
private int _customDiskOfferingMinSize = 1;
|
||||
private int _customDiskOfferingMaxSize = 1024;
|
||||
private double _storageUsedThreshold = 1.0d;
|
||||
private double _storageAllocatedThreshold = 1.0d;
|
||||
protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1);
|
||||
|
||||
public boolean share(VMInstanceVO vm, List<VolumeVO> vols, HostVO host, boolean cancelPreviousShare) throws StorageUnavailableException {
|
||||
|
||||
@ -955,6 +962,19 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag
|
||||
String time = configs.get("storage.cleanup.interval");
|
||||
_storageCleanupInterval = NumbersUtil.parseInt(time, 86400);
|
||||
|
||||
String storageUsedThreshold = configDao.getValue(Config.StorageCapacityDisableThreshold.key());
|
||||
if (storageUsedThreshold != null) {
|
||||
_storageUsedThreshold = Double.parseDouble(storageUsedThreshold);
|
||||
}
|
||||
|
||||
String storageAllocatedThreshold = configDao.getValue(Config.StorageAllocatedCapacityDisableThreshold.key());
|
||||
if (storageAllocatedThreshold != null) {
|
||||
_storageAllocatedThreshold = Double.parseDouble(storageAllocatedThreshold);
|
||||
}
|
||||
|
||||
String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor");
|
||||
_storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
|
||||
|
||||
s_logger.info("Storage cleanup enabled: " + _storageCleanupEnabled + ", interval: " + _storageCleanupInterval + ", template cleanup enabled: " + _templateCleanupEnabled);
|
||||
|
||||
String workers = configs.get("expunge.workers");
|
||||
@ -1257,10 +1277,10 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag
|
||||
if (uriPath == null) {
|
||||
throw new InvalidParameterValueException("host or path is null, should be sharedmountpoint://localhost/path");
|
||||
}
|
||||
} else if (uri.getScheme().equalsIgnoreCase("clvm")) {
|
||||
} else if (uri.getScheme().equalsIgnoreCase("rbd")) {
|
||||
String uriPath = uri.getPath();
|
||||
if (uriPath == null) {
|
||||
throw new InvalidParameterValueException("host or path is null, should be clvm://localhost/path");
|
||||
throw new InvalidParameterValueException("host or path is null, should be rbd://hostname/pool");
|
||||
}
|
||||
}
|
||||
} catch (URISyntaxException e) {
|
||||
@ -1283,6 +1303,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag
|
||||
String scheme = uri.getScheme();
|
||||
String storageHost = uri.getHost();
|
||||
String hostPath = uri.getPath();
|
||||
String userInfo = uri.getUserInfo();
|
||||
int port = uri.getPort();
|
||||
StoragePoolVO pool = null;
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
@ -1303,6 +1324,11 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag
|
||||
pool = new StoragePoolVO(StoragePoolType.Filesystem, "localhost", 0, hostPath);
|
||||
} else if (scheme.equalsIgnoreCase("sharedMountPoint")) {
|
||||
pool = new StoragePoolVO(StoragePoolType.SharedMountPoint, storageHost, 0, hostPath);
|
||||
} else if (scheme.equalsIgnoreCase("rbd")) {
|
||||
if (port == -1) {
|
||||
port = 6789;
|
||||
}
|
||||
pool = new StoragePoolVO(StoragePoolType.RBD, storageHost, port, hostPath.replaceFirst("/", ""), userInfo);
|
||||
} else if (scheme.equalsIgnoreCase("PreSetup")) {
|
||||
pool = new StoragePoolVO(StoragePoolType.PreSetup, storageHost, 0, hostPath);
|
||||
} else if (scheme.equalsIgnoreCase("iscsi")) {
|
||||
@ -1601,7 +1627,7 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag
|
||||
s_logger.debug("creating pool " + pool.getName() + " on host " + hostId);
|
||||
if (pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.Filesystem && pool.getPoolType() != StoragePoolType.IscsiLUN
|
||||
&& pool.getPoolType() != StoragePoolType.Iscsi && pool.getPoolType() != StoragePoolType.VMFS && pool.getPoolType() != StoragePoolType.SharedMountPoint
|
||||
&& pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2) {
|
||||
&& pool.getPoolType() != StoragePoolType.PreSetup && pool.getPoolType() != StoragePoolType.OCFS2 && pool.getPoolType() != StoragePoolType.RBD) {
|
||||
s_logger.warn(" Doesn't support storage pool type " + pool.getPoolType());
|
||||
return false;
|
||||
}
|
||||
@ -3907,4 +3933,80 @@ public class StorageManagerImpl implements StorageManager, Manager, ClusterManag
|
||||
}
|
||||
}
|
||||
|
||||
private boolean checkUsagedSpace(StoragePool pool){
|
||||
StatsCollector sc = StatsCollector.getInstance();
|
||||
if (sc != null) {
|
||||
long totalSize = pool.getCapacityBytes();
|
||||
StorageStats stats = sc.getStoragePoolStats(pool.getId());
|
||||
if(stats == null){
|
||||
stats = sc.getStorageStats(pool.getId());
|
||||
}
|
||||
if (stats != null) {
|
||||
double usedPercentage = ((double)stats.getByteUsed() / (double)totalSize);
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Checking pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + ", usedPct: " + usedPercentage + ", disable threshold: " + _storageUsedThreshold);
|
||||
}
|
||||
if (usedPercentage >= _storageUsedThreshold) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Insufficient space on pool: " + pool.getId() + " since its usage percentage: " +usedPercentage + " has crossed the pool.storage.capacity.disablethreshold: " + _storageUsedThreshold);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean storagePoolHasEnoughSpace(List<Volume> volumes, StoragePool pool) {
|
||||
if(volumes == null || volumes.isEmpty())
|
||||
return false;
|
||||
|
||||
if(!checkUsagedSpace(pool))
|
||||
return false;
|
||||
|
||||
// allocated space includes template of specified volume
|
||||
StoragePoolVO poolVO = _storagePoolDao.findById(pool.getId());
|
||||
long allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, null);
|
||||
long totalAskingSize = 0;
|
||||
for (Volume volume : volumes) {
|
||||
if(volume.getTemplateId()!=null){
|
||||
VMTemplateVO tmpl = _templateDao.findById(volume.getTemplateId());
|
||||
if (tmpl.getFormat() != ImageFormat.ISO){
|
||||
allocatedSizeWithtemplate = _capacityMgr.getAllocatedPoolCapacity(poolVO, tmpl);
|
||||
}
|
||||
}
|
||||
if(volume.getState() != Volume.State.Ready)
|
||||
totalAskingSize = totalAskingSize + volume.getSize();
|
||||
}
|
||||
|
||||
long totalOverProvCapacity;
|
||||
if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) {
|
||||
totalOverProvCapacity = _storageOverprovisioningFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();// All this for the inaccuracy of floats for big number multiplication.
|
||||
}else {
|
||||
totalOverProvCapacity = pool.getCapacityBytes();
|
||||
}
|
||||
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Checking pool: " + pool.getId() + " for volume allocation " + volumes.toString() + ", maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize + ", allocated disable threshold: " + _storageAllocatedThreshold);
|
||||
}
|
||||
|
||||
double usedPercentage = (allocatedSizeWithtemplate + totalAskingSize) / (double)(totalOverProvCapacity);
|
||||
if (usedPercentage > _storageAllocatedThreshold){
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + " since its allocated percentage: " +usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + _storageAllocatedThreshold + ", skipping this pool");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
if (totalOverProvCapacity < (allocatedSizeWithtemplate + totalAskingSize)) {
|
||||
if (s_logger.isDebugEnabled()) {
|
||||
s_logger.debug("Insufficient un-allocated capacity on: " + pool.getId() + " for volume allocation: " + volumes.toString() + ", not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + allocatedSizeWithtemplate + ", askingSize : " + totalAskingSize);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -17,6 +17,7 @@
|
||||
package com.cloud.storage.allocator;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
@ -27,7 +28,6 @@ import javax.naming.ConfigurationException;
|
||||
import org.apache.log4j.Logger;
|
||||
|
||||
import com.cloud.capacity.CapacityManager;
|
||||
import com.cloud.configuration.Config;
|
||||
import com.cloud.configuration.dao.ConfigurationDao;
|
||||
import com.cloud.dc.ClusterVO;
|
||||
import com.cloud.dc.dao.ClusterDao;
|
||||
@ -41,13 +41,11 @@ import com.cloud.storage.StorageManager;
|
||||
import com.cloud.storage.StoragePool;
|
||||
import com.cloud.storage.StoragePoolStatus;
|
||||
import com.cloud.storage.StoragePoolVO;
|
||||
import com.cloud.storage.StorageStats;
|
||||
import com.cloud.storage.VMTemplateHostVO;
|
||||
import com.cloud.storage.VMTemplateStoragePoolVO;
|
||||
import com.cloud.storage.VMTemplateStorageResourceAssoc;
|
||||
import com.cloud.storage.VMTemplateStorageResourceAssoc.Status;
|
||||
import com.cloud.storage.VMTemplateSwiftVO;
|
||||
import com.cloud.storage.VMTemplateVO;
|
||||
import com.cloud.storage.Volume;
|
||||
import com.cloud.storage.Volume.Type;
|
||||
import com.cloud.storage.dao.StoragePoolDao;
|
||||
import com.cloud.storage.dao.StoragePoolHostDao;
|
||||
@ -58,7 +56,6 @@ import com.cloud.storage.dao.VolumeDao;
|
||||
import com.cloud.storage.swift.SwiftManager;
|
||||
import com.cloud.template.TemplateManager;
|
||||
import com.cloud.utils.NumbersUtil;
|
||||
import com.cloud.utils.Pair;
|
||||
import com.cloud.utils.component.AdapterBase;
|
||||
import com.cloud.utils.component.Inject;
|
||||
import com.cloud.vm.DiskProfile;
|
||||
@ -83,8 +80,6 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
||||
long _extraBytesPerVolume = 0;
|
||||
Random _rand;
|
||||
boolean _dontMatter;
|
||||
double _storageUsedThreshold = 1.0d;
|
||||
double _storageAllocatedThreshold = 1.0d;
|
||||
|
||||
@Override
|
||||
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
|
||||
@ -97,17 +92,6 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
||||
|
||||
_extraBytesPerVolume = 0;
|
||||
|
||||
|
||||
String storageUsedThreshold = _configDao.getValue(Config.StorageCapacityDisableThreshold.key());
|
||||
if (storageUsedThreshold != null) {
|
||||
_storageUsedThreshold = Double.parseDouble(storageUsedThreshold);
|
||||
}
|
||||
|
||||
String storageAllocatedThreshold = _configDao.getValue(Config.StorageAllocatedCapacityDisableThreshold.key());
|
||||
if (storageAllocatedThreshold != null) {
|
||||
_storageAllocatedThreshold = Double.parseDouble(storageAllocatedThreshold);
|
||||
}
|
||||
|
||||
_rand = new Random(System.currentTimeMillis());
|
||||
|
||||
_dontMatter = Boolean.parseBoolean(configs.get("storage.overwrite.provisioning"));
|
||||
@ -192,61 +176,16 @@ public abstract class AbstractStoragePoolAllocator extends AdapterBase implement
|
||||
return false;
}

// check the used size against the total size, skip this host if it's greater than the configured
// capacity check "storage.capacity.threshold"
if (sc != null) {
long totalSize = pool.getCapacityBytes();
StorageStats stats = sc.getStoragePoolStats(pool.getId());
if(stats == null){
stats = sc.getStorageStats(pool.getId());
}
if (stats != null) {
double usedPercentage = ((double)stats.getByteUsed() / (double)totalSize);
if (s_logger.isDebugEnabled()) {
s_logger.debug("Attempting to look for pool " + pool.getId() + " for storage, totalSize: " + pool.getCapacityBytes() + ", usedBytes: " + stats.getByteUsed() + ", usedPct: " + usedPercentage + ", disable threshold: " + _storageUsedThreshold);
}
if (usedPercentage >= _storageUsedThreshold) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot allocate this pool " + pool.getId() + " for storage since its usage percentage: " +usedPercentage + " has crossed the pool.storage.capacity.disablethreshold: " + _storageUsedThreshold + ", skipping this pool");
}
return false;
}
}
}

long totalAllocatedSize = _capacityMgr.getAllocatedPoolCapacity(pool, null);
long askingSize = dskCh.getSize();

long totalOverProvCapacity;
if (pool.getPoolType() == StoragePoolType.NetworkFilesystem) {
totalOverProvCapacity = _storageOverprovisioningFactor.multiply(new BigDecimal(pool.getCapacityBytes())).longValue();// All this for the inaccuracy of floats for big number multiplication.
}else {
totalOverProvCapacity = pool.getCapacityBytes();
}

if (s_logger.isDebugEnabled()) {
s_logger.debug("Attempting to look for pool " + pool.getId() + " for storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + totalAllocatedSize + ", askingSize : " + askingSize + ", allocated disable threshold: " + _storageAllocatedThreshold);
}

double usedPercentage = (totalAllocatedSize + askingSize) / (double)(totalOverProvCapacity);
if (usedPercentage > _storageAllocatedThreshold){
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot allocate this pool " + pool.getId() + " for storage since its allocated percentage: " +usedPercentage + " has crossed the allocated pool.storage.allocated.capacity.disablethreshold: " + _storageAllocatedThreshold + ", skipping this pool");
}
return false;
}

if (totalOverProvCapacity < (totalAllocatedSize + askingSize)) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Cannot allocate this pool " + pool.getId() + " for storage, not enough storage, maxSize : " + totalOverProvCapacity + ", totalAllocatedSize : " + totalAllocatedSize + ", askingSize : " + askingSize);
}

return false;
}

return true;
// check capacity
Volume volume = _volumeDao.findById(dskCh.getVolumeId());
List<Volume> requestVolumes = new ArrayList<Volume>();
requestVolumes.add(volume);
return _storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
}



@Override
public String chooseStorageIp(VirtualMachine vm, Host host, Host storage) {
return storage.getStorageIpAddress();

@ -31,19 +31,26 @@ import org.apache.log4j.Logger;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.server.StatsCollector;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.user.Account;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
import com.cloud.utils.component.Inject;

@Local(value=StoragePoolAllocator.class)
public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(FirstFitStoragePoolAllocator.class);
protected String _allocationAlgorithm = "random";

@Inject
DiskOfferingDao _diskOfferingDao;

@Override
public boolean allocatorIsCorrectType(DiskProfile dskCh) {
return !localStorageAllocationNeeded(dskCh);
@ -97,10 +104,16 @@ public class FirstFitStoragePoolAllocator extends AbstractStoragePoolAllocator {
s_logger.debug("FirstFitStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
}

DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId());
for (StoragePoolVO pool: pools) {
if(suitablePools.size() == returnUpTo){
break;
}
if (diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) {
s_logger.debug("Skipping RBD pool " + pool.getName() + " as a suitable pool. RBD is not supported for System VM's");
continue;
}

if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) {
suitablePools.add(pool);
}

@ -63,6 +63,7 @@ public interface VMTemplateDao extends GenericDao<VMTemplateVO, Long> {

public List<VMTemplateVO> listByHypervisorType(List<HypervisorType> hyperTypes);
public List<VMTemplateVO> publicIsoSearch(Boolean bootable, boolean listRemoved, Map<String, String> tags);
public List<VMTemplateVO> userIsoSearch(boolean listRemoved);
VMTemplateVO findSystemVMTemplate(long zoneId);
VMTemplateVO findSystemVMTemplate(long zoneId, HypervisorType hType);


@ -84,7 +84,6 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
||||
DomainDao _domainDao;
|
||||
@Inject
|
||||
DataCenterDao _dcDao;
|
||||
|
||||
private final String SELECT_TEMPLATE_HOST_REF = "SELECT t.id, h.data_center_id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, " +
|
||||
"t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t";
|
||||
|
||||
@ -93,7 +92,6 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
||||
|
||||
private final String SELECT_TEMPLATE_SWIFT_REF = "SELECT t.id, t.unique_name, t.name, t.public, t.featured, t.type, t.hvm, t.bits, t.url, t.format, t.created, t.account_id, "
|
||||
+ "t.checksum, t.display_text, t.enable_password, t.guest_os_id, t.bootable, t.prepopulate, t.cross_zones, t.hypervisor_type FROM vm_template t";
|
||||
|
||||
protected SearchBuilder<VMTemplateVO> TemplateNameSearch;
|
||||
protected SearchBuilder<VMTemplateVO> UniqueNameSearch;
|
||||
protected SearchBuilder<VMTemplateVO> tmpltTypeSearch;
|
||||
@ -106,6 +104,7 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
||||
private SearchBuilder<VMTemplateVO> PublicSearch;
|
||||
private SearchBuilder<VMTemplateVO> NameAccountIdSearch;
|
||||
private SearchBuilder<VMTemplateVO> PublicIsoSearch;
|
||||
private SearchBuilder<VMTemplateVO> UserIsoSearch;
|
||||
private GenericSearchBuilder<VMTemplateVO, Long> CountTemplatesByAccount;
|
||||
|
||||
ResourceTagsDaoImpl _tagsDao = ComponentLocator.inject(ResourceTagsDaoImpl.class);
|
||||
@ -189,6 +188,22 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
||||
return listBy(sc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<VMTemplateVO> userIsoSearch(boolean listRemoved){
|
||||
|
||||
SearchBuilder<VMTemplateVO> sb = null;
|
||||
sb = UserIsoSearch;
|
||||
SearchCriteria<VMTemplateVO> sc = sb.create();
|
||||
|
||||
sc.setParameters("format", Storage.ImageFormat.ISO);
|
||||
sc.setParameters("type", TemplateType.USER.toString());
|
||||
|
||||
if (!listRemoved) {
|
||||
sc.setParameters("removed", (Object)null);
|
||||
}
|
||||
|
||||
return listBy(sc);
|
||||
}
|
||||
@Override
|
||||
public List<VMTemplateVO> listAllSystemVMTemplates() {
|
||||
SearchCriteria<VMTemplateVO> sc = tmpltTypeSearch.create();
|
||||
@ -299,6 +314,11 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
||||
PublicIsoSearch.and("bootable", PublicIsoSearch.entity().isBootable(), SearchCriteria.Op.EQ);
|
||||
PublicIsoSearch.and("removed", PublicIsoSearch.entity().getRemoved(), SearchCriteria.Op.EQ);
|
||||
|
||||
UserIsoSearch = createSearchBuilder();
|
||||
UserIsoSearch.and("format", UserIsoSearch.entity().getFormat(), SearchCriteria.Op.EQ);
|
||||
UserIsoSearch.and("type", UserIsoSearch.entity().getTemplateType(), SearchCriteria.Op.EQ);
|
||||
UserIsoSearch.and("removed", UserIsoSearch.entity().getRemoved(), SearchCriteria.Op.EQ);
|
||||
|
||||
tmpltTypeHyperSearch = createSearchBuilder();
|
||||
tmpltTypeHyperSearch.and("templateType", tmpltTypeHyperSearch.entity().getTemplateType(), SearchCriteria.Op.EQ);
|
||||
SearchBuilder<HostVO> hostHyperSearch = _hostDao.createSearchBuilder();
|
||||
@ -648,28 +668,44 @@ public class VMTemplateDaoImpl extends GenericDaoBase<VMTemplateVO, Long> implem
|
||||
|
||||
pstmt = txn.prepareStatement(sql);
|
||||
rs = pstmt.executeQuery();
|
||||
while (rs.next()) {
|
||||
Pair<Long, Long> templateZonePair = new Pair<Long, Long>(rs.getLong(1), rs.getLong(2));
|
||||
templateZonePairList.add(templateZonePair);
|
||||
}
|
||||
|
||||
while (rs.next()) {
|
||||
Pair<Long, Long> templateZonePair = new Pair<Long, Long>(rs.getLong(1), rs.getLong(2));
|
||||
templateZonePairList.add(templateZonePair);
|
||||
}
|
||||
//for now, defaulting pageSize to a large val if null; may need to revisit post 2.2RC2
|
||||
if(isIso && templateZonePairList.size() < (pageSize != null ? pageSize : 500)
|
||||
&& templateFilter != TemplateFilter.community
|
||||
&& !(templateFilter == TemplateFilter.self && !BaseCmd.isRootAdmin(caller.getType())) ){ //evaluates to true If root admin and filter=self
|
||||
List<VMTemplateVO> publicIsos = publicIsoSearch(bootable, false, tags);
|
||||
for( int i=0; i < publicIsos.size(); i++){
|
||||
if (keyword != null && publicIsos.get(i).getName().contains(keyword)) {
|
||||
templateZonePairList.add(new Pair<Long,Long>(publicIsos.get(i).getId(), null));
|
||||
continue;
|
||||
} else if (name != null && publicIsos.get(i).getName().contains(name)) {
|
||||
templateZonePairList.add(new Pair<Long,Long>(publicIsos.get(i).getId(), null));
|
||||
continue;
|
||||
}else if (keyword == null && name == null){
|
||||
templateZonePairList.add(new Pair<Long,Long>(publicIsos.get(i).getId(), null));
|
||||
|
||||
List<VMTemplateVO> publicIsos = publicIsoSearch(bootable, false, tags);
|
||||
List<VMTemplateVO> userIsos = userIsoSearch(false);
|
||||
|
||||
//Listing the ISOs according to the page size.Restricting the total no. of ISOs on a page
|
||||
//to be less than or equal to the pageSize parameter
|
||||
|
||||
int i=0;
|
||||
|
||||
if (startIndex > userIsos.size()) {
|
||||
i=(int) (startIndex - userIsos.size());
|
||||
}
|
||||
|
||||
for (; i < publicIsos.size(); i++) {
|
||||
if(templateZonePairList.size() >= pageSize){
|
||||
break;
|
||||
} else {
|
||||
if (keyword != null && publicIsos.get(i).getName().contains(keyword)) {
|
||||
templateZonePairList.add(new Pair<Long,Long>(publicIsos.get(i).getId(), null));
|
||||
continue;
|
||||
} else if (name != null && publicIsos.get(i).getName().contains(name)) {
|
||||
templateZonePairList.add(new Pair<Long,Long>(publicIsos.get(i).getId(), null));
|
||||
continue;
|
||||
} else if (keyword == null && name == null){
|
||||
templateZonePairList.add(new Pair<Long,Long>(publicIsos.get(i).getId(), null));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
s_logger.warn("Error listing templates", e);
|
||||
} finally {
|
||||
|
||||
@ -1477,4 +1477,9 @@ public class SecondaryStorageManagerImpl implements SecondaryStorageVmManager, V
// TODO Auto-generated method stub
return false;
}

@Override
public void prepareStop(VirtualMachineProfile<SecondaryStorageVmVO> profile) {

}
}

@ -79,6 +79,7 @@ import com.cloud.storage.SnapshotPolicyVO;
import com.cloud.storage.SnapshotScheduleVO;
import com.cloud.storage.SnapshotVO;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolVO;
@ -298,6 +299,13 @@ public class SnapshotManagerImpl implements SnapshotManager, SnapshotService, Ma
}
}
StoragePoolVO srcPool = _storagePoolDao.findById(volume.getPoolId());

// RBD volumes do not support snapshotting in the way CloudStack does it.
// For now we leave the snapshot feature disabled for RBD volumes
if (srcPool.getPoolType() == StoragePoolType.RBD) {
throw new CloudRuntimeException("RBD volumes do not support snapshotting");
}

ManageSnapshotCommand cmd = new ManageSnapshotCommand(snapshotId, volume.getPath(), srcPool, preSnapshotPath, snapshot.getName(), vmName);

ManageSnapshotAnswer answer = (ManageSnapshotAnswer) sendToPool(volume, cmd);

@ -55,6 +55,7 @@ import com.cloud.upgrade.dao.Upgrade229to2210;
import com.cloud.upgrade.dao.Upgrade301to302;
import com.cloud.upgrade.dao.Upgrade302to303;
import com.cloud.upgrade.dao.Upgrade30to301;
import com.cloud.upgrade.dao.Upgrade303to40;
import com.cloud.upgrade.dao.UpgradeSnapshot217to224;
import com.cloud.upgrade.dao.UpgradeSnapshot223to224;
import com.cloud.upgrade.dao.VersionDao;
@ -152,11 +153,13 @@ public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
_upgradeMap.put("2.2.14", new DbUpgrade[] { new Upgrade2214to30(), new Upgrade30to301(), new Upgrade301to302(),
new Upgrade302to303() });

_upgradeMap.put("3.0.0", new DbUpgrade[] { new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to303() });
_upgradeMap.put("3.0.0", new DbUpgrade[] { new Upgrade30to301(), new Upgrade301to302(), new Upgrade302to303(), new Upgrade303to40() });

_upgradeMap.put("3.0.1", new DbUpgrade[] { new Upgrade301to302(), new Upgrade302to303() });
_upgradeMap.put("3.0.1", new DbUpgrade[] { new Upgrade301to302(), new Upgrade302to303(), new Upgrade303to40() });

_upgradeMap.put("3.0.2", new DbUpgrade[] { new Upgrade302to303() });
_upgradeMap.put("3.0.2", new DbUpgrade[] { new Upgrade302to303(), new Upgrade303to40() });

_upgradeMap.put("3.0.3", new DbUpgrade[] { new Upgrade303to40() });
}

protected void runScript(Connection conn, File file) {

@ -14,17 +14,6 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/*Copyright 2012 Citrix Systems, Inc. Licensed under the
Apache License, Version 2.0 (the "License"); you may not use this
file except in compliance with the License. Citrix Systems, Inc.
reserves all rights not expressly granted by the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.*/


package com.cloud.upgrade.dao;


67
server/src/com/cloud/upgrade/dao/Upgrade303to40.java
Normal file
@ -0,0 +1,67 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package com.cloud.upgrade.dao;

/**
* @author Alena Prokharchyk
*/
import java.io.File;
import java.sql.Connection;

import org.apache.log4j.Logger;
//
import com.cloud.utils.exception.CloudRuntimeException;
import com.cloud.utils.script.Script;

public class Upgrade303to40 implements DbUpgrade {
final static Logger s_logger = Logger.getLogger(Upgrade303to40.class);

@Override
public String[] getUpgradableVersionRange() {
return new String[] { "3.0.3", "4.0.0" };
}

@Override
public String getUpgradedVersion() {
return "4.0.0";
}

@Override
public boolean supportsRollingUpgrade() {
return false;
}

@Override
public File[] getPrepareScripts() {
String script = Script.findScript("", "db/schema-303to40.sql");
if (script == null) {
throw new CloudRuntimeException("Unable to find db/schema-303to40.sql");
}

return new File[] { new File(script) };
}

@Override
public void performDataMigration(Connection conn) {
}

@Override
public File[] getCleanupScripts() {
return null;
}
}
@ -3695,4 +3695,8 @@ public class UserVmManagerImpl implements UserVmManager, UserVmService, Manager
throw new UnsupportedOperationException("Unplug nic is not supported for vm of type " + vm.getType());
}

@Override
public void prepareStop(VirtualMachineProfile<UserVmVO> profile) {
}

}

@ -82,6 +82,7 @@ public interface VirtualMachineGuru<T extends VirtualMachine> {
Long convertToId(String vmName);

/**
<<<<<<< HEAD
* Prepare for a nic to be plugged into the network.
* @param network
* @param nic
@ -110,4 +111,11 @@ public interface VirtualMachineGuru<T extends VirtualMachine> {
*/
boolean unplugNic(Network network, NicTO nic, VirtualMachineTO vm,
ReservationContext context, DeployDestination dest) throws ConcurrentOperationException, ResourceUnavailableException;

/**
* Prepare Vm for Stop
* @param profile
* @return
*/
void prepareStop(VirtualMachineProfile<T> profile);
}

@ -1074,11 +1074,10 @@ public class VirtualMachineManagerImpl implements VirtualMachineManager, Listene
if (vm.getState() != State.Stopping) {
throw new CloudRuntimeException("We cannot proceed with stop VM " + vm + " since it is not in 'Stopping' state, current state: " + vm.getState());
}
String routerPrivateIp = null;
if (vm.getType() == VirtualMachine.Type.DomainRouter) {
routerPrivateIp = vm.getPrivateIpAddress();
}
StopCommand stop = new StopCommand(vm, vm.getInstanceName(), null, routerPrivateIp);

vmGuru.prepareStop(profile);

StopCommand stop = new StopCommand(vm, vm.getInstanceName(), null);
boolean stopped = false;
StopAnswer answer = null;
try {

@ -1487,6 +1487,7 @@ CREATE TABLE `cloud`.`storage_pool` (
`available_bytes` bigint unsigned,
`capacity_bytes` bigint unsigned,
`host_address` varchar(255) NOT NULL COMMENT 'FQDN or IP of storage server',
`user_info` varchar(255) NULL COMMENT 'Authorization information for the storage pool. Used by network filesystems',
`path` varchar(255) NOT NULL COMMENT 'Filesystem path that is shared',
`created` datetime COMMENT 'date the pool created',
`removed` datetime COMMENT 'date removed if not null',

63
setup/db/db/schema-303to40.sql
Normal file
@ -0,0 +1,63 @@
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements.  See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership.  The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License.  You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied.  See the License for the
-- specific language governing permissions and limitations
-- under the License.

--;
-- Schema upgrade from 3.0.3 to 4.0.0;
--;

# RBD Primary Storage pool support (commit: 406fd95d87bfcdbb282d65589ab1fb6e9fd0018a)
ALTER TABLE `storage_pool` ADD `user_info` VARCHAR( 255 ) NULL COMMENT 'Authorization information for the storage pool. Used by network filesystems' AFTER `host_address`;

# Resource tags (commit: 62d45b9670520a1ee8b520509393d4258c689b50)
CREATE TABLE `cloud`.`resource_tags` (
`id` bigint unsigned NOT NULL auto_increment COMMENT 'id',
`uuid` varchar(40),
`key` varchar(255),
`value` varchar(255),
`resource_id` bigint unsigned NOT NULL,
`resource_uuid` varchar(40),
`resource_type` varchar(255),
`customer` varchar(255),
`domain_id` bigint unsigned NOT NULL COMMENT 'foreign key to domain id',
`account_id` bigint unsigned NOT NULL COMMENT 'owner of this network',
PRIMARY KEY (`id`),
CONSTRAINT `fk_tags__account_id` FOREIGN KEY(`account_id`) REFERENCES `account`(`id`),
CONSTRAINT `fk_tags__domain_id` FOREIGN KEY(`domain_id`) REFERENCES `domain`(`id`),
UNIQUE `i_tags__resource_id__resource_type__key`(`resource_id`, `resource_type`, `key`),
CONSTRAINT `uc_resource_tags__uuid` UNIQUE (`uuid`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

# Nicira Integration (commit: 79c7da07abd4294f150851aa0c2d06a28564c5a9)
CREATE TABLE `cloud`.`external_nicira_nvp_devices` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`uuid` varchar(255) UNIQUE,
`physical_network_id` bigint unsigned NOT NULL COMMENT 'id of the physical network in to which nicira nvp device is added',
`provider_name` varchar(255) NOT NULL COMMENT 'Service Provider name corresponding to this nicira nvp device',
`device_name` varchar(255) NOT NULL COMMENT 'name of the nicira nvp device',
`host_id` bigint unsigned NOT NULL COMMENT 'host id corresponding to the external nicira nvp device',
PRIMARY KEY (`id`),
CONSTRAINT `fk_external_nicira_nvp_devices__host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE,
CONSTRAINT `fk_external_nicira_nvp_devices__physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

CREATE TABLE `cloud`.`nicira_nvp_nic_map` (
`id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id',
`logicalswitch` varchar(255) NOT NULL COMMENT 'nicira uuid of logical switch this port is provisioned on',
`logicalswitchport` varchar(255) UNIQUE COMMENT 'nicira uuid of this logical switch port',
`nic` varchar(255) UNIQUE COMMENT 'cloudstack uuid of the nic connected to this logical switch port',
PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
6
tools/devcloud/README
Normal file
@ -0,0 +1,6 @@
This folder contains various scripts used to build the devcloud image.

Folders:
puppet - puppet conf files and module directories for the various build phases
vagrant - basic vagrant configuration for a devcloud box (and a puppet manifest to help configure the system to be a valid vagrant box)
ubuntu_install - ubuntu installation / initial configuration
12
tools/devcloud/puppet/README
Normal file
@ -0,0 +1,12 @@
To use:

Preconfiguration phase -
- [setup your box in vagrant and place the contents of tools/devcloud/puppet into the box's folder]
- vagrant up; vagrant ssh
- sudo bash
- ln -s /vagrant/puppet-devcloudinitial /etc/puppet/modules/puppet-devcloudinitial
- cd /vagrant
- puppet apply --fsconfig fileserver.conf --debug --verbose -e "include puppet-devcloudinitial"

Postconfiguration phase -
TODO
8
tools/devcloud/puppet/puppet-devcloudinitial/Modulefile
Normal file
@ -0,0 +1,8 @@
name 'puppet-devcloudinitial'
version '0.0.1'
source ''
author 'Apache Software Foundation'
license 'Apache 2.0'
summary 'CloudStack DevCloud initial configuration module'
description 'Installation and configuration of all prerequisites for building a DevCloud image.'
project_page 'http://cloudstack.org'
34
tools/devcloud/puppet/puppet-devcloudinitial/files/grub
Normal file
@ -0,0 +1,34 @@
|
||||
# If you change this file, run 'update-grub' afterwards to update
|
||||
# /boot/grub/grub.cfg.
|
||||
# For full documentation of the options in this file, see:
|
||||
# info -f grub -n 'Simple configuration'
|
||||
|
||||
GRUB_DEFAULT=0
|
||||
#GRUB_HIDDEN_TIMEOUT=0
|
||||
GRUB_HIDDEN_TIMEOUT_QUIET=true
|
||||
GRUB_TIMEOUT=2
|
||||
GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
|
||||
GRUB_CMDLINE_LINUX_DEFAULT=""
|
||||
GRUB_CMDLINE_LINUX=""
|
||||
|
||||
# Uncomment to enable BadRAM filtering, modify to suit your needs
|
||||
# This works with Linux (no patch required) and with any kernel that obtains
|
||||
# the memory map information from GRUB (GNU Mach, kernel of FreeBSD ...)
|
||||
#GRUB_BADRAM="0x01234567,0xfefefefe,0x89abcdef,0xefefefef"
|
||||
|
||||
# Uncomment to disable graphical terminal (grub-pc only)
|
||||
#GRUB_TERMINAL=console
|
||||
|
||||
# The resolution used on graphical terminal
|
||||
# note that you can use only modes which your graphic card supports via VBE
|
||||
# you can see them in real GRUB with the command `vbeinfo'
|
||||
#GRUB_GFXMODE=640x480
|
||||
|
||||
# Uncomment if you don't want GRUB to pass "root=UUID=xxx" parameter to Linux
|
||||
#GRUB_DISABLE_LINUX_UUID=true
|
||||
|
||||
# Uncomment to disable generation of recovery mode menu entries
|
||||
#GRUB_DISABLE_RECOVERY="true"
|
||||
|
||||
# Uncomment to get a beep at grub start
|
||||
#GRUB_INIT_TUNE="480 440 1"
|
||||
@ -0,0 +1,16 @@
|
||||
# The loopback network interface
|
||||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
# The primary network interface
|
||||
auto xenbr0
|
||||
iface xenbr0 inet dhcp
|
||||
gateway 10.0.2.2
|
||||
bridge_ports eth0
|
||||
|
||||
|
||||
auto eth0
|
||||
iface eth0 inet dhcp
|
||||
pre-up iptables-save < /etc/iptables.save
|
||||
pre-up /etc/init.d/ebtables load
|
||||
|
||||
@ -0,0 +1 @@
|
||||
bridge
|
||||
@ -0,0 +1 @@
|
||||
TOOLSTACK=xapi
|
||||
171
tools/devcloud/puppet/puppet-devcloudinitial/files/xend
Normal file
@ -0,0 +1,171 @@
|
||||
#!/bin/sh
|
||||
### BEGIN INIT INFO
|
||||
# Provides: xend
|
||||
# Required-Start: $remote_fs
|
||||
# Required-Stop: $remote_fs
|
||||
# Default-Start: 2 3 4 5
|
||||
# Default-Stop: 0 1 6
|
||||
# Short-Description: XEN control daemon
|
||||
# Description: XEN control daemon
|
||||
### END INIT INFO
|
||||
|
||||
PATH=/usr/lib/xen-common/bin:/sbin:/bin:/usr/sbin:/usr/bin
|
||||
DESC="Xen daemons"
|
||||
|
||||
VERSION=$(xen-version)
|
||||
ROOT=/usr/lib/xen-$VERSION
|
||||
|
||||
XEND="$ROOT"/bin/xend
|
||||
XENCONSOLED="$ROOT"/bin/xenconsoled
|
||||
XENCONSOLED_PIDFILE="/var/run/xenconsoled.pid"
|
||||
XENSTORED="$ROOT"/bin/xenstored
|
||||
XENSTORED_DIR="/var/run/xenstored"
|
||||
XENSTORED_PIDFILE="/var/run/xenstore.pid"
|
||||
|
||||
[ "$VERSION" ] || exit 0
|
||||
[ -x "$XEND" ] || exit 0
|
||||
|
||||
[ -r /etc/default/xend ] && . /etc/default/xend
|
||||
|
||||
. /lib/init/vars.sh
|
||||
. /lib/lsb/init-functions
|
||||
|
||||
modules_setup()
|
||||
{
|
||||
modprobe xenfs 2>/dev/null
|
||||
modprobe xen-evtchn 2>/dev/null
|
||||
modprobe xen_blkback 2>/dev/null
|
||||
modprobe xen_netback 2>/dev/null
|
||||
modprobe xen_gntdev 2>/dev/null
|
||||
}
|
||||
|
||||
xenfs_setup()
|
||||
{
|
||||
[ -e "/proc/xen/capabilities" ] && return 0
|
||||
log_progress_msg "xenfs"
|
||||
[ -d "/proc/xen" ] || return 1
|
||||
mount -t xenfs xenfs /proc/xen || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
capability_check()
|
||||
{
|
||||
[ -e "/proc/xen/capabilities" ] || return 1
|
||||
grep -q "control_d" /proc/xen/capabilities || return 1
|
||||
return 0
|
||||
}
|
||||
|
||||
xend_start()
|
||||
{
|
||||
log_progress_msg "xend"
|
||||
$XEND status && return 1
|
||||
$XEND start || return 2
|
||||
|
||||
i=0
|
||||
while [ $i -lt 10 ]; do
|
||||
$XEND status && return 0 || true
|
||||
i=$(($i + 1))
|
||||
sleep 1
|
||||
done
|
||||
return 2
|
||||
}
|
||||
|
||||
xend_stop()
|
||||
{
|
||||
log_progress_msg "xend"
|
||||
$XEND status || return 0
|
||||
$XEND stop || return 1
|
||||
}
|
||||
|
||||
xenconsoled_start()
|
||||
{
|
||||
log_progress_msg "xenconsoled"
|
||||
start-stop-daemon --start --quiet --pidfile "$XENCONSOLED_PIDFILE" --exec "$XENCONSOLED" --test > /dev/null \
|
||||
|| return 1
|
||||
start-stop-daemon --start --quiet --pidfile "$XENCONSOLED_PIDFILE" --exec "$XENCONSOLED" -- \
|
||||
$XENCONSOLED_ARGS --pid-file="$XENCONSOLED_PIDFILE" \
|
||||
|| return 2
|
||||
}
|
||||
|
||||
xenstored_start()
|
||||
{
|
||||
log_progress_msg "xenstored"
|
||||
start-stop-daemon --start --quiet --pidfile "$XENSTORED_PIDFILE" --exec "$XENSTORED" --test > /dev/null \
|
||||
|| return 1
|
||||
[ -d "$XENSTORED_DIR" ] || mkdir -p "$XENSTORED_DIR"
|
||||
export XENSTORED_ROOTDIR="$XENSTORED_DIR"
|
||||
start-stop-daemon --start --quiet --pidfile "$XENSTORED_PIDFILE" --exec "$XENSTORED" -- \
|
||||
$XENSTORED_ARGS --pid-file="$XENSTORED_PIDFILE" \
|
||||
|| return 2
|
||||
}
|
||||
|
||||
case "$1" in
|
||||
start)
|
||||
log_daemon_msg "Starting $DESC"
|
||||
modules_setup
|
||||
xenfs_setup
|
||||
case "$?" in
|
||||
0) ;;
|
||||
*) log_end_msg 1; exit ;;
|
||||
esac
|
||||
capability_check
|
||||
case "$?" in
|
||||
0) ;;
|
||||
*) log_end_msg 255; exit ;;
|
||||
esac
|
||||
xenstored_start
|
||||
case "$?" in
|
||||
0|1) ;;
|
||||
*) log_end_msg 1; exit ;;
|
||||
esac
|
||||
xenconsoled_start
|
||||
case "$?" in
|
||||
0|1) ;;
|
||||
*) log_end_msg 1; exit ;;
|
||||
esac
|
||||
#xend_start
|
||||
case "$?" in
|
||||
0|1) ;;
|
||||
*) log_end_msg 1; exit ;;
|
||||
esac
|
||||
log_end_msg 0
|
||||
;;
|
||||
stop)
|
||||
capability_check
|
||||
case "$?" in
|
||||
0) ;;
|
||||
*) exit ;;
|
||||
esac
|
||||
log_daemon_msg "Stopping $DESC"
|
||||
#xend_stop
|
||||
case "$?" in
|
||||
0|1) log_end_msg 0 ;;
|
||||
*) log_end_msg 1 ;;
|
||||
esac
|
||||
;;
|
||||
restart|force-reload)
|
||||
capability_check
|
||||
case "$?" in
|
||||
0) ;;
|
||||
*) exit ;;
|
||||
esac
|
||||
log_daemon_msg "Restarting $DESC"
|
||||
#xend_stop
|
||||
case "$?" in
|
||||
0|1)
|
||||
#xend_start
|
||||
case "$?" in
|
||||
0) log_end_msg 0 ;;
|
||||
*) log_end_msg 1 ;;
|
||||
esac
|
||||
;;
|
||||
*) log_end_msg 1 ;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "Usage: $0 {start|stop|restart|force-reload}" >&2
|
||||
exit 3
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
||||
1
tools/devcloud/puppet/puppet-devcloudinitial/init.pp
Normal file
@ -0,0 +1 @@
|
||||
include puppet-devcloudinitial
|
||||
101
tools/devcloud/puppet/puppet-devcloudinitial/manifests/init.pp
Normal file
@ -0,0 +1,101 @@
|
||||
class puppet-devcloudinitial {
|
||||
|
||||
package { 'linux-headers-3.2.0-23-generic':
|
||||
ensure => latest,
|
||||
}
|
||||
|
||||
package { 'xen-hypervisor-4.1-amd64':
|
||||
ensure => latest,
|
||||
}
|
||||
|
||||
package { 'xcp-xapi':
|
||||
require => Package['xen-hypervisor-4.1-amd64'],
|
||||
ensure => latest,
|
||||
}
|
||||
|
||||
file { '/etc/xcp/network.conf':
|
||||
require => Package['xcp-xapi'],
|
||||
ensure => 'file',
|
||||
source => 'puppet:///modules/puppet-devcloudinitial/network.conf',
|
||||
group => '0',
|
||||
mode => '644',
|
||||
owner => '0',
|
||||
}
|
||||
|
||||
file { '/etc/init.d/xend':
|
||||
require => Package['xcp-xapi'],
|
||||
ensure => 'file',
|
||||
source => 'puppet:///modules/puppet-devcloudinitial/xend',
|
||||
group => '0',
|
||||
owner => '0',
|
||||
mode => '755',
|
||||
}
|
||||
|
||||
service { 'xendomains':
|
||||
require => Package['xcp-xapi'],
|
||||
ensure => 'stopped',
|
||||
enable => 'false',
|
||||
}
|
||||
|
||||
file { '/etc/default/grub':
|
||||
require => Package['xen-hypervisor-4.1-amd64'],
|
||||
ensure => 'file',
|
||||
source => 'puppet:///modules/puppet-devcloudinitial/grub',
|
||||
group => '0',
|
||||
mode => '644',
|
||||
owner => '0',
|
||||
}
|
||||
|
||||
exec { "/usr/sbin/update-grub":
|
||||
subscribe => File['/etc/default/grub'],
|
||||
refreshonly => true,
|
||||
cwd => '/',
|
||||
}
|
||||
|
||||
file { '/usr/share/qemu':
|
||||
require => Package['xen-hypervisor-4.1-amd64'],
|
||||
ensure => 'directory',
|
||||
group => '0',
|
||||
mode => '755',
|
||||
owner => '0',
|
||||
}
|
||||
|
||||
file { '/usr/share/qemu/keymaps':
|
||||
require => File['/usr/share/qemu'],
|
||||
ensure => 'link',
|
||||
group => '0',
|
||||
mode => '777',
|
||||
owner => '0',
|
||||
target => '/usr/share/qemu-linaro/keymaps',
|
||||
}
|
||||
|
||||
file { '/etc/network/interfaces':
|
||||
ensure => 'file',
|
||||
source => 'puppet:///modules/puppet-devcloudinitial/interfaces',
|
||||
group => '0',
|
||||
mode => '644',
|
||||
owner => '0',
|
||||
}
|
||||
|
||||
file { '/etc/default/xen':
|
||||
require => Package['xen-hypervisor-4.1-amd64'],
|
||||
ensure => 'file',
|
||||
source => 'puppet:///modules/puppet-devcloudinitial/xen-defaults',
|
||||
group => '0',
|
||||
mode => '644',
|
||||
owner => '0',
|
||||
}
|
||||
|
||||
user { 'root':
|
||||
ensure => 'present',
|
||||
comment => 'root',
|
||||
gid => '0',
|
||||
home => '/root',
|
||||
password => '$6$SCixzUjT$sVs9PwR2g7XdHSLnQW5Zsy2dVpVV3qESFV4Joniusbu3BqWUtKgc91vwEDwPhLqyCYM3kKR1.7G9g2Hu/pTQN/',
|
||||
password_max_age => '99999',
|
||||
password_min_age => '0',
|
||||
shell => '/bin/bash',
|
||||
uid => '0',
|
||||
}
|
||||
|
||||
}
|
||||
1
tools/devcloud/ubuntu_install/README
Normal file
@ -0,0 +1 @@
Nothing in this directory yet, but it will contain the scripts and preconfiguration files to build the basic Ubuntu 12.04 server used in devcloud.
72
tools/devcloud/vagrant/Vagrantfile
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing,
|
||||
# software distributed under the License is distributed on an
|
||||
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
# KIND, either express or implied. See the License for the
|
||||
# specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
Vagrant::Config.run do |config|
|
||||
config.vm.box = "devcloudbase-ubuntu-12-04-64bit"
|
||||
# TODO: Get a URL to host the base image
|
||||
# config.vm.box_url = "http://domain.com/path/to/above.box"
|
||||
|
||||
# Uncomment this line to enable the console for debugging the
|
||||
# build process.
|
||||
#config.vm.boot_mode = :gui
|
||||
|
||||
# Setup port forwarding
|
||||
config.vm.forward_port 22, 2222
|
||||
config.vm.forward_port 8080, 8080
|
||||
config.vm.forward_port 8443, 8443
|
||||
config.vm.forward_port 5901, 5901
|
||||
config.vm.forward_port 8787, 8787
|
||||
config.vm.forward_port 8250, 8250
|
||||
|
||||
# Ensure the VM has the right virtual resources
|
||||
#config.vm.
|
||||
|
||||
config.vm.provision :puppet do |puppet|
|
||||
puppet.manifests_path = "puppet-devcloudinitial"
|
||||
puppet.manifest_file = "init.pp"
|
||||
puppet.with_ssh = true
|
||||
puppet.pp_path = "/etc/puppet"
|
||||
puppet.module_path = "puppet-devcloudinitial"
|
||||
end
|
||||
|
||||
# Enable provisioning with Puppet stand alone. Puppet manifests
|
||||
# are contained in a directory path relative to this Vagrantfile.
|
||||
# You will need to create the manifests directory and a manifest in
|
||||
# the file my_box.pp in the manifests_path directory.
|
||||
#
|
||||
# An example Puppet manifest to provision the message of the day:
|
||||
#
|
||||
# # group { "puppet":
|
||||
# # ensure => "present",
|
||||
# # }
|
||||
# #
|
||||
# # File { owner => 0, group => 0, mode => 0644 }
|
||||
# #
|
||||
# # file { '/etc/motd':
|
||||
# # content => "Welcome to your Vagrant-built virtual machine!
|
||||
# # Managed by Puppet.\n"
|
||||
# # }
|
||||
#
|
||||
# config.vm.provision :puppet do |puppet|
|
||||
# puppet.manifests_path = "manifests"
|
||||
# puppet.manifest_file = "my_box.pp"
|
||||
# end
|
||||
|
||||
end
|
||||
43
tools/devcloud/vagrant/vagrant.pp
Normal file
@ -0,0 +1,43 @@
|
||||
group { 'vagranttest':
|
||||
ensure => 'present',
|
||||
gid => '5000',
|
||||
}
|
||||
|
||||
group { 'admin':
|
||||
ensure => 'present',
|
||||
gid => '1002',
|
||||
}
|
||||
|
||||
user { 'vagranttest':
|
||||
ensure => 'present',
|
||||
comment => 'vagrant,,,',
|
||||
gid => '5000',
|
||||
groups => ['adm', 'cdrom', 'dip', 'plugdev', 'lpadmin', 'sambashare', 'admin'],
|
||||
home => '/home/vagranttest',
|
||||
shell => '/bin/bash',
|
||||
uid => '5000',
|
||||
}
|
||||
|
||||
file { '/home/vagranttest':
|
||||
ensure => 'directory',
|
||||
group => '1002',
|
||||
mode => '755',
|
||||
owner => '5000',
|
||||
}
|
||||
|
||||
file { '/home/vagranttest/.ssh':
|
||||
ensure => 'directory',
|
||||
group => '1002',
|
||||
mode => '775',
|
||||
owner => '5000',
|
||||
}
|
||||
|
||||
$auth_key = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key"
|
||||
|
||||
file { '/home/vagranttest/.ssh/authorized_keys':
|
||||
ensure => 'file',
|
||||
content => $auth_key,
|
||||
group => '1002',
|
||||
mode => '664',
|
||||
owner => '5000',
|
||||
}
|
||||
@ -280,6 +280,8 @@ body.login {
|
||||
.login .fields .field label.error {
|
||||
color: #FF0000;
|
||||
float: right;
|
||||
left: 204px;
|
||||
top: 0;
|
||||
}
|
||||
|
||||
.login .fields input {
|
||||
@ -10004,3 +10006,7 @@ div.panel.ui-dialog div.list-view div.fixed-header {
|
||||
background-position: -230px -615px;
|
||||
}
|
||||
|
||||
.label-hovered {
|
||||
cursor: pointer;
|
||||
color: blue !important;
|
||||
}
|
||||
|
||||
20
ui/index.jsp
@ -274,7 +274,7 @@
|
||||
</div>
|
||||
|
||||
<div class="secondary-input hide-if-unselected">
|
||||
<input type="radio" name="defaultNetwork" value="new-network" />
|
||||
<input type="radio" name="defaultNetwork" value="new-network" wizard-field="default-network" />
|
||||
<div class="name"><fmt:message key="label.default"/></div>
|
||||
</div>
|
||||
</div>
|
||||
@ -348,7 +348,7 @@
|
||||
<span wizard-field="hypervisor"></span>
|
||||
</div>
|
||||
<div class="edit">
|
||||
<a href="1"><fmt:message key="label.edit"/></a>
|
||||
<a href="2"><fmt:message key="label.edit"/></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@ -397,7 +397,20 @@
|
||||
<span><fmt:message key="label.network"/></span>
|
||||
</div>
|
||||
<div class="value">
|
||||
<span wizard-field="default-network"></span>
|
||||
<span wizard-field="default-network" conditional-field="select-network"></span>
|
||||
</div>
|
||||
<div class="edit">
|
||||
<a href="5"><fmt:message key="label.edit"/></a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Security groups -->
|
||||
<div class="select odd">
|
||||
<div class="name">
|
||||
<span><fmt:message key="label.security.groups"/></span>
|
||||
</div>
|
||||
<div class="value">
|
||||
<span wizard-field="security-groups" conditional-field="select-security-group"></span>
|
||||
</div>
|
||||
<div class="edit">
|
||||
<a href="5"><fmt:message key="label.edit"/></a>
|
||||
@ -2034,6 +2047,7 @@ dictionary = {
|
||||
'label.SR.name ': '<fmt:message key="label.SR.name " />',
|
||||
'label.SharedMountPoint': '<fmt:message key="label.SharedMountPoint" />',
|
||||
'label.clvm': '<fmt:message key="label.clvm" />',
|
||||
'label.rbd': '<fmt:message key="label.rbd" />',
|
||||
'label.volgroup': '<fmt:message key="label.volgroup" />',
|
||||
'label.VMFS.datastore': '<fmt:message key="label.VMFS.datastore" />',
|
||||
'label.network.device': '<fmt:message key="label.network.device" />',
|
||||
|
||||
@ -15,6 +15,7 @@
|
||||
// specific language governing permissions and limitations
|
||||
// under the License.
|
||||
(function(cloudStack) {
|
||||
var getProjectAdmin, selectedProjectObj;
|
||||
cloudStack.projects = {
|
||||
requireInvitation: function(args) {
|
||||
return g_capabilities.projectinviterequired;
|
||||
@ -447,12 +448,28 @@
|
||||
return ['destroy'];
|
||||
}
|
||||
|
||||
if (args.context.multiRule[0].role != 'Admin') {
|
||||
if (args.context.multiRule[0].role != 'Admin' &&
|
||||
(cloudStack.context.users[0].account == getProjectAdmin || isAdmin() || isDomainAdmin())) { // This is for the new project wizard: check if current logged in User is the Project Owner
|
||||
return args.context.actions;
|
||||
}
|
||||
|
||||
return [];
|
||||
},
|
||||
readOnlyCheck: function(args) { // check if current logged in User is the Project Owner
|
||||
if (isAdmin() || isDomainAdmin())
|
||||
return true;
|
||||
|
||||
var projectOwner, currentUser = cloudStack.context.users[0].account;
|
||||
$(args.data).each(function() {
|
||||
var data = this;
|
||||
if (data.role == 'Admin')
|
||||
projectOwner = data.username;
|
||||
});
|
||||
if (projectOwner == currentUser)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
},
|
||||
actions: {
|
||||
destroy: {
|
||||
label: 'label.remove.project.account',
|
||||
@ -497,7 +514,15 @@
|
||||
success: function(data) {
|
||||
args.response.success({
|
||||
_custom: {
|
||||
jobId: data.updateprojectresponse.jobid
|
||||
jobId: data.updateprojectresponse.jobid,
|
||||
onComplete: function(){
|
||||
setTimeout(function() {
|
||||
$(window).trigger('cloudStack.fullRefresh');
|
||||
if (isUser()) {
|
||||
$(window).trigger('cloudStack.detailsRefresh');
|
||||
}
|
||||
}, 500);
|
||||
}
|
||||
},
|
||||
notification: {
|
||||
label: 'label.make.project.owner',
|
||||
@ -522,6 +547,8 @@
|
||||
success: function(data) {
|
||||
args.response.success({
|
||||
data: $.map(data.listprojectaccountsresponse.projectaccount, function(elem) {
|
||||
if (elem.role == 'Owner' || elem.role == 'Admin')
|
||||
getProjectAdmin = elem.account;
|
||||
return {
|
||||
id: elem.accountid,
|
||||
role: elem.role,
|
||||
@ -635,6 +662,30 @@
|
||||
},
|
||||
|
||||
detailView: {
|
||||
updateContext: function (args) {
|
||||
var project;
|
||||
var projectID = args.context.projects[0].id;
|
||||
var url = 'listProjects';
|
||||
if (isDomainAdmin()) {
|
||||
url += '&domainid=' + args.context.users[0].domainid;
|
||||
}
|
||||
$.ajax({
|
||||
url: createURL(url),
|
||||
data: {
|
||||
listAll: true,
|
||||
id: projectID
|
||||
},
|
||||
async: false,
|
||||
success: function(json) {
|
||||
project = json.listprojectsresponse.project[0]; // override project after update owner
|
||||
}
|
||||
});
|
||||
selectedProjectObj = project;
|
||||
|
||||
return {
|
||||
projects: [project]
|
||||
};
|
||||
},
|
||||
actions: {
|
||||
edit: {
|
||||
label: 'label.edit',
|
||||
@ -758,7 +809,7 @@
|
||||
},
|
||||
|
||||
tabFilter: function(args) {
|
||||
var project = args.context.projects[0];
|
||||
var project = selectedProjectObj;
|
||||
var projectOwner = project.account;
|
||||
var currentAccount = args.context.users[0].account;
|
||||
var hiddenTabs = [];
|
||||
|
||||
@ -475,6 +475,31 @@ function SharedMountPointURL(server, path) {
|
||||
return url;
|
||||
}
|
||||
|
||||
function rbdURL(monitor, pool, id, secret) {
|
||||
var url;
|
||||
|
||||
/*
|
||||
Replace the + and / symbols by - and _ to have URL-safe base64 going to the API
|
||||
It's hacky, but otherwise we'll confuse java.net.URI which splits the incoming URI
|
||||
*/
|
||||
secret = secret.replace("+", "-");
|
||||
secret = secret.replace("/", "_");
|
||||
|
||||
if (id != null && secret != null) {
|
||||
monitor = id + ":" + secret + "@" + monitor;
|
||||
}
|
||||
|
||||
if(pool.substring(0,1) != "/")
|
||||
pool = "/" + pool;
|
||||
|
||||
if(monitor.indexOf("://")==-1)
|
||||
url = "rbd://" + monitor + pool;
|
||||
else
|
||||
url = monitor + pool;
|
||||
|
||||
return url;
|
||||
}
|
||||
|
||||
function clvmURL(vgname) {
|
||||
var url;
|
||||
if(vgname.indexOf("://")==-1)
|
||||
|
||||
@ -3865,14 +3865,14 @@
|
||||
|
||||
fields: [
|
||||
{
|
||||
name: { label: 'label.zone', isEditable: true }
|
||||
name: { label: 'label.zone', isEditable: true, validation: { required: true } }
|
||||
},
|
||||
{
|
||||
id: { label: 'label.id' },
|
||||
allocationstate: { label: 'label.allocation.state' },
|
||||
dns1: { label: 'label.dns.1', isEditable: true },
|
||||
dns1: { label: 'label.dns.1', isEditable: true, validation: { required: true } },
|
||||
dns2: { label: 'label.dns.2', isEditable: true },
|
||||
internaldns1: { label: 'label.internal.dns.1', isEditable: true },
|
||||
internaldns1: { label: 'label.internal.dns.1', isEditable: true, validation: { required: true } },
|
||||
internaldns2: { label: 'label.internal.dns.2', isEditable: true },
|
||||
domainname: { label: 'label.domain' },
|
||||
networktype: { label: 'label.network.type' },
|
||||
@ -6293,14 +6293,14 @@
|
||||
title: 'label.details',
|
||||
fields: [
|
||||
{
|
||||
name: { label: 'label.name', isEditable: true }
|
||||
name: { label: 'label.name', isEditable: true, validation: { required: true } }
|
||||
},
|
||||
{
|
||||
id: { label: 'label.id' },
|
||||
netmask: { label: 'label.netmask', isEditable: true },
|
||||
startip: { label: 'label.start.IP', isEditable: true },
|
||||
netmask: { label: 'label.netmask', isEditable: true, validation: { required: true } },
|
||||
startip: { label: 'label.start.IP', isEditable: true, validation: { required: true } },
|
||||
endip: { label: 'label.end.IP', isEditable: true },
|
||||
gateway: { label: 'label.gateway', isEditable: true },
|
||||
gateway: { label: 'label.gateway', isEditable: true, validation: { required: true } },
|
||||
allocationstate: {
|
||||
converter: function(str) {
|
||||
// For localization
|
||||
@ -7865,6 +7865,7 @@
|
||||
var items = [];
|
||||
items.push({id: "nfs", description: "nfs"});
|
||||
items.push({id: "SharedMountPoint", description: "SharedMountPoint"});
|
||||
items.push({id: "rbd", description: "RBD"});
|
||||
args.response.success({data: items});
|
||||
}
|
||||
else if(selectedClusterObj.hypervisortype == "XenServer") {
|
||||
@ -8048,6 +8049,27 @@
|
||||
$form.find('.form-item[rel=vCenterDataCenter]').hide();
|
||||
$form.find('.form-item[rel=vCenterDataStore]').hide();
|
||||
}
|
||||
else if(protocol == "rbd") {
|
||||
$form.find('.form-item[rel=rbdmonitor]').css('display', 'inline-block');
|
||||
$form.find('.form-item[rel=rbdmonitor]').find(".name").find("label").text("RADOS Monitor:");
|
||||
|
||||
$form.find('.form-item[rel=rbdpool]').css('display', 'inline-block');
|
||||
$form.find('.form-item[rel=rbdpool]').find(".name").find("label").text("RADOS Pool:");
|
||||
|
||||
$form.find('.form-item[rel=rbdid]').css('display', 'inline-block');
|
||||
$form.find('.form-item[rel=rbdid]').find(".name").find("label").text("RADOS User:");
|
||||
|
||||
$form.find('.form-item[rel=rbdsecret]').css('display', 'inline-block');
|
||||
$form.find('.form-item[rel=rbdsecret]').find(".name").find("label").text("RADOS Secret:");
|
||||
|
||||
$form.find('.form-item[rel=server]').hide();
|
||||
$form.find('.form-item[rel=iqn]').hide();
|
||||
$form.find('.form-item[rel=lun]').hide();
|
||||
$form.find('.form-item[rel=volumegroup]').hide();
|
||||
$form.find('.form-item[rel=path]').hide();
|
||||
$form.find('.form-item[rel=vCenterDataCenter]').hide();
|
||||
$form.find('.form-item[rel=vCenterDataStore]').hide();
|
||||
}
|
||||
else {
|
||||
//$dialogAddPool.find("#add_pool_server_container").show();
|
||||
$form.find('.form-item[rel=server]').css('display', 'inline-block');
|
||||
@ -8116,6 +8138,28 @@
|
||||
isHidden: true
|
||||
},
|
||||
|
||||
// RBD
|
||||
rbdmonitor: {
|
||||
label: 'label.rbd.monitor',
|
||||
validation: { required: true },
|
||||
isHidden: true
|
||||
},
|
||||
rbdpool: {
|
||||
label: 'label.rbd.pool',
|
||||
validation: { required: true },
|
||||
isHidden: true
|
||||
},
|
||||
rbdid: {
|
||||
label: 'label.rbd.id',
|
||||
validation: { required: false },
|
||||
isHidden: true
|
||||
},
|
||||
rbdsecret: {
|
||||
label: 'label.rbd.secret',
|
||||
validation: { required: false },
|
||||
isHidden: true
|
||||
},
|
||||
|
||||
//always appear (begin)
|
||||
storageTags: {
|
||||
label: 'label.storage.tags',
|
||||
@ -8174,6 +8218,14 @@
|
||||
vg = "/" + vg;
|
||||
url = clvmURL(vg);
|
||||
}
|
||||
else if (args.data.protocol == "rbd") {
|
||||
var rbdmonitor = args.data.rbdmonitor;
|
||||
var rbdpool = args.data.rbdpool;
|
||||
var rbdid = args.data.rbdid;
|
||||
var rbdsecret = args.data.rbdsecret;
|
||||
|
||||
url = rbdURL(rbdmonitor, rbdpool, rbdid, rbdsecret);
|
||||
}
|
||||
else if (args.data.protocol == "vmfs") {
|
||||
//var path = trim($thisDialog.find("#add_pool_vmfs_dc").val());
|
||||
var path = args.data.vCenterDataCenter;
|
||||
|
||||
@ -204,11 +204,42 @@
|
||||
}
|
||||
},
|
||||
|
||||
osTypeId: {
|
||||
label: 'label.os.type',
|
||||
osCategory: {
|
||||
label: 'OS Category',
|
||||
select: function(args) {
|
||||
$.ajax({
|
||||
url: createURL("listOsTypes"),
|
||||
url: createURL("listOsCategories"),
|
||||
dataType: "json",
|
||||
async: true,
|
||||
success: function(json) {
|
||||
var osCats = json.listoscategoriesresponse.oscategory;
|
||||
var items = [];
|
||||
if (isAdmin())
|
||||
items.push({id: -1, description: "All OS"});
|
||||
$(osCats).each(function() {
|
||||
items.push({id: this.id, description: this.name});
|
||||
});
|
||||
args.response.success({data: items});
|
||||
}
|
||||
});
|
||||
}
|
||||
},
|
||||
|
||||
osTypeId: {
|
||||
label: 'label.os.type',
|
||||
dependsOn: 'osCategory',
|
||||
select: function(args) {
|
||||
if(args.osCategory == null)
|
||||
return;
|
||||
|
||||
var apiCmd;
|
||||
if(args.osCategory == -1)
|
||||
apiCmd = "listOsTypes";
|
||||
else
|
||||
apiCmd = "listOsTypes&oscategoryid=" + args.osCategory;
|
||||
|
||||
$.ajax({
|
||||
url: createURL(apiCmd),
|
||||
dataType: "json",
|
||||
async: true,
|
||||
success: function(json) {
|
||||
@ -784,14 +815,45 @@
|
||||
isChecked: true
|
||||
},
|
||||
|
||||
osCategory: {
|
||||
label: 'OS Category',
|
||||
dependsOn: 'isBootable',
|
||||
select: function(args) {
|
||||
$.ajax({
|
||||
url: createURL("listOsCategories"),
|
||||
dataType: "json",
|
||||
async: true,
|
||||
success: function(json) {
|
||||
var osCats = json.listoscategoriesresponse.oscategory;
|
||||
var items = [];
|
||||
if (isAdmin())
|
||||
items.push({id: -1, description: "All OS"});
|
||||
$(osCats).each(function() {
|
||||
items.push({id: this.id, description: this.name});
|
||||
});
|
||||
args.response.success({data: items});
|
||||
}
|
||||
});
|
||||
}
|
||||
},
|
||||
|
||||
osTypeId: {
|
||||
label: 'label.os.type',
|
||||
dependsOn: 'isBootable',
|
||||
dependsOn: ['isBootable','osCategory'],
|
||||
isHidden: false,
|
||||
validation: { required: true },
|
||||
select: function(args) {
|
||||
if(args.osCategory == null)
|
||||
return;
|
||||
|
||||
var apiCmd;
|
||||
if(args.osCategory == -1)
|
||||
apiCmd = "listOsTypes";
|
||||
else
|
||||
apiCmd = "listOsTypes&oscategoryid=" + args.osCategory;
|
||||
|
||||
$.ajax({
|
||||
url: createURL("listOsTypes"),
|
||||
url: createURL(apiCmd),
|
||||
dataType: "json",
|
||||
async: true,
|
||||
success: function(json) {
|
||||
|
||||
@ -135,7 +135,8 @@
|
||||
$('<input>')
|
||||
.attr({
|
||||
type: options.secondary.type,
|
||||
name: options.secondary.name
|
||||
name: options.secondary.name,
|
||||
'wizard-field': options.secondary['wizard-field']
|
||||
})
|
||||
.val(id)
|
||||
.click(function() {
|
||||
@ -516,7 +517,8 @@
|
||||
secondary: {
|
||||
desc: 'Default',
|
||||
name: 'defaultNetwork',
|
||||
type: 'radio'
|
||||
type: 'radio',
|
||||
'wizard-field': 'default-network'
|
||||
}
|
||||
})
|
||||
);
|
||||
@ -528,7 +530,8 @@
|
||||
desc: 'description',
|
||||
id: 'id'
|
||||
}, {
|
||||
type: 'checkbox'
|
||||
type: 'checkbox',
|
||||
'wizard-field': 'security-groups'
|
||||
})
|
||||
);
|
||||
|
||||
@ -539,9 +542,6 @@
|
||||
},
|
||||
|
||||
'review': function($step, formData) {
|
||||
return {
|
||||
response: {
|
||||
success: function(args) {
|
||||
$step.find('[wizard-field]').each(function() {
|
||||
var field = $(this).attr('wizard-field');
|
||||
var fieldName;
|
||||
@ -552,24 +552,45 @@
|
||||
if ($input.is('option')) {
|
||||
fieldName = $input.html();
|
||||
} else if ($input.is('input[type=radio]')) {
|
||||
// Choosen New network as default
|
||||
if ($input.parents('div.new-network').size()) {
|
||||
fieldName = $input.closest('div.new-network').find('input[name="new-network-name"]').val();
|
||||
// Choosen Network from existed
|
||||
} else if ($input.parents('div.my-networks').size()) {
|
||||
fieldName = $input.closest('div.select').find('.select-desc .name').html();
|
||||
} else {
|
||||
fieldName = $input.parent().find('.select-desc .name').html();
|
||||
}
|
||||
} else if ($input.eq(0).is('input[type=checkbox]')) {
|
||||
fieldName = '';
|
||||
$input.each(function(index) {
|
||||
if (index != 0) fieldName += '<br />';
|
||||
fieldName += $(this).next('div.select-desc').find('.name').html();
|
||||
});
|
||||
}
|
||||
|
||||
if (fieldName) {
|
||||
$(this).html(fieldName);
|
||||
} else {
|
||||
$(this).html('(' + _l('label.none') + ')');
|
||||
}
|
||||
});
|
||||
|
||||
var conditionalFieldFrom = $(this).attr('conditional-field');
|
||||
if (conditionalFieldFrom) {
|
||||
if ($wizard.find('.'+conditionalFieldFrom).css('display') == 'block') {
|
||||
$(this).closest('div.select').show();
|
||||
} else {
|
||||
$(this).closest('div.select').hide();
|
||||
}
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
// Go to specified step in wizard,
|
||||
// updating nav items and diagram
|
||||
var showStep = function(index) {
|
||||
var showStep = function(index, options) {
|
||||
if (!options) options = {};
|
||||
var targetIndex = index - 1;
|
||||
|
||||
if (index <= 1) targetIndex = 0;
|
||||
|
||||
@@ -38,11 +38,22 @@
$login.appendTo('html body');
$('html body').addClass('login');

// Remove label if field was auto filled
$.each($form.find('label'), function() {
var $label = $(this);
var $input = $form.find('input').filter(function() {
return $(this).attr('name') == $label.attr('for');
});
if ($input.val()) {
$label.hide();
}
});

// Form validation
$form.validate();

// Form label behavior
$inputs.bind('keydown keyup focus blur', function(event) {
$inputs.bind('keydown focus click blur', function(event) {
var $target = $(event.target);
var $label = $form.find('label').filter(function() {
return $(this).attr('for') == $target.attr('name');
@@ -52,11 +63,16 @@
$label.hide();

return true;
} else {
if (!$target.val()) {
} else if (event.type == 'blur') {
if ($target.hasClass('first-input')) {
$target.removeClass('first-input');
}
if (!$(this).val()) {
$label.show();
} else {
$label.hide();
}
} else {
if (!$target.hasClass('first-input')) {
$label.hide();
}
}

@@ -68,11 +84,13 @@
// Labels cause related input to be focused
$login.find('label').click(function() {
var $input = $inputs.filter('[name=' + $(this).attr('for') + ']');
var $label = $(this);

$input.focus();
$label.hide();
});

$inputs.filter(':first').focus();
$inputs.filter(':first').addClass('first-input').focus();

// Login action
$login.find('input[type=submit]').click(function() {

@@ -171,7 +171,8 @@
var $input = $('<input>').attr({
type: 'text',
name: resource.type,
value: resource.value
value: resource.value,
id: resource.type
}).addClass('required');

$field.append($label, $input);
@@ -305,13 +306,15 @@
.append($('<label>').attr('for', 'project-name').html(_l('label.project.name')))
.append($('<input>').addClass('required').attr({
type: 'text',
name: 'project-name'
name: 'project-name',
id: 'project-name'
}));
var $projectDesc = $('<div>').addClass('field desc')
.append($('<label>').attr('for', 'project-desc').html(_l('label.display.text')))
.append($('<input>').attr({
type: 'text',
name: 'project-display-text'
name: 'project-display-text',
id: 'project-desc'
}));
var $submit = $('<input>').attr({ type: 'submit' }).val(_l('label.create.project'));
var $cancel = $('<div>').addClass('button cancel').html(_l('label.cancel'));

@@ -831,6 +831,11 @@
$form.find('.form-item .name').each(function() {
$(this).html($(this).find('label'));
});
$form.find('label[for]').each(function() {
var forAttr = $(this).attr('for');
$form.find('#' + forAttr).attr('id', id + '_' + forAttr);
$(this).attr('for', id + '_' + forAttr)
});

$form.find('select, input').change(function() {
cloudStack.evenOdd($form, '.field:visible', {

@@ -349,9 +349,26 @@

// Events
$(function() {
// Check if target should be hovered
function checkHoveredLabel($target) {
var $multiWizard = $('div.ui-dialog div.multi-wizard');
if (($target.is('label[for]') && !$target.parents('body.login')) ||
($multiWizard.size() &&
($target.is('.multi-wizard label') && $target.prev('input[type="radio"],input[type="checkbox"]').size()) ||
($target.is('.multi-wizard .select-desc div.name') && $target.parent('div.select-desc').prev('input[type="radio"],input[type="checkbox"]').size())
))
return true;

return false;
}

// Rollover behavior for user options
$(document).bind('mouseover', function(event) {
if ($(event.target).closest('#user, #user-options').size()) {
var $target = $(event.target);
if (checkHoveredLabel($target)) {
$target.addClass('label-hovered');
}
if ($target.closest('#user, #user-options').size()) {
return false;
}
else $('#user-options').hide();
@@ -359,11 +376,29 @@
return false;
});

$(document).bind('mouseout', function(event) {
var $target = $(event.target);
if (checkHoveredLabel($target)) {
$target.removeClass('label-hovered');
}
});

$(document).bind('click', function(event) {
var $target = $(event.target);
var $container = $target.closest('[cloudStack-container]');
var args = $container.data('cloudStack-args');
var $browser = $container.find('#browser .container');
var $multiWizard = $('div.ui-dialog div.multi-wizard');

// Wizard: trigger click event for input when click it label
if ($multiWizard.size()) {
if ($target.is('.multi-wizard label') && $target.prev('input[type="radio"],input[type="checkbox"]').size()) {
$target.prev('input').trigger('click');
}
if ($target.is('.multi-wizard .select-desc div.name') && $target.parent('div.select-desc').prev('input[type="radio"],input[type="checkbox"]').size()) {
$target.parent('div.select-desc').prev('input').trigger('click');
}
}

if (!$container.size()) return true;

@@ -117,52 +117,56 @@
var $value = $('<div>').addClass('value')
.appendTo($formItem);
var $input, $dependsOn, selectFn, selectArgs;
var dependsOn = field.dependsOn;
var dependsOn = $.isArray(field.dependsOn) ? field.dependsOn : [field.dependsOn] ; //now an array

// Depends on fields
if (field.dependsOn) {
$formItem.attr('depends-on', dependsOn);
$dependsOn = $form.find('input, select').filter(function() {
return $(this).attr('name') === dependsOn;
});
if (dependsOn.length) {
$.each(dependsOn, function(key, value){
var dependsOn = value;

if ($dependsOn.is('[type=checkbox]')) {
var isReverse = args.form.fields[dependsOn].isReverse;

// Checkbox
$dependsOn.bind('click', function(event) {
var $target = $(this);
var $dependent = $target.closest('form').find('[depends-on=\'' + dependsOn + '\']');

if (($target.is(':checked') && !isReverse) ||
($target.is(':unchecked') && isReverse)) {
$dependent.css('display', 'inline-block');
$dependent.each(function() {
if ($(this).data('dialog-select-fn')) {
$(this).data('dialog-select-fn')();
}
});
} else if (($target.is(':unchecked') && !isReverse) ||
($target.is(':checked') && isReverse)) {
$dependent.hide();
}

$dependent.find('input[type=checkbox]').click();

if (!isReverse) {
$dependent.find('input[type=checkbox]').attr('checked', false);
} else {
$dependent.find('input[type=checkbox]').attr('checked', true);
}

return true;
$formItem.attr('depends-on-'+value, dependsOn);
$dependsOn = $form.find('input, select').filter(function() {
return $(this).attr('name') === dependsOn;
});

// Show fields by default if it is reverse checkbox
if (isReverse) {
$dependsOn.click();
}
}
if ($dependsOn.is('[type=checkbox]')) {
var isReverse = args.form.fields[dependsOn].isReverse;

// Checkbox
$dependsOn.bind('click', function(event) {
var $target = $(this);
var $dependent = $target.closest('form').find('[depends-on-' + value + '=\'' + dependsOn + '\']');

if (($target.is(':checked') && !isReverse) ||
($target.is(':unchecked') && isReverse)) {
$dependent.css('display', 'inline-block');
$dependent.each(function() {
if ($(this).find('select').data('dialog-select-fn')) {
$(this).find('select').data('dialog-select-fn')();
}
});
} else if (($target.is(':unchecked') && !isReverse) ||
($target.is(':checked') && isReverse)) {
$dependent.hide();
}

$dependent.find('input[type=checkbox]').click();

if (!isReverse) {
$dependent.find('input[type=checkbox]').attr('checked', false);
} else {
$dependent.find('input[type=checkbox]').attr('checked', true);
}

return true;
});

// Show fields by default if it is reverse checkbox
if (isReverse) {
$dependsOn.click();
}
};
});
}

// Determine field type of input
@@ -207,31 +211,35 @@
// Pass form item to provider for additional manipulation
$.extend(selectArgs, { $select: $input });

if (dependsOn) {
$dependsOn = $input.closest('form').find('input, select').filter(function() {
return $(this).attr('name') === dependsOn;
if (dependsOn.length) {
$.each(dependsOn, function(key, value){
var dependsOn = value;

$dependsOn = $input.closest('form').find('input, select').filter(function() {
return $(this).attr('name') === dependsOn;
});

$dependsOn.bind('change', function(event) {
var $target = $(this);

if (!$dependsOn.is('select')) return true;

var dependsOnArgs = {};

$input.find('option').remove();

if (!$target.children().size()) return true;

dependsOnArgs[dependsOn] = $target.val();
selectFn($.extend(selectArgs, dependsOnArgs));

return true;
});

if (!$dependsOn.is('select')) {
selectFn(selectArgs);
}
});

$dependsOn.bind('change', function(event) {
var $target = $(this);

if (!$dependsOn.is('select')) return true;

var dependsOnArgs = {};

$input.find('option').remove();

if (!$target.children().size()) return true;

dependsOnArgs[dependsOn] = $target.val();
selectFn($.extend(selectArgs, dependsOnArgs));

return true;
});

if (!$dependsOn.is('select')) {
selectFn(selectArgs);
}
} else {
selectFn(selectArgs);
}
@@ -338,6 +346,10 @@
else
$input.data('validation-rules', {});

var fieldLabel = field.label;
var inputId = $input.attr('id') ? $input.attr('id') : fieldLabel.replace(/\./g,'_');
$input.attr('id', inputId);
$name.find('label').attr('for', inputId);
});

var getFormValues = function() {

@@ -271,6 +271,7 @@
var $panel = args.panel;
var $container = this.element;
var $toHide = $panel.siblings(':not(.always-maximized)');
var $shadow = $toHide.find('div.shadow');

if (args.panel.hasClass('maximized')) {
_breadcrumb.filter($panel).removeClass('maximized');
@@ -279,6 +280,7 @@
_breadcrumb.filter($panel.siblings()).find('span').animate({ opacity: 1 });
$toHide.animate({ left: _panel.position($container, {}) },
{ duration: 500 });
$shadow.show();
} else {
_breadcrumb.filter($panel).addClass('maximized');
$panel.removeClass('reduced');
@@ -286,6 +288,7 @@
_breadcrumb.filter($panel.siblings()).find('span').animate({ opacity: 0.5 });
$toHide.animate(_panel.initialState($container),
{ duration: 500 });
$shadow.hide();
}
},

@@ -836,7 +836,7 @@
actionFilter: actionFilter,
data: data,
context: $detailView.data('view-args').context
}).prependTo($firstRow.closest('div.detail-group').closest('.details'));
});

// 'View all' button
var showViewAll = detailViewArgs.viewAll ?
@@ -846,6 +846,9 @@
context: context
}) : true
) : true;
if ($actions.find('div.action').size() || (detailViewArgs.viewAll && showViewAll)) {
$actions.prependTo($firstRow.closest('div.detail-group').closest('.details'));
}
if (detailViewArgs.viewAll && showViewAll) {
$('<div>')
.addClass('view-all')
@@ -1035,7 +1038,7 @@
);
};

var replaceTabs = function($detailView, $newTabs, tabs, options) {
var replaceTabs = function($detailView, tabs, options) {
var $detailViewElems = $detailView.find('ul.ui-tabs-nav, .detail-group');
$detailView.tabs('destroy');
$detailViewElems.remove();
@@ -1054,25 +1057,32 @@
);
};

$.fn.detailView = function(args) {
$.fn.detailView = function(args, options) {
var $detailView = this;

$detailView.addClass('detail-view');
$detailView.data('view-args', args);
if (options == 'refresh') {
var $tabs = replaceTabs($detailView, args.tabs, {
context: args.context,
tabFilter: args.tabFilter
});
} else {
$detailView.addClass('detail-view');
$detailView.data('view-args', args);

if (args.$listViewRow) {
$detailView.data('list-view-row', args.$listViewRow);
if (args.$listViewRow) {
$detailView.data('list-view-row', args.$listViewRow);
}

// Create toolbar
var $toolbar = makeToolbar().appendTo($detailView);

// Create tabs
var $tabs = makeTabs($detailView, args.tabs, {
context: args.context,
tabFilter: args.tabFilter
}).appendTo($detailView);
}

// Create toolbar
var $toolbar = makeToolbar().appendTo($detailView);

// Create tabs
var $tabs = makeTabs($detailView, args.tabs, {
context: args.context,
tabFilter: args.tabFilter
}).appendTo($detailView);

$detailView.tabs();

return $detailView;
@@ -1151,4 +1161,17 @@

return true;
});

// Detail view refresh handler
$(window).bind('cloudStack.detailsRefresh', function() {
var $detailView = $('.detail-view');

$detailView.each(function() {
var $detailView = $(this),
args = $detailView.data('view-args');

$detailView.detailView(args, 'refresh');
});
});

}(window.jQuery, window.cloudStack, window._l));

@@ -675,6 +675,7 @@
var context = args.context;
var ignoreEmptyFields = args.ignoreEmptyFields;
var actionPreFilter = args.actionPreFilter;
var readOnlyCheck = args.readOnlyCheck;

var $thead = $('<tr>').appendTo(
$('<thead>').appendTo($inputTable)
@@ -935,6 +936,11 @@
).appendTo($dataBody);
});

if (readOnlyCheck && !readOnlyCheck(args)) {
$multi.find('th.add-user, td.add-user').detach();
$multiForm.find('tbody').detach();
}

_medit.refreshItemWidths($multi);
},
error: cloudStack.dialog.error

@@ -10,6 +10,7 @@ import Utils,Node,Options,Logs,Scripting,Environment,Build,Configure
from os import unlink as _unlink, makedirs as _makedirs, getcwd as _getcwd, chdir as _chdir
from os.path import abspath as _abspath, basename as _basename, dirname as _dirname, exists as _exists, isdir as _isdir, split as _split, join as _join, sep, pathsep, pardir, curdir
from glob import glob as _glob
from subprocess import Popen as _Popen,PIPE
try: set([1,2,3])
except Exception: from Sets import set
import re
@@ -31,7 +32,49 @@ filelist = bld.path.ant_glob
distdir = Utils.relpath(_join(sourcedir,"dist"))
targetdir = Utils.relpath(_join(sourcedir,"target"))

def gitinfo(dir=None):
if dir and not _isdir(dir): return ''
try: p = _Popen(['git','remote','show','-n','origin'],stdin=PIPE,stdout=PIPE,stderr=PIPE,cwd=dir)
except OSError,e:
if e.errno == 2: return '' # svn command is not installed
raise
stdout,stderr = p.communicate('')
retcode = p.wait()
# If the guess fails, just return nothing.
if retcode: return
stdout = [ s.strip() for s in stdout.splitlines() ]
try: url = [ s[11:] for s in stdout if s.startswith("Fetch URL") ][0]
except IndexError: url = [ s[5:] for s in stdout if s.startswith("URL") ][0]
assert url

p = _Popen(['git','log','-1'],stdin=PIPE,stdout=PIPE,stderr=PIPE,cwd=dir)
stdout,stderr = p.communicate('')
retcode = p.wait()
if retcode: return
# If the guess fails, just return nothing.
stdout = [ s.strip() for s in stdout.splitlines() ]
commitid = [ s.split()[1] for s in stdout if s.startswith("commit") ][0]
assert commitid

return "Git Revision: %s"%commitid + "\n" + "Git URL: %s"%url + "\n"

def build_utils_docs ():
stdout = gitinfo()
if stdout:
f = file("sccs-info","w")
f.write(stdout)
f.flush()
f.close()
else:
if _exists("sccs-info"):
# If the file already existed, we preserve it
return
else:
f = file("sccs-info","w")
f.write("No revision control information could be detected when the source distribution was built.")
f.flush()
f.close()

sccsinfo = _join(sourcedir,"sccs-info")
if _exists(sccsinfo): bld.install_files("${DOCDIR}","sccs-info")

@@ -88,19 +131,6 @@ def build_jars ():
bld.install_files ('${JAVADIR}', ant_jars)


#def build_python_and_daemonize ():
# obj = bld(features = 'py',name='pythonmodules')
# obj.find_sources_in_dirs('python/lib', exts=['.py'])
#
# if bld.env.DISTRO not in ['Windows','Mac']:
# # build / install declarations of the daemonization utility - except for Windows
# bld(
# name='daemonize',
# features='cc cprogram',
# source='daemonize/daemonize.c',
# target='daemonize/cloud-daemonize'
# )

def build_premium ():
if buildpremium: bld.recurse(["cloudstack-proprietary/"],'build')

@@ -388,7 +418,6 @@ def build_usage_dir ():
# Get started to execute here
build_utils_docs ()
build_jars ()
#build_python_and_daemonize ()
build_premium ()
#build_thirdparty_dir()
build_dependences ()