CLOUDSTACK-57, CLOUDSTACK-14

remove unnecessary build/deploy folder, and add CAStorSDK-1.3.1-CS40.jar into rpm/deb build system

Conflicts:

	debian/cloud-deps.install
	wscript_build
This commit is contained in:
Edison Su 2012-09-12 20:47:20 -07:00
parent 4c3ab1afb6
commit 5e3921519a
29 changed files with 2 additions and 2927 deletions

View File

@ -119,7 +119,6 @@
<property name="tools.dir" location="${base.dir}/tools" />
<!-- <property name="antcontrib.dir" location="${tools.dir}/tools/ant/apache-ant-1.8.0/lib" />-->
<property name="deploy.dir" location="${build.dir}/deploy" />
<property name="production.dir" location="${deploy.dir}/production" />
<property name="meld.home" location="/usr/local/bin" />
<property name="assertion" value="-da" />

View File

@ -61,7 +61,6 @@
<import file="${build.dir}/build-common.xml" />
<!-- In case these didn't get defined in the build-cloud.properties -->
<property name="branding.name" value="default" />
<property name="tomcat.home" value="${env.CATALINA_HOME}" />
<property name="deprecation" value="off" />
<property name="target.compat.version" value="1.6" />
@ -113,17 +112,12 @@
<property name="tools.dir" location="${base.dir}/tools" />
<!-- <property name="antcontrib.dir" location="${tools.dir}/tools/ant/apache-ant-1.8.0/lib" />-->
<property name="deploy.dir" location="${build.dir}/deploy" />
<property name="production.dir" location="${deploy.dir}/production" />
<property name="meld.home" location="/usr/local/bin" />
<property name="assertion" value="-da" />
<!-- directory for vmware-base library -->
<property name="vmware-base.dir" location="${base.dir}/vmware-base" />
<!-- directories for branding -->
<property name="branding.dir" location="${build.dir}/deploy/branding/${branding.name}" />
<property name="core.jar" value="cloud-core.jar" />
<property name="utils.jar" value="cloud-utils.jar" />
<property name="server.jar" value="cloud-server.jar" />
@ -374,7 +368,7 @@
</fileset>
</copy>
<copy todir="${copyto.dir}/conf">
<fileset dir="${production.dir}/consoleproxy/conf">
<fileset dir="${console-proxy.dir}/conf">
<include name="log4j-cloud.xml" />
<include name="consoleproxy.properties" />
</fileset>
@ -487,17 +481,9 @@
<target name="build-servers" depends="-init, build-server" />
<target name="build-opensource" depends="-init, build-server, build-agent, build-scripts, build-ui, build-console-proxy, package-oss-systemvm-iso">
<copy overwrite="true" todir="${dist.dir}">
<fileset dir="${base.dir}/build/deploy/">
<include name="deploy-agent.sh" />
<include name="deploy-server.sh" />
<include name="deploy-console-proxy.sh" />
<include name="install.sh" />
</fileset>
<fileset dir="${base.dir}/client">
<include name="setup/**/*" />
</fileset>
</copy>
<copy overwrite="true" todir="${jar.dir}">
<fileset dir="${deps.dir}">
@ -505,8 +491,6 @@
</fileset>
</copy>
<chmod file="${dist.dir}/deploy-agent.sh" perm="uog+xr" />
<chmod file="${dist.dir}/deploy-server.sh" perm="uog+xr" />
</target>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 5.2 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 3.0 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.7 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.8 KiB

View File

@ -1,126 +0,0 @@
#!/usr/bin/env bash
#
# deploy-db.sh -- deploys the database configuration.
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# set -x
if [ "$1" == "" ]; then
printf "Usage: %s [path to additional sql] [root password]\n" $(basename $0) >&2
exit 1;
fi
if [ ! -f $1 ]; then
echo "Error: Unable to find $1"
exit 2
fi
if [ "$2" != "" ]; then
if [ ! -f $2 ]; then
echo "Error: Unable to find $2"
exit 3
fi
fi
if [ ! -f create-database.sql ]; then
printf "Error: Unable to find create-database.sql\n"
exit 4
fi
if [ ! -f create-schema.sql ]; then
printf "Error: Unable to find create-schema.sql\n"
exit 5
fi
if [ ! -f create-index-fk.sql ]; then
printf "Error: Unable to find create-index-fk.sql\n"
exit 6;
fi
PATHSEP=':'
if [[ $OSTYPE == "cygwin" ]] ; then
export CATALINA_HOME=`cygpath -m $CATALINA_HOME`
PATHSEP=';'
else
mysql="mysql"
service mysql status > /dev/null 2>/dev/null
if [ $? -eq 1 ]; then
mysql="mysqld"
service mysqld status > /dev/null 2>/dev/null
if [ $? -ne 0 ]; then
printf "Unable to find mysql daemon\n"
exit 7
fi
fi
echo "Starting mysql"
service $mysql start > /dev/null 2>/dev/null
fi
echo "Recreating Database."
mysql --user=root --password=$3 < create-database.sql > /dev/null 2>/dev/null
mysqlout=$?
if [ $mysqlout -eq 1 ]; then
printf "Please enter root password for MySQL.\n"
mysql --user=root --password < create-database.sql
if [ $? -ne 0 ]; then
printf "Error: Cannot execute create-database.sql\n"
exit 10
fi
elif [ $mysqlout -ne 0 ]; then
printf "Error: Cannot execute create-database.sql\n"
exit 11
fi
mysql --user=cloud --password=cloud cloud < create-schema.sql
if [ $? -ne 0 ]; then
printf "Error: Cannot execute create-schema.sql\n"
exit 11
fi
mysql --user=cloud --password=cloud cloud < create-schema-premium.sql
if [ $? -ne 0 ]; then
printf "Error: Cannot execute create-schema-premium.sql\n"
exit 11
fi
if [ "$1" != "" ]; then
mysql --user=cloud --password=cloud cloud < $1
if [ $? -ne 0 ]; then
printf "Error: Cannot execute $1\n"
exit 12
fi
fi
if [ "$2" != "" ]; then
echo "Adding Templates"
mysql --user=cloud --password=cloud cloud < $2
if [ $? -ne 0 ]; then
printf "Error: Cannot execute $2\n"
exit 12
fi
fi
echo "Creating Indice and Foreign Keys"
mysql --user=cloud --password=cloud cloud < create-index-fk.sql
if [ $? -ne 0 ]; then
printf "Error: Cannot execute create-index-fk.sql\n"
exit 13
fi

View File

@ -1,24 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ABSOLUTE} %5p %c{1}:%L - %m%n
log4j.appender.stdout.threshold=ERROR
log4j.rootLogger=INFO, stdout
log4j.category.org.apache=INFO, stdout

View File

@ -1,232 +0,0 @@
#!/usr/bin/env bash
#
# install.sh -- installs an agent
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
usage() {
printf "Usage: %s: -d [directory to deploy to] -t [routing|storage|computing] -z [zip file] -h [host] -p [pod] -c [data center] -m [expert|novice|setup]\n" $(basename $0) >&2
}
mode=
host=
pod=
zone=
deploydir=
confdir=
zipfile=
typ=
#set -x
while getopts 'd:z:t:x:m:h:p:c:' OPTION
do
case "$OPTION" in
d) deploydir="$OPTARG"
;;
z) zipfile="$OPTARG"
;;
t) typ="$OPTARG"
;;
m) mode="$OPTARG"
;;
h) host="$OPTARG"
;;
p) pod="$OPTARG"
;;
c) zone="$OPTARG"
;;
?) usage
exit 2
;;
esac
done
printf "NOTE: You must have root privileges to install and run this program.\n"
if [ "$typ" == "" ]; then
if [ "$mode" != "expert" ]
then
printf "Type of agent to install [routing|computing|storage]: "
read typ
fi
fi
if [ "$typ" != "computing" ] && [ "$typ" != "routing" ] && [ "$typ" != "storage" ]
then
printf "ERROR: The choices are computing, routing, or storage.\n"
exit 4
fi
if [ "$host" == "" ]; then
if [ "$mode" != "expert" ]
then
printf "Host name or ip address of management server [Required]: "
read host
if [ "$host" == "" ]; then
printf "ERROR: Host is required\n"
exit 23;
fi
fi
fi
port=
if [ "$mode" != "expert" ]
then
printf "Port number of management server [defaults to 8250]: "
read port
fi
if [ "$port" == "" ]
then
port=8250
fi
if [ "$zone" == "" ]; then
if [ "$mode" != "expert" ]; then
printf "Availability Zone [Required]: "
read zone
if [ "$zone" == "" ]; then
printf "ERROR: Zone is required\n";
exit 21;
fi
fi
fi
if [ "$pod" == "" ]; then
if [ "$mode" != "expert" ]; then
printf "Pod [Required]: "
read pod
if [ "$pod" == "" ]; then
printf "ERROR: Pod is required\n";
exit 22;
fi
fi
fi
workers=
if [ "$mode" != "expert" ]; then
printf "# of workers to start [defaults to 3]: "
read workers
fi
if [ "$workers" == "" ]; then
workers=3
fi
if [ "$deploydir" == "" ]; then
if [ "$mode" != "expert" ]; then
printf "Directory to deploy to [defaults to /usr/local/vmops/agent]: "
read deploydir
fi
if [ "$deploydir" == "" ]; then
deploydir="/usr/local/vmops/agent"
fi
fi
if ! mkdir -p $deploydir
then
printf "ERROR: Unable to create $deploydir\n"
exit 5
fi
if [ "$zipfile" == "" ]; then
if [ "$mode" != "expert" ]; then
printf "Path of the zip file [defaults to agent.zip]: "
read zipfile
fi
if [ "$zipfile" == "" ]; then
zipfile="agent.zip"
fi
fi
if ! unzip -o $zipfile -d $deploydir
then
printf "ERROR: Unable to unzip $zipfile to $deploydir\n"
exit 6
fi
#if ! chmod -R +x $deploydir/scripts/*.sh
#then
# printf "ERROR: Unable to change scripts to executable.\n"
# exit 7
#fi
#if ! chmod -R +x $deploydir/scripts/iscsi/*.sh
#then
# printf "ERROR: Unable to change scripts to executable.\n"
# exit 8
#fi
#if ! chmod -R +x $deploydir/*.sh
#then
# printf "ERROR: Unable to change scripts to executable.\n"
# exit 9
#fi
if [ "$mode" == "setup" ]; then
mode="expert"
deploydir="/usr/local/vmops/agent"
confdir="/etc/vmops"
/bin/cp -f $deploydir/conf/agent.properties $confdir/agent.properties
if [ $? -gt 0 ]; then
printf "ERROR: Failed to copy the agent.properties file into the right place."
exit 10;
fi
else
confdir="$deploydir/conf"
fi
if [ "$typ" != "" ]; then
sed s/@TYPE@/"$typ"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Type is not set\n"
fi
if [ "$host" != "" ]; then
sed s/@HOST@/"$host"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: host is not set\n"
fi
if [ "$port" != "" ]; then
sed s/@PORT@/"$port"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Port is not set\n"
fi
if [ "$pod" != "" ]; then
sed s/@POD@/"$pod"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Pod is not set\n"
fi
if [ "$zone" != "" ]; then
sed s/@ZONE@/"$zone"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Zone is not set\n"
fi
if [ "$workers" != "" ]; then
sed s/@WORKERS@/"$workers"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Workers is not set\n"
fi
printf "SUCCESS: Installation is now complete. If you like to make changes, edit $confdir/agent.properties\n"
exit 0

View File

@ -1,90 +0,0 @@
#!/usr/bin/env bash
#
# Deploy console proxy package to an existing VM template
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
usage() {
printf "Usage: %s: -d [work directory to deploy to] -z [zip file]" $(basename $0) >&2
}
deploydir=
zipfile=
#set -x
while getopts 'd:z:' OPTION
do
case "$OPTION" in
d) deploydir="$OPTARG"
;;
z) zipfile="$OPTARG"
;;
?) usage
exit 2
;;
esac
done
printf "NOTE: You must have root privileges to install and run this program.\n"
if [ "$deploydir" == "" ]; then
printf "ERROR: Unable to find deployment work directory $deploydir\n"
exit 3;
fi
if [ ! -f $deploydir/consoleproxy.tar.gz ]
then
printf "ERROR: Unable to find existing console proxy template file (consoleproxy.tar.gz) to work on at $deploydir\n"
exit 5
fi
if [ "$zipfile" == "" ]; then
zipfile="console-proxy.zip"
fi
if ! mkdir -p /mnt/consoleproxy
then
printf "ERROR: Unable to create /mnt/consoleproxy for mounting template image\n"
exit 5
fi
tar xvfz $deploydir/consoleproxy.tar.gz -C $deploydir
mount -o loop $deploydir/vmi-root-fc8-x86_64-domP /mnt/consoleproxy
if ! unzip -o $zipfile -d /mnt/consoleproxy/usr/local/vmops/consoleproxy
then
printf "ERROR: Unable to unzip $zipfile to $deploydir\n"
exit 6
fi
umount /mnt/consoleproxy
pushd $deploydir
tar cvf consoleproxy.tar vmi-root-fc8-x86_64-domP
mv -f consoleproxy.tar.gz consoleproxy.tar.gz.old
gzip consoleproxy.tar
popd
if [ ! -f $deploydir/consoleproxy.tar.gz ]
then
mv consoleproxy.tar.gz.old consoleproxy.tar.gz
printf "ERROR: failed to deploy and recreate the template at $deploydir\n"
fi
printf "SUCCESS: Installation is now complete. please go to $deploydir to review it\n"
exit 0

View File

@ -1,121 +0,0 @@
#!/usr/bin/env bash
#
# deploy.sh -- deploys a management server
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
usage() {
printf "Usage: %s: -d [tomcat directory to deploy to] -z [zip file to use]\n" $(basename $0) >&2
}
dflag=
zflag=
tflag=
iflag=
deploydir=
zipfile="client.zip"
typ=
#set -x
while getopts 'd:z:x:h:' OPTION
do
case "$OPTION" in
d) dflag=1
deploydir="$OPTARG"
;;
z) zflag=1
zipfile="$OPTARG"
;;
h) iflag="$OPTARG"
;;
?) usage
exit 2
;;
esac
done
if [ "$deploydir" == "" ]
then
if [ "$CATALINA_HOME" == "" ]
then
printf "Tomcat Directory to deploy to: "
read deploydir
else
deploydir="$CATALINA_HOME"
fi
fi
if [ "$deploydir" == "" ]
then
printf "Tomcat directory was not specified\n";
exit 15;
fi
printf "Check to see if the Tomcat directory exist: $deploydir\n"
if [ ! -d $deploydir ]
then
printf "Tomcat directory does not exist\n";
exit 16;
fi
if [ "$zipfile" == "" ]
then
printf "Path of the zip file [defaults to client.zip]: "
read zipfile
if [ "$zipfile" == "" ]
then
zipfile="client.zip"
fi
fi
if ! unzip -o $zipfile client.war
then
exit 6
fi
rm -fr $deploydir/webapps/client
if ! unzip -o ./client.war -d $deploydir/webapps/client
then
exit 10;
fi
rm -f ./client.war
if ! unzip -o $zipfile lib/* -d $deploydir
then
exit 11;
fi
if ! unzip -o $zipfile conf/* -d $deploydir
then
exit 12;
fi
if ! unzip -o $zipfile bin/* -d $deploydir
then
exit 13;
fi
printf "Adding the conf directory to the class loader for tomcat\n"
sed 's/shared.loader=$/shared.loader=\$\{catalina.home\},\$\{catalina.home\}\/conf\
/' $deploydir/conf/catalina.properties > $deploydir/conf/catalina.properties.tmp
mv $deploydir/conf/catalina.properties.tmp $deploydir/conf/catalina.properties
printf "Installation is now complete\n"
exit 0

View File

@ -1,200 +0,0 @@
#!/usr/bin/env bash
#
# install.sh -- installs an agent
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
usage() {
printf "Usage: %s: -d [directory to deploy to] -z [zip file] -h [host] -p [pod] -c [data center] -m [expert|novice|setup]\n" $(basename $0) >&2
}
mode=
host=
pod=
zone=
deploydir=
confdir=
zipfile=
typ=
#set -x
while getopts 'd:z:x:m:h:p:c:' OPTION
do
case "$OPTION" in
d) deploydir="$OPTARG"
;;
z) zipfile="$OPTARG"
;;
m) mode="$OPTARG"
;;
h) host="$OPTARG"
;;
p) pod="$OPTARG"
;;
c) zone="$OPTARG"
;;
?) usage
exit 2
;;
esac
done
printf "NOTE: You must have root privileges to install and run this program.\n"
if [ "$mode" == "setup" ]; then
mode="expert"
deploydir="/usr/local/cloud/agent-simulator"
confdir="/etc/cloud"
/bin/cp -f $deploydir/conf/agent.properties $confdir/agent.properties
if [ $? -gt 0 ]; then
printf "ERROR: Failed to copy the agent.properties file into the right place."
exit 10;
fi
else
confdir="$deploydir/conf"
fi
if [ "$host" == "" ]; then
if [ "$mode" != "expert" ]
then
printf "Host name or ip address of management server [Required]: "
read host
if [ "$host" == "" ]; then
printf "ERROR: Host is required\n"
exit 23;
fi
fi
fi
port=
if [ "$mode" != "expert" ]
then
printf "Port number of management server [defaults to 8250]: "
read port
fi
if [ "$port" == "" ]
then
port=8250
fi
if [ "$zone" == "" ]; then
if [ "$mode" != "expert" ]; then
printf "Availability Zone [Required]: "
read zone
if [ "$zone" == "" ]; then
printf "ERROR: Zone is required\n";
exit 21;
fi
fi
fi
if [ "$pod" == "" ]; then
if [ "$mode" != "expert" ]; then
printf "Pod [Required]: "
read pod
if ["$pod" == ""]; then
printf "ERROR: Pod is required\n";
exit 22;
fi
fi
fi
workers=
if [ "$mode" != "expert" ]; then
printf "# of workers to start [defaults to 3]: "
read workers
fi
if [ "$workers" == "" ]; then
workers=3
fi
if [ "$deploydir" == "" ]; then
if [ "$mode" != "expert" ]; then
printf "Directory to deploy to [defaults to /usr/local/cloud/agent-simulator]: "
read deploydir
fi
if [ "$deploydir" == "" ]; then
deploydir="/usr/local/cloud/agent-simulator"
fi
fi
if ! mkdir -p $deploydir
then
printf "ERROR: Unable to create $deploydir\n"
exit 5
fi
if [ "$zipfile" == "" ]; then
if [ "$mode" != "expert" ]; then
printf "Path of the zip file [defaults to agent-simulator.zip]: "
read zipfile
fi
if [ "$zipfile" == "" ]; then
zipfile="agent-simulator.zip"
fi
fi
if ! unzip -o $zipfile -d $deploydir
then
printf "ERROR: Unable to unzip $zipfile to $deploydir\n"
exit 6
fi
if ! chmod +x $deploydir/*.sh
then
printf "ERROR: Unable to change scripts to executable.\n"
exit 9
fi
if [ "$host" != "" ]; then
sed s/@HOST@/"$host"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: host is not set\n"
fi
if [ "$port" != "" ]; then
sed s/@PORT@/"$port"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Port is not set\n"
fi
if [ "$pod" != "" ]; then
sed s/@POD@/"$pod"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Pod is not set\n"
fi
if [ "$zone" != "" ]; then
sed s/@ZONE@/"$zone"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Zone is not set\n"
fi
if [ "$workers" != "" ]; then
sed s/@WORKERS@/"$workers"/ $confdir/agent.properties > $confdir/tmp
/bin/mv -f $confdir/tmp $confdir/agent.properties
else
printf "INFO: Workers is not set\n"
fi
printf "SUCCESS: Installation is now complete. If you like to make changes, edit $confdir/agent.properties\n"
exit 0

View File

@ -1,149 +0,0 @@
#!/usr/bin/env bash
#
# install-storage-server.sh: Installs a VMOps Storage Server
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
choose_correct_filename() {
local default_filename=$1
local user_specified_filename=$2
if [ -f "$user_specified_filename" ]
then
echo $user_specified_filename
return 0
else
if [ -f "$default_filename" ]
then
echo $default_filename
return 0
else
echo ""
return 1
fi
fi
}
install_opensolaris_package() {
pkg_name=$1
pkg info $pkg_name >> /dev/null
if [ $? -gt 0 ]
then
# The package is not installed, so install it
pkg install $pkg_name
return $?
else
# The package is already installed
return 0
fi
}
exit_if_error() {
return_code=$1
msg=$2
if [ $return_code -gt 0 ]
then
echo $msg
exit 1
fi
}
usage() {
printf "Usage: ./install-storage-server.sh <path to agent.zip> <path to templates.tar.gz>"
}
AGENT_FILE=$(choose_correct_filename "./agent.zip" $1)
exit_if_error $? "Please download agent.zip to your Storage Server."
TEMPLATES_FILE=$(choose_correct_filename "./templates.tar.gz" $2)
exit_if_error $? "Please download templates.tar.gz to your Storage Server."
VMOPS_DIR="/usr/local/vmops"
AGENT_DIR="/usr/local/vmops/agent"
CONF_DIR="/etc/vmops"
TEMPLATES_DIR="/root/template"
# Make all the necessary directories if they don't already exist
echo "Creating VMOps directories..."
for dir in $VMOPS_DIR $CONF_DIR $TEMPLATES_DIR
do
mkdir -p $dir
done
# Unzip agent.zip to $AGENT_DIR
echo "Uncompressing and installing VMOps Storage Agent..."
unzip -o $AGENT_FILE -d $AGENT_DIR >> /dev/null
# Remove agent/conf/agent.properties, since we should use the file in the real configuration directory
rm $AGENT_DIR/conf/agent.properties
# Backup any existing VMOps configuration files, if there aren't any backups already
if [ ! -d $CONF_DIR/BACKUP ]
then
echo "Backing up existing configuration files..."
mkdir -p $CONF_DIR/BACKUP
cp $CONF_DIR/*.properties $CONF_DIR/BACKUP >> /dev/null
fi
# Copy all the files in storagehdpatch to their proper places
echo "Installing system files..."
(cd $AGENT_DIR/storagehdpatch; tar cf - .) | (cd /; tar xf -)
exit_if_error $? "There was a problem with installing system files. Please contact VMOps Support."
# Make vsetup executable
chmod +x /usr/sbin/vsetup
# Make vmops executable
chmod +x /lib/svc/method/vmops
# Uncompress the templates and copy them to the templates directory
echo "Uncompressing templates..."
tar -xzf $TEMPLATES_FILE -C $TEMPLATES_DIR >> /dev/null
exit_if_error $? "There was a problem with uncompressing templates. Please contact VMOps Support."
# Install the storage-server package, if it is not already installed
echo "Installing OpenSolaris storage server package..."
install_opensolaris_package "storage-server"
exit_if_error $? "There was a problem with installing the storage server package. Please contact VMOps Support."
echo "Installing COMSTAR..."
install_opensolaris_package "SUNWiscsit"
exit_if_error $? "Unable to install COMSTAR iscsi target. Please contact VMOps Support."
# Install the SUNWinstall-test package, if it is not already installed
echo "Installing OpenSolaris test tools package..."
install_opensolaris_package "SUNWinstall-test"
exit_if_error $? "There was a problem with installing the test tools package. Please contact VMOps Support."
# Print a success message
printf "\nSuccessfully installed the VMOps Storage Server.\n"
printf "Please complete the following steps to configure your networking settings and storage pools:\n\n"
printf "1. Specify networking settings in /etc/vmops/network.properties\n"
printf "2. Run \"vsetup networking\" and then specify disk settings in /etc/vmops/disks.properties\n"
printf "3. Run \"vsetup zpool\" and reboot the machine when prompted.\n\n"

View File

@ -1,155 +0,0 @@
#!/bin/bash
#
# install.sh -- installs MySQL, Java, Tomcat, and the VMOps server
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#set -x
set -e
EX_NOHOSTNAME=15
EX_SELINUX=16
function usage() {
printf "Usage: %s [path to server-setup.xml]\n" $(basename $0) >&2
exit 64
}
function checkhostname() {
if hostname | grep -qF . ; then true ; else
echo "You need to have a fully-qualified host name for the setup to work." > /dev/stderr
echo "Please use your operating system's network setup tools to set one." > /dev/stderr
exit $EX_NOHOSTNAME
fi
}
function checkselinux() {
#### before checking arguments, make sure SELINUX is "permissible" in /etc/selinux/config
if /usr/sbin/getenforce | grep -qi enforcing ; then borked=1 ; fi
if grep -i SELINUX=enforcing /etc/selinux/config ; then borked=1 ; fi
if [ "$borked" == "1" ] ; then
echo "SELINUX is set to enforcing, please set it to permissive in /etc/selinux/config" > /dev/stderr
echo "then reboot the machine, after which you can run the install script again." > /dev/stderr
exit $EX_SELINUX
fi
}
checkhostname
checkselinux
if [ "$1" == "" ]; then
usage
fi
if [ ! -f $1 ]; then
echo "Error: Unable to find $1" > /dev/stderr
exit 2
fi
#### check that all files exist
if [ ! -f apache-tomcat-6.0.18.tar.gz ]; then
printf "Error: Unable to find apache-tomcat-6.0.18.tar.gz\n" > /dev/stderr
exit 3
fi
if [ ! -f MySQL-client-5.1.30-0.glibc23.x86_64.rpm ]; then
printf "Error: Unable to find MySQL-client-5.1.30-0.glibc23.x86_64.rpm\n" > /dev/stderr
exit 4
fi
if [ ! -f MySQL-server-5.1.30-0.glibc23.x86_64.rpm ]; then
printf "Error: Unable to find MySQL-server-5.1.30-0.glibc23.x86_64.rpm\n" > /dev/stderr
exit 5
fi
if [ ! -f jdk-6u13-linux-amd64.rpm.bin ]; then
printf "Error: Unable to find jdk-6u13-linux-amd64.rpm.bin\n" > /dev/stderr
exit 6
fi
#if [ ! -f osol.tar.bz2 ]; then
# printf "Error: Unable to find osol.tar.bz2\n"
# exit 7
#fi
if [ ! -f apache-tomcat-6.0.18.tar.gz ]; then
printf "Error: Unable to find apache-tomcat-6.0.18.tar.gz\n" > /dev/stderr
exit 8
fi
if [ ! -f vmops-*.zip ]; then
printf "Error: Unable to find vmops install file\n" > /dev/stderr
exit 9
fi
if [ ! -f catalina ] ; then
printf "Error: Unable to find catalina initscript\n" > /dev/stderr
exit 10
fi
if [ ! -f usageserver ] ; then
printf "Error: Unable to find usageserver initscript\n" > /dev/stderr
exit 11
fi
###### install Apache
# if [ ! -d /usr/local/tomcat ] ; then
echo "installing Apache..."
mkdir -p /usr/local/tomcat
tar xfz apache-tomcat-6.0.18.tar.gz -C /usr/local/tomcat
ln -s /usr/local/tomcat/apache-tomcat-6.0.18 /usr/local/tomcat/current
# fi
# if [ ! -f /etc/profile.d/catalinahome.sh ] ; then
# echo "export CATALINA_HOME=/usr/local/tomcat/current" >> /etc/profile.d/catalinahome.sh
# fi
source /etc/profile.d/catalinahome.sh
# if [ ! -f /etc/init.d/catalina ] ; then
cp -f catalina /etc/init.d
/sbin/chkconfig catalina on
# fi
####### set up usage server as a service
if [ ! -f /etc/init.d/usageserver ] ; then
cp -f usageserver /etc/init.d
/sbin/chkconfig usageserver on
fi
##### set up mysql
if rpm -q MySQL-server MySQL-client > /dev/null 2>&1 ; then true ; else
echo "installing MySQL..."
yum localinstall --nogpgcheck -y MySQL-*.rpm
fi
#### install JDK
echo "installing JDK..."
sh jdk-6u13-linux-amd64.rpm.bin
rm -rf /usr/bin/java
ln -s /usr/java/default/bin/java /usr/bin/java
#### setting up OSOL image
#mkdir -p $CATALINA_HOME/webapps/images
#echo "copying Open Solaris image, this may take a few moments..."
#cp osol.tar.bz2 $CATALINA_HOME/webapps/images
#### deploying database
unzip -o vmops-*.zip
cd vmops-*
sh deploy-server.sh -d "$CATALINA_HOME"
cd db
sh deploy-db.sh "../../$1" templates.sql
exit 0

View File

@ -1,23 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
consoleproxy.tcpListenPort=0
consoleproxy.httpListenPort=80
consoleproxy.httpCmdListenPort=8001
consoleproxy.jarDir=./applet/
consoleproxy.viewerLinger=180
consoleproxy.reconnectMaxRetry=5

View File

@ -1,550 +0,0 @@
<?xml version="1.0" encoding="ISO-8859-1"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<data>
<version>2.0</version>
<zones>
<zone>
<id>1</id>
<name>AH</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>100-199</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>2</id>
<name>KM</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>200-299</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>3</id>
<name>KY</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>300-399</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>4</id>
<name>WC</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>400-499</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>5</id>
<name>CV</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>500-599</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>6</id>
<name>KS</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>600-699</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>7</id>
<name>ES</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>700-799</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>8</id>
<name>RC</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>800-899</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>9</id>
<name>AX</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>900-999</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>10</id>
<name>JW</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>900-999</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
<zone>
<id>11</id>
<name>AJ</name>
<dns1>72.52.126.11</dns1>
<dns2>72.52.126.12</dns2>
<internalDns1>192.168.10.253</internalDns1>
<internalDns2>192.168.10.254</internalDns2>
<vnet>1000-1099</vnet>
<guestNetworkCidr>10.1.1.0/24</guestNetworkCidr>
</zone>
</zones>
<!--
<storagePools>
<storagePool>
<zoneId>5</zoneId>
<name>sol10-2</name>
<hostAddress>sol10-2</hostAddress>
<hostPath>/tank/cloud-nfs/</hostPath>
</storagePool>
</storagePools>
-->
<vlans>
<vlan>
<zoneId>1</zoneId>
<vlanId>31</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.31.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.31.150-192.168.31.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>2</zoneId>
<vlanId>32</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.32.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.32.150-192.168.32.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>3</zoneId>
<vlanId>33</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.33.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.33.150-192.168.33.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>4</zoneId>
<vlanId>34</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.34.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.34.150-192.168.34.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>5</zoneId>
<vlanId>35</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.35.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.35.150-192.168.35.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>6</zoneId>
<vlanId>36</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.36.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.36.150-192.168.36.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>7</zoneId>
<vlanId>37</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.37.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.37.150-192.168.37.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>8</zoneId>
<vlanId>38</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.38.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.38.150-192.168.38.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>9</zoneId>
<vlanId>39</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.39.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.39.150-192.168.39.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>10</zoneId>
<vlanId>40</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.40.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.40.150-192.168.40.159</ipAddressRange>
</vlan>
<vlan>
<zoneId>11</zoneId>
<vlanId>41</vlanId>
<vlanType>VirtualNetwork</vlanType>
<gateway>192.168.41.1</gateway>
<netmask>255.255.255.0</netmask>
<ipAddressRange>192.168.41.150-192.168.41.159</ipAddressRange>
</vlan>
</vlans>
<pods>
<pod>
<id>1</id>
<name>AH</name>
<zoneId>1</zoneId>
<ipAddressRange>192.168.10.20-192.168.10.24</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>2</id>
<name>KM</name>
<zoneId>2</zoneId>
<ipAddressRange>192.168.10.25-192.168.10.29</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>3</id>
<name>KY</name>
<zoneId>3</zoneId>
<ipAddressRange>192.168.10.30-192.168.10.34</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>4</id>
<name>WC</name>
<zoneId>4</zoneId>
<ipAddressRange>192.168.10.35-192.168.10.39</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>5</id>
<name>CV</name>
<zoneId>5</zoneId>
<ipAddressRange>192.168.10.40-192.168.10.44</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>6</id>
<name>KS</name>
<zoneId>6</zoneId>
<ipAddressRange>192.168.10.45-192.168.10.49</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>7</id>
<name>ES</name>
<zoneId>7</zoneId>
<ipAddressRange>192.168.10.50-192.168.10.54</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>8</id>
<name>RC</name>
<zoneId>8</zoneId>
<ipAddressRange>192.168.10.55-192.168.10.59</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>9</id>
<name>AX</name>
<zoneId>9</zoneId>
<ipAddressRange>192.168.10.62-192.168.10.64</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>10</id>
<name>JW</name>
<zoneId>10</zoneId>
<ipAddressRange>192.168.10.65-192.168.10.69</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
<pod>
<id>11</id>
<name>AJ</name>
<zoneId>11</zoneId>
<ipAddressRange>192.168.10.70-192.168.10.74</ipAddressRange>
<cidr>192.168.10.0/24</cidr>
</pod>
</pods>
<!--
* cpu is the number of CPUs for the offering
* ramSize is total memory in MB
* speed is the CPU speed for each core in MHZ
* diskSpace is the storage space in MB
* price is the price of the offering per hour
-->
<serviceOfferings>
<serviceOffering>
<id>1</id>
<name>Small Instance</name>
<displayText>Small Instance [500MHZ CPU, 512MB MEM, 16GB Disk] - $0.10 per hour</displayText>
<cpu>1</cpu>
<ramSize>512</ramSize>
<speed>500</speed>
<mirrored>false</mirrored>
</serviceOffering>
<serviceOffering>
<id>2</id>
<name>Medium Instance</name>
<displayText>Medium Instance [500MHZ CPU, 1GB MEM, 32GB Disk] - $0.20 per hour</displayText>
<cpu>1</cpu>
<ramSize>1024</ramSize>
<speed>512</speed>
</serviceOffering>
<serviceOffering>
<id>3</id>
<name>Large Instance</name>
<displayText>Large Instance [2GHZ CPU, 4GB MEM, 64GB Disk] - $0.30 per hour</displayText>
<cpu>2</cpu>
<ramSize>4096</ramSize>
<speed>2000</speed>
</serviceOffering>
</serviceOfferings>
<diskOfferings>
<diskOffering>
<id>1</id>
<domainId>1</domainId>
<name>Small Disk</name>
<displayText>Small Disk [16GB Disk]</displayText>
<diskSpace>16000</diskSpace>
</diskOffering>
<diskOffering>
<id>2</id>
<domainId>1</domainId>
<name>Medium Disk</name>
<displayText>Medium Disk [32GB Disk]</displayText>
<diskSpace>32000</diskSpace>
</diskOffering>
<diskOffering>
<id>3</id>
<domainId>1</domainId>
<name>Large Disk</name>
<displayText>Large Disk [64GB Disk]</displayText>
<diskSpace>64000</diskSpace>
</diskOffering>
</diskOfferings>
<!--
* firstname/lastname are optional parameters
* id, username, password are required parameters
-->
<users>
<user>
<id>2</id>
<username>admin</username>
<password>password</password>
<firstname>Admin</firstname>
<lastname>User</lastname>
<email>admin@mailprovider.com</email>
</user>
</users>
<configurationEntries>
<configuration>
<name>default.zone</name>
<value>AH</value>
</configuration>
<configuration>
<name>domain.suffix</name>
<value>cloud-test.cloud.com</value>
</configuration>
<configuration>
<name>instance.name</name>
<value>AH</value>
</configuration>
<configuration>
<name>consoleproxy.ram.size</name>
<value>256</value>
</configuration>
<configuration>
<name>host.stats.interval</name>
<value>3600000</value>
</configuration>
<configuration>
<name>storage.stats.interval</name>
<value>120000</value>
</configuration>
<configuration>
<name>volume.stats.interval</name>
<value>-1</value>
</configuration>
<configuration>
<name>ping.interval</name>
<value>60</value>
</configuration>
<configuration>
<name>alert.wait</name>
<value>1800</value>
</configuration>
<configuration>
<name>expunge.interval</name>
<value>86400</value>
</configuration>
<configuration>
<name>usage.aggregation.timezone</name>
<value>GMT</value>
</configuration>
<!-- RSA Keys -->
<configuration>
<name>ssh.privatekey</name>
<value>-----BEGIN RSA PRIVATE KEY-----\nMIIEoQIBAAKCAQEAnNUMVgQS87EzAQN9ufGgH3T1kOpqcvTmUrp8RVZyeA5qwptS\nrZxONRbhLK709pZFBJLmeFqiqciWoA/srVIFk+rPmBlVsMw8BK53hTGoax7iSe8s\nLFCAATm6vp0HnZzYqNfrzR2by36ET5aQD/VAyA55u+uUgAlxQuhKff2xjyahEHs+\nUiRlReiAgItygm9g3co3+8fJDOuRse+s0TOip1D0jPdo2AJFscyxrG9hWqQH86R/\nZlLJ7DqsiaAcUmn52u6Nsmd3BkRmGVx/D35Mq6upJqrk/QDfug9LF66yiIP/BEIn\n08N/wQ6m/O37WUtqqyl3rRKqs5TJ9ZnhsqeO9QIBIwKCAQA6QIDsv69EkkYk8qsK\njPJU06uq2rnS7T+bEhDmjdK+4MiRbOQx2vh6HnDktgM3BJ1K13oss/NGYHJ190lH\nsMA+QUXKx5TbRItSMixkrAta/Ne1D7FSScklBtBVbYZ8XtQhdMVML5GjWuCv2NZs\nU8eaw4xNHPyklcr7mBurI7b6p13VK5BNUWR/VNuigT4U89YzRcoEZ/sTlR+4ACYr\nxbUJJGBA03+NhdSAe2vodlMh5lGflD0JmHMFqqg9BcAtVb73JsOsxFQArbXwRd/q\nNckdoAvgJfhTOvXF5GMPLI0lGb6skJkS229F4GaBB2Iz4A9O0aHZob8I8zsWUbiu\npvBrAoGBAMjUDfF2x13NjH1cFHietO5O1oM0nZaAxKodxoAUvHVMUd5DIY50tqYw\n7ecKi2Cw43ONpdj0nP9Nc2NV3NDRqLopwkKUsTtq9AKQ2cIuw3+uS5vm0VZBzmTP\nuF04Qo4bXh/jFRA62u9bXsmIFtaehKxE1Gp6zi393GcbWP4HX/3dAoGBAMfq0KD3\ngeU1PHi9uI3Ss89nXzJsiGcwC5Iunu1aTzJCYhMlJkfmRcXYMAqSfg0nGWnfvlDh\nuOO26CHKjG182mTwYXdgQzIPpBc8suvgUWDBTrIzJI+zuyBLtPbd9DJEVrZkRVQX\nXrOV3Y5oOWsba4F+b20jaaHFAiY7s6OtrX/5AoGBAMMXI3zZyPwJgSlSIoPNX03m\nL3gke9QID4CvNduB26UlkVuRq5GzNRZ4rJdMEl3tqcC1fImdKswfWiX7o06ChqY3\nMb0FePfkPX7V2tnkSOJuzRsavLoxTCdqsxi6T0g318c0XZq81K4A/P5Jr8ksRl40\nPA+qfyVdAf3Cy3ptkHLzAoGASkFGLSi7N+CSzcLPhSJgCzUGGgsOF7LCeB/x4yGL\nIUvbSPCKj7vuB6gR2AqGlyvHnFprQpz7h8eYDI0PlmGS8kqn2+HtEpgYYGcAoMEI\nSIJQbhL+84vmaxTOL87IanEnhZL1LdzLZ0ZK+mE55fQ936P9gE77WVfNmSweJtob\n3xMCgYAl0aLeGf4oUZbI56eEaCbu8U7dEe6MF54VbozyiXqbp455QnUpuBrRn5uf\nc079dNcqTNDuk1+hYX9qNn1aXsvWeuofBXqWoFXu/c4yoWxJAPhEVhzZ9xrXI76I\nBKiPCyKrOa7bSLvs6SQPpuf5AQ8+NJrOxkEB9hbMuaAr2N5rCw==\n-----END RSA PRIVATE KEY-----
</value>
<category>Hidden</category>
</configuration>
<configuration>
<name>ssh.publickey</name>
<value>
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAnNUMVgQS87EzAQN9ufGgH3T1kOpqcvTmUrp8RVZyeA5qwptSrZxONRbhLK709pZFBJLmeFqiqciWoA/srVIFk+rPmBlVsMw8BK53hTGoax7iSe8sLFCAATm6vp0HnZzYqNfrzR2by36ET5aQD/VAyA55u+uUgAlxQuhKff2xjyahEHs+UiRlReiAgItygm9g3co3+8fJDOuRse+s0TOip1D0jPdo2AJFscyxrG9hWqQH86R/ZlLJ7DqsiaAcUmn52u6Nsmd3BkRmGVx/D35Mq6upJqrk/QDfug9LF66yiIP/BEIn08N/wQ6m/O37WUtqqyl3rRKqs5TJ9ZnhsqeO9Q== root@test2.lab.vmops.com
</value>
<category>Hidden</category>
</configuration>
<!-- the following are for configuring alerts and need to be changed to proper configuration values -->
<!--
<configuration>
<name>alert.smtp.host</name>
<value>smtp.host.com</value>
</configuration>
<configuration>
<name>alert.smtp.port</name>
<value>25</value>
</configuration>
<configuration>
<name>alert.smtp.useAuth</name>
<value>false</value>
</configuration>
<configuration>
<name>alert.smtp.username</name>
<value>some.user@example.com</value>
</configuration>
<configuration>
<name>alert.smtp.password</name>
<value>password</value>
</configuration>
<configuration>
<name>alert.email.sender</name>
<value>some.user@example.com</value>
</configuration>
<configuration>
<name>alert.email.addresses</name>
<value>some.admin@example.com</value>
</configuration>
<configuration>
<name>alert.smtp.debug</name>
<value>false</value>
</configuration>
-->
<configuration>
<name>memory.capacity.threshold</name>
<value>0.85</value>
</configuration>
<configuration>
<name>cpu.capacity.threshold</name>
<value>0.85</value>
</configuration>
<configuration>
<name>storage.capacity.threshold</name>
<value>0.85</value>
</configuration>
<configuration>
<name>storage.allocated.capacity.threshold</name>
<value>0.85</value>
</configuration>
<configuration>
<name>capacity.check.period</name>
<value>3600000</value>
</configuration>
<configuration>
<name>wait</name>
<value>240</value>
</configuration>
<configuration>
<name>network.throttling.rate</name>
<value>200</value>
</configuration>
<configuration>
<name>multicast.throttling.rate</name>
<value>10</value>
</configuration>
</configurationEntries>
<!--
It is possible to specify a single IP address. For example, to add 192.168.1.1
as the only address, specify as follows.
<publicIpAddresses>
<zoneId>1</zoneId>
<ipAddressRange>192.168.1.1</ipAddressRange>
</publicIpAddresses>
For each ip address range, create a new object. For example, to add the range 192.168.2.1 to 192.168.2.255
copy the following object tag into the privateIpRange
<privateIpAddresses>
<zoneId>1</zoneId>
<podId>1</podId>
<ipAddressRange>192.168.2.1-192.168.2.255</ipAddressRange>
</privateIpAddresses>
-->
<!--
It is possible to specify a single IP address. For example, to add 65.37.141.29
as the only address, specify as follows.
<publicIpAddresses>
<zoneId>1</zoneId>
<ipAddressRange>65.37.141.29</ipAddressRange>
</publicIpAddresses>
For each ip address range, create a new object. For example, to add the range 65.37.141.29 to 65.37.141.39
copy the following object tag into the publicIpRange
<publicIpAddresses>
<zoneId>1</zoneId>
<ipAddressRange>65.37.141.29-65.37.141.39</ipAddressRange>
</publicIpAddresses>
-->
</data>

View File

@ -1,30 +0,0 @@
-- Licensed to the Apache Software Foundation (ASF) under one
-- or more contributor license agreements. See the NOTICE file
-- distributed with this work for additional information
-- regarding copyright ownership. The ASF licenses this file
-- to you under the Apache License, Version 2.0 (the
-- "License"); you may not use this file except in compliance
-- with the License. You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing,
-- software distributed under the License is distributed on an
-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-- KIND, either express or implied. See the License for the
-- specific language governing permissions and limitations
-- under the License.
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
VALUES (1, 'routing', 'DomR Template', 0, 'tank/volumes/demo/template/private/u000000/os/routing', now(), 'ext3', 0, 64, 1, 'http://vmopsserver.lab.vmops.com/images/routing/vmi-root-fc8-x86_64-domR.img.bz2', 'd00927f863a23b98cc6df6e377c9d0c6', 0, 'DomR Template', 0);
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
VALUES (3, 'centos53-x86_64', 'Centos 5.3(x86_64) no GUI', 1, 'tank/volumes/demo/template/public/os/centos53-x86_64', now(), 'ext3', 0, 64, 1, 'http://vmopsserver.lab.vmops.com/images/centos52-x86_64/vmi-root-centos.5-2.64.pv.img.gz', 'd4ca80825d936db00eedf26620f13d69', 0, 'Centos 5.3(x86_64) no GUI', 0);
#INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
# VALUES (4, 'centos52-x86_64-gui', 'Centos 5.2(x86_64) GUI', 1, 'tank/volumes/demo/template/public/os/centos52-x86_64-gui', now(), 'ext3', 0, 64, 1, 'http://vmopsserver.lab.vmops.com/images/centos52-x86_64/vmi-root-centos.5-2.64.pv.img.gz', 'd4ca80825d936db00eedf26620f13d69', 0, 'Centos 5.2(x86_64) GUI', 0);
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
VALUES (5, 'winxpsp3', 'Windows XP SP3 (32-bit)', 1, 'tank/volumes/demo/template/public/os/winxpsp3', now(), 'ntfs', 1, 32, 1, 'http://vmopsserver.lab.vmops.com/images/fedora10-x86_64/vmi-root-fedora10.64.img.gz', 'c76d42703f14108b15acc9983307c759', 0, 'Windows XP SP3 (32-bit)', 0);
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
VALUES (7, 'win2003sp2', 'Windows 2003 SP2 (32-bit)', 1, 'tank/volumes/demo/template/public/os/win2003sp2', now(), 'ntfs', 1, 32, 1, 'http://vmopsserver.lab.vmops.com/images/win2003sp2/vmi-root-win2003sp2.img.gz', '4d2cc51898d05c0f7a2852c15bcdc77b', 0, 'Windows 2003 SP2 (32-bit)', 0);
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
VALUES (8, 'win2003sp2-x64', 'Windows 2003 SP2 (64-bit)', 1, 'tank/volumes/demo/template/public/os/win2003sp2-x64', now(), 'ntfs', 1, 64, 1, 'http://vmopsserver.lab.vmops.com/images/win2003sp2-x86_64/vmi-root-win2003sp2-x64.img.gz', '35d4de1c38eb4fb9d81a31c1d989c482', 0, 'Windows 2003 SP2 (64-bit)', 0);
INSERT INTO `vmops`.`vm_template` (id, unique_name, name, public, path, created, type, hvm, bits, created_by, url, checksum, ready, display_text, enable_password)
VALUES (9, 'fedora12-GUI-x86_64', 'Fedora 12 Desktop(64-bit)', 1, 'tank/volumes/demo/template/public/os/fedora12-GUI-x86_64', now(), 'ext3', 1, 64, 1, 'http://vmopsserver.lab.vmops.com/images/fedora12-GUI-x86_64/vmi-root-fedora12-GUI-x86_64.qcow2.gz', '', 0, 'Fedora 12 Desktop (with httpd,java and mysql)', 0);

View File

@ -1,85 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
<!-- ============================== -->
<!-- Append messages to the console -->
<!-- ============================== -->
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<param name="Threshold" value="INFO"/>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ABSOLUTE}{GMT} %5p %c{1}:%L - %m%n"/>
</layout>
</appender>
<!-- ================================ -->
<!-- Append messages to the usage log -->
<!-- ================================ -->
<!-- A time/date based rolling appender -->
<appender name="USAGE" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="Append" value="true"/>
<param name="Threshold" value="DEBUG"/>
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
<param name="FileNamePattern" value="/var/log/cloud/cloud_usage.log.%d{yyyy-MM-dd}{GMT}.gz"/>
<param name="ActiveFileName" value="/var/log/cloud/cloud_usage.log"/>
</rollingPolicy>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p [%c{3}] (%t:%x) %m%n"/>
</layout>
</appender>
<!-- ================ -->
<!-- Limit categories -->
<!-- ================ -->
<category name="com.cloud">
<priority value="DEBUG"/>
</category>
<!-- Limit the org.apache category to INFO as its DEBUG is verbose -->
<category name="org.apache">
<priority value="INFO"/>
</category>
<category name="org">
<priority value="INFO"/>
</category>
<category name="net">
<priority value="INFO"/>
</category>
<!-- ======================= -->
<!-- Setup the Root category -->
<!-- ======================= -->
<root>
<level value="INFO"/>
<appender-ref ref="CONSOLE"/>
<appender-ref ref="USAGE"/>
</root>
</log4j:configuration>

View File

@ -1,85 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
<!-- ============================== -->
<!-- Append messages to the console -->
<!-- ============================== -->
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<param name="Threshold" value="INFO"/>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ABSOLUTE}{GMT} %5p %c{1}:%L - %m%n"/>
</layout>
</appender>
<!-- ================================ -->
<!-- Append messages to the usage log -->
<!-- ================================ -->
<!-- A time/date based rolling appender -->
<appender name="USAGE" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="Append" value="true"/>
<param name="Threshold" value="DEBUG"/>
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
<param name="FileNamePattern" value="@logdir@/cloud_usage.log.%d{yyyy-MM-dd}{GMT}.gz"/>
<param name="ActiveFileName" value="@logdir@/cloud_usage.log"/>
</rollingPolicy>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p [%c{3}] (%t:%x) %m%n"/>
</layout>
</appender>
<!-- ================ -->
<!-- Limit categories -->
<!-- ================ -->
<category name="com.cloud">
<priority value="DEBUG"/>
</category>
<!-- Limit the org.apache category to INFO as its DEBUG is verbose -->
<category name="org.apache">
<priority value="INFO"/>
</category>
<category name="org">
<priority value="INFO"/>
</category>
<category name="net">
<priority value="INFO"/>
</category>
<!-- ======================= -->
<!-- Setup the Root category -->
<!-- ======================= -->
<root>
<level value="INFO"/>
<appender-ref ref="CONSOLE"/>
<appender-ref ref="USAGE"/>
</root>
</log4j:configuration>

View File

@ -1,67 +0,0 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
usage-components.xml is the configuration file for the VM Ops
usage servers.
Here are some places to look for information.
- To find out the general functionality that each Manager
or Adapter provide, look at the javadoc for the interface
that it implements. The interface is usually the
"key" attribute in the declaration.
- To find specific implementation of each Manager or
Adapter, look at the javadoc for the actual class. The
class can be found in the <class> element.
- To find out the configuration parameters for each Manager
or Adapter, look at the javadoc for the actual implementation
class. It should be documented in the description of the
class.
- To know more about the components.xml in general, look for
the javadoc for ComponentLocator.java.
If you found that the Manager or Adapter are not properly
documented, please contact the author.
-->
<components.xml>
<usage-server>
<dao name="VM Instance" class="com.cloud.vm.dao.VMInstanceDaoImpl"/>
<dao name="User VM" class="com.cloud.vm.dao.UserVmDaoImpl"/>
<dao name="ServiceOffering" class="com.cloud.service.dao.ServiceOfferingDaoImpl">
<param name="cache.size">50</param>
<param name="cache.time.to.live">-1</param>
</dao>
<dao name="UserStats" class="com.cloud.user.dao.UserStatisticsDaoImpl"/>
<dao name="IP Addresses" class="com.cloud.network.dao.IPAddressDaoImpl"/>
<dao name="Usage" class="com.cloud.usage.dao.UsageDaoImpl"/>
<dao name="Domain" class="com.cloud.domain.dao.DomainDaoImpl"/>
<dao name="Account" class="com.cloud.user.dao.AccountDaoImpl"/>
<dao name="UserAccount" class="com.cloud.user.dao.UserAccountDaoImpl"/>
<dao name="Usage VmInstance" class="com.cloud.usage.dao.UsageVMInstanceDaoImpl"/>
<dao name="Usage Network" class="com.cloud.usage.dao.UsageNetworkDaoImpl"/>
<dao name="Usage IPAddress" class="com.cloud.usage.dao.UsageIPAddressDaoImpl"/>
<dao name="Usage Job" class="com.cloud.usage.dao.UsageJobDaoImpl"/>
<dao name="Configuration" class="com.cloud.configuration.dao.ConfigurationDaoImpl"/>
<dao name="Usage Event" class="com.cloud.event.dao.UsageEventDaoImpl"/>
<manager name="Usage alert manager" class="com.cloud.usage.UsageAlertManagerImpl"/>
<manager name="usage manager" class="com.cloud.usage.UsageManagerImpl">
<param name="period">DAILY</param> <!-- DAILY, WEEKLY, MONTHLY; how often it creates usage records -->
</manager>
</usage-server>
</components.xml>

View File

@ -1,18 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
agent.minimal.version=@agent.min.version@

View File

@ -1,544 +0,0 @@
<ehcache xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="ehcache.xsd">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
CacheManager Configuration
==========================
An ehcache.xml corresponds to a single CacheManager.
See instructions below or the ehcache schema (ehcache.xsd) on how to configure.
System property tokens can be specified in this file which are replaced when the configuration is loaded.
For example multicastGroupPort=${multicastGroupPort} can be replaced with the System property either
from an environment variable or a system property specified with a command line switch such as
-DmulticastGroupPort=4446.
DiskStore configuration
=======================
The diskStore element is optional. To turn off disk store path creation, comment out the diskStore
element below.
Configure it if you have overflowToDisk or diskPersistent enabled for any cache.
If it is not configured, and a cache is created which requires a disk store, a warning will be
issued and java.io.tmpdir will automatically be used.
diskStore has only one attribute - "path". It is the path to the directory where
.data and .index files will be created.
If the path is one of the following Java System Property it is replaced by its value in the
running VM. For backward compatibility these should be specified without being enclosed in the ${token}
replacement syntax.
The following properties are translated:
* user.home - User's home directory
* user.dir - User's current working directory
* java.io.tmpdir - Default temp file path
* ehcache.disk.store.dir - A system property you would normally specify on the command line
e.g. java -Dehcache.disk.store.dir=/u01/myapp/diskdir ...
Subdirectories can be specified below the property e.g. java.io.tmpdir/one
-->
<!-- diskStore path="java.io.tmpdir"/ -->
<!--
CacheManagerEventListener
=========================
Specifies a CacheManagerEventListenerFactory, which will be used to create a CacheManagerEventListener,
which is notified when Caches are added or removed from the CacheManager.
The attributes of CacheManagerEventListenerFactory are:
* class - a fully qualified factory class name
* properties - comma separated properties having meaning only to the factory.
Sets the fully qualified class name to be registered as the CacheManager event listener.
The events include:
* adding a Cache
* removing a Cache
Callbacks to listener methods are synchronous and unsynchronized. It is the responsibility
of the implementer to safely handle the potential performance and thread safety issues
depending on what their listener is doing.
If no class is specified, no listener is created. There is no default.
-->
<cacheManagerEventListenerFactory class="" properties=""/>
<!--
CacheManagerPeerProvider
========================
(Enable for distributed operation)
Specifies a CacheManagerPeerProviderFactory which will be used to create a
CacheManagerPeerProvider, which discovers other CacheManagers in the cluster.
The attributes of cacheManagerPeerProviderFactory are:
* class - a fully qualified factory class name
* properties - comma separated properties having meaning only to the factory.
Ehcache comes with a built-in RMI-based distribution system with two means of discovery of
CacheManager peers participating in the cluster:
* automatic, using a multicast group. This one automatically discovers peers and detects
changes such as peers entering and leaving the group
* manual, using manual rmiURL configuration. A hardcoded list of peers is provided at
configuration time.
Configuring Automatic Discovery:
Automatic discovery is configured as per the following example:
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=automatic, multicastGroupAddress=230.0.0.1,
multicastGroupPort=4446, timeToLive=32"/>
Valid properties are:
* peerDiscovery (mandatory) - specify "automatic"
* multicastGroupAddress (mandatory) - specify a valid multicast group address
* multicastGroupPort (mandatory) - specify a dedicated port for the multicast heartbeat
traffic
* timeToLive - specify a value between 0 and 255 which determines how far the packets will
propagate.
By convention, the restrictions are:
0 - the same host
1 - the same subnet
32 - the same site
64 - the same region
128 - the same continent
255 - unrestricted
Configuring Manual Discovery:
Manual discovery is configured as per the following example:
<cacheManagerPeerProviderFactory class=
"net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=manual,
rmiUrls=//server1:40000/sampleCache1|//server2:40000/sampleCache1
| //server1:40000/sampleCache2|//server2:40000/sampleCache2"
propertySeparator="," />
Valid properties are:
* peerDiscovery (mandatory) - specify "manual"
* rmiUrls (mandatory) - specify a pipe separated list of rmiUrls, in the form
//hostname:port
The hostname is the hostname of the remote CacheManager peer. The port is the listening
port of the RMICacheManagerPeerListener of the remote CacheManager peer.
Configuring JGroups replication:
<cacheManagerPeerProviderFactory class="net.sf.ehcache.distribution.jgroups.JGroupsCacheManagerPeerProviderFactory"
properties="connect=UDP(mcast_addr=231.12.21.132;mcast_port=45566;ip_ttl=32;
mcast_send_buf_size=150000;mcast_recv_buf_size=80000):
PING(timeout=2000;num_initial_members=6):
MERGE2(min_interval=5000;max_interval=10000):
FD_SOCK:VERIFY_SUSPECT(timeout=1500):
pbcast.NAKACK(gc_lag=10;retransmit_timeout=3000):
UNICAST(timeout=5000):
pbcast.STABLE(desired_avg_gossip=20000):
FRAG:
pbcast.GMS(join_timeout=5000;join_retry_timeout=2000;shun=false;print_local_addr=false)"
propertySeparator="::"
/>
The only property necessary is the connect String used by jgroups to configure itself. Refer to the Jgroups documentation for explanation
of all the protocols. The example above uses UDP multicast. If the connect property is not specified the default JGroups connection will be
used.
-->
<cacheManagerPeerProviderFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
properties="peerDiscovery=automatic,
multicastGroupAddress=230.0.0.1,
multicastGroupPort=4446, timeToLive=1"
propertySeparator=","
/>
<!--
CacheManagerPeerListener
========================
(Enable for distributed operation)
Specifies a CacheManagerPeerListenerFactory which will be used to create a
CacheManagerPeerListener, which
listens for messages from cache replicators participating in the cluster.
The attributes of cacheManagerPeerListenerFactory are:
class - a fully qualified factory class name
properties - comma separated properties having meaning only to the factory.
Ehcache comes with a built-in RMI-based distribution system. The listener component is
RMICacheManagerPeerListener which is configured using
RMICacheManagerPeerListenerFactory. It is configured as per the following example:
<cacheManagerPeerListenerFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"
properties="hostName=fully_qualified_hostname_or_ip,
port=40001,
socketTimeoutMillis=120000"
propertySeparator="," />
All properties are optional. They are:
* hostName - the hostName of the host the listener is running on. Specify
where the host is multihomed and you want to control the interface over which cluster
messages are received. Defaults to the host name of the default interface if not
specified.
* port - the port the RMI Registry listener listens on. This defaults to a free port if not specified.
* remoteObjectPort - the port number on which the remote objects bound in the registry receive calls.
This defaults to a free port if not specified.
* socketTimeoutMillis - the number of ms client sockets will stay open when sending
messages to the listener. This should be long enough for the slowest message.
If not specified it defaults 120000ms.
-->
<cacheManagerPeerListenerFactory
class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"/>
<!--
Cache configuration
===================
The following attributes are required.
name:
Sets the name of the cache. This is used to identify the cache. It must be unique.
maxElementsInMemory:
Sets the maximum number of objects that will be created in memory
maxElementsOnDisk:
Sets the maximum number of objects that will be maintained in the DiskStore
The default value is zero, meaning unlimited.
eternal:
Sets whether elements are eternal. If eternal, timeouts are ignored and the
element is never expired.
overflowToDisk:
Sets whether elements can overflow to disk when the memory store
has reached the maxInMemory limit.
The following attributes and elements are optional.
timeToIdleSeconds:
Sets the time to idle for an element before it expires.
i.e. The maximum amount of time between accesses before an element expires
Is only used if the element is not eternal.
Optional attribute. A value of 0 means that an Element can idle for infinity.
The default value is 0.
timeToLiveSeconds:
Sets the time to live for an element before it expires.
i.e. The maximum time between creation time and when an element expires.
Is only used if the element is not eternal.
Optional attribute. A value of 0 means that an Element can live for infinity.
The default value is 0.
diskPersistent:
Whether the disk store persists between restarts of the Virtual Machine.
The default value is false.
diskExpiryThreadIntervalSeconds:
The number of seconds between runs of the disk expiry thread. The default value
is 120 seconds.
diskSpoolBufferSizeMB:
This is the size to allocate the DiskStore for a spool buffer. Writes are made
to this area and then asynchronously written to disk. The default size is 30MB.
Each spool buffer is used only by its cache. If you get OutOfMemory errors consider
lowering this value. To improve DiskStore performance consider increasing it. Trace level
logging in the DiskStore will show if put back ups are occurring.
memoryStoreEvictionPolicy:
Policy would be enforced upon reaching the maxElementsInMemory limit. Default
policy is Least Recently Used (specified as LRU). Other policies available -
First In First Out (specified as FIFO) and Less Frequently Used
(specified as LFU)
Cache elements can also contain sub elements which take the same format of a factory class
and properties. Defined sub-elements are:
* cacheEventListenerFactory - Enables registration of listeners for cache events, such as
put, remove, update, and expire.
* bootstrapCacheLoaderFactory - Specifies a BootstrapCacheLoader, which is called by a
cache on initialisation to prepopulate itself.
* cacheExtensionFactory - Specifies a CacheExtension, a generic mechanism to tie a class
which holds a reference to a cache to the cache lifecycle.
* cacheExceptionHandlerFactory - Specifies a CacheExceptionHandler, which is called when
cache exceptions occur.
* cacheLoaderFactory - Specifies a CacheLoader, which can be used both asynchronously and
synchronously to load objects into a cache.
RMI Cache Replication
Each cache that will be distributed needs to set a cache event listener which replicates
messages to the other CacheManager peers. For the built-in RMI implementation this is done
by adding a cacheEventListenerFactory element of type RMICacheReplicatorFactory to each
distributed cache's configuration as per the following example:
<cacheEventListenerFactory class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
properties="replicateAsynchronously=true,
replicatePuts=true,
replicateUpdates=true,
replicateUpdatesViaCopy=true,
replicateRemovals=true
asynchronousReplicationIntervalMillis=<number of milliseconds>"
propertySeparator="," />
The RMICacheReplicatorFactory recognises the following properties:
* replicatePuts=true|false - whether new elements placed in a cache are
replicated to others. Defaults to true.
* replicateUpdates=true|false - whether new elements which override an
element already existing with the same key are replicated. Defaults to true.
* replicateRemovals=true - whether element removals are replicated. Defaults to true.
* replicateAsynchronously=true | false - whether replications are
asynchronous (true) or synchronous (false). Defaults to true.
* replicateUpdatesViaCopy=true | false - whether the new elements are
copied to other caches (true), or whether a remove message is sent. Defaults to true.
* asynchronousReplicationIntervalMillis=<number of milliseconds> - The asynchronous
replicator runs at a set interval of milliseconds. The default is 1000. The minimum
is 10. This property is only applicable if replicateAsynchronously=true
For the Jgroups replication this is done with:
<cacheEventListenerFactory class="net.sf.ehcache.distribution.jgroups.JGroupsCacheReplicatorFactory"
properties="replicateAsynchronously=true, replicatePuts=true,
replicateUpdates=true, replicateUpdatesViaCopy=false,
replicateRemovals=true,asynchronousReplicationIntervalMillis=1000"/>
This listener supports the same properties as the RMICacheReplicatorFactory.
Cluster Bootstrapping
The RMIBootstrapCacheLoader bootstraps caches in clusters where RMICacheReplicators are
used. It is configured as per the following example:
<bootstrapCacheLoaderFactory
class="net.sf.ehcache.distribution.RMIBootstrapCacheLoaderFactory"
properties="bootstrapAsynchronously=true, maximumChunkSizeBytes=5000000"
propertySeparator="," />
The RMIBootstrapCacheLoaderFactory recognises the following optional properties:
* bootstrapAsynchronously=true|false - whether the bootstrap happens in the background
after the cache has started. If false, bootstrapping must complete before the cache is
made available. The default value is true.
* maximumChunkSizeBytes=<integer> - Caches can potentially be very large, larger than the
memory limits of the VM. This property allows the bootstrapper to fetch elements in
chunks. The default chunk size is 5000000 (5MB).
Cache Exception Handling
By default, most cache operations will propagate a runtime CacheException on failure. An
interceptor, using a dynamic proxy, may be configured so that a CacheExceptionHandler can
be configured to intercept Exceptions. Errors are not intercepted.
It is configured as per the following example:
<cacheExceptionHandlerFactory class="com.example.ExampleExceptionHandlerFactory"
properties="logLevel=FINE"/>
Caches with ExceptionHandling configured are not of type Cache, but are of type Ehcache only,
and are not available using CacheManager.getCache(), but using CacheManager.getEhcache().
Cache Loader
A default CacheLoader may be set which loads objects into the cache through asynchronous and
synchronous methods on Cache. This is different to the bootstrap cache loader, which is used
only in distributed caching.
It is configured as per the following example:
<cacheLoaderFactory class="com.example.ExampleCacheLoaderFactory"
properties="type=int,startCounter=10"/>
Cache Extension
CacheExtensions are a general purpose mechanism to allow generic extensions to a Cache.
CacheExtensions are tied into the Cache lifecycle.
CacheExtensions are created using the CacheExtensionFactory which has a
<code>createCacheCacheExtension()</code> method which takes as a parameter a
Cache and properties. It can thus call back into any public method on Cache, including, of
course, the load methods.
Extensions are added as per the following example:
<cacheExtensionFactory class="com.example.FileWatchingCacheRefresherExtensionFactory"
properties="refreshIntervalMillis=18000, loaderTimeout=3000,
flushPeriod=whatever, someOtherProperty=someValue ..."/>
-->
<!--
Mandatory Default Cache configuration. These settings will be applied to caches
created programmatically using CacheManager.add(String cacheName).
The defaultCache has an implicit name "default" which is a reserved cache name.
-->
<defaultCache
maxElementsInMemory="10000"
eternal="false"
timeToIdleSeconds="120"
timeToLiveSeconds="120"
overflowToDisk="false"
diskSpoolBufferSizeMB="30"
maxElementsOnDisk="10000000"
diskPersistent="false"
diskExpiryThreadIntervalSeconds="120"
memoryStoreEvictionPolicy="LRU"
/>
<!--
Sample caches. Following are some example caches. Remove these before use.
-->
<!--
Sample cache named sampleCache1
This cache contains a maximum in memory of 10000 elements, and will expire
an element if it is idle for more than 5 minutes and lives for more than
10 minutes.
If there are more than 10000 elements it will overflow to the
disk cache, which in this configuration will go to wherever java.io.tmp is
defined on your system. On a standard Linux system this will be /tmp"
-->
<!--
<cache name="sampleCache1"
maxElementsInMemory="10000"
maxElementsOnDisk="1000"
eternal="false"
overflowToDisk="true"
diskSpoolBufferSizeMB="20"
timeToIdleSeconds="300"
timeToLiveSeconds="600"
memoryStoreEvictionPolicy="LFU"
/>
-->
<!--
Sample cache named sampleCache2
This cache has a maximum of 1000 elements in memory. There is no overflow to disk, so 1000
is also the maximum cache size. Note that when a cache is eternal, timeToLive and
timeToIdle are not used and do not need to be specified.
-->
<!--
<cache name="sampleCache2"
maxElementsInMemory="1000"
eternal="true"
overflowToDisk="false"
memoryStoreEvictionPolicy="FIFO"
/>
-->
<!--
Sample cache named sampleCache3. This cache overflows to disk. The disk store is
persistent between cache and VM restarts. The disk expiry thread interval is set to 10
minutes, overriding the default of 2 minutes.
-->
<!--
<cache name="sampleCache3"
maxElementsInMemory="500"
eternal="false"
overflowToDisk="true"
timeToIdleSeconds="300"
timeToLiveSeconds="600"
diskPersistent="true"
diskExpiryThreadIntervalSeconds="1"
memoryStoreEvictionPolicy="LFU"
/>
-->
<!--
Sample distributed cache named sampleDistributedCache1.
This cache replicates using defaults.
It also bootstraps from the cluster, using default properties.
-->
<!--
<cache name="sampleDistributedCache1"
maxElementsInMemory="10"
eternal="false"
timeToIdleSeconds="100"
timeToLiveSeconds="100"
overflowToDisk="false">
<cacheEventListenerFactory
class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"/>
<bootstrapCacheLoaderFactory
class="net.sf.ehcache.distribution.RMIBootstrapCacheLoaderFactory"/>
</cache>
-->
<!--
Sample distributed cache named sampleDistributedCache2.
This cache replicates using specific properties.
It only replicates updates and does so synchronously via copy
-->
<!--
<cache name="sampleDistributedCache2"
maxElementsInMemory="10"
eternal="false"
timeToIdleSeconds="100"
timeToLiveSeconds="100"
overflowToDisk="false">
<cacheEventListenerFactory
class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
properties="replicateAsynchronously=false, replicatePuts=false,
replicateUpdates=true, replicateUpdatesViaCopy=true,
replicateRemovals=false"/>
</cache>
-->
<!--
Sample distributed cache named sampleDistributedCache3.
This cache replicates using defaults except that the asynchronous replication
interval is set to 200ms.
-->
<!--
<cache name="sampleDistributedCache3"
maxElementsInMemory="10"
eternal="false"
timeToIdleSeconds="100"
timeToLiveSeconds="100"
overflowToDisk="false">
<cacheEventListenerFactory
class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
properties="asynchronousReplicationIntervalMillis=200"/>
</cache>
-->
</ehcache>

View File

@ -1,131 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
<!-- ================================= -->
<!-- Preserve messages in a local file -->
<!-- ================================= -->
<appender name="FILE1" class="org.apache.log4j.RollingFileAppender">
<param name="File" value="/var/log/cloud.log"/>
<param name="Append" value="true"/>
<param name="MaxFileSize" value="10000KB"/>
<param name="MaxBackupIndex" value="4"/>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p
[%c{3}] (%t:%x) %m%n"/>
</layout>
</appender>
<appender name="FILE2" class="org.apache.log4j.RollingFileAppender">
<param name="File" value="/var/log/cloud/cloud.out"/>
<param name="Append" value="true"/>
<param name="MaxFileSize" value="10000KB"/>
<param name="MaxBackupIndex" value="4"/>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p
[%c{3}] (%t:%x) %m%n"/>
</layout>
</appender>
<appender name="FILE3" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="File" value="/usr/local/cloud/systemvm/cloud.log"/>
<param name="Append" value="true"/>
<param name="MaxFileSize" value="10000KB"/>
<param name="MaxBackupIndex" value="4"/>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p
[%c{3}] (%t:%x) %m%n"/>
</layout>
</appender>
<appender name="APISERVER" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="Append" value="true"/>
<param name="Threshold" value="DEBUG"/>
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
<param name="FileNamePattern" value="/var/log/cloud/api-server.log.%d{yyyy-MM-dd}{GMT}.gz"/>
<param name="ActiveFileName" value="/var/log/cloud/api-server.log"/>
</rollingPolicy>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %m%n"/>
</layout>
</appender>
<!-- ============================== -->
<!-- Append messages to the console -->
<!-- ============================== -->
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<param name="Threshold" value="INFO"/>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ABSOLUTE}{GMT} %5p %c{1}:%L - %m%n"/>
</layout>
</appender>
<!-- ================ -->
<!-- Limit categories -->
<!-- ================ -->
<category name="com.cloud">
<priority value="DEBUG"/>
</category>
<!-- Limit the org.apache category to INFO as its DEBUG is verbose -->
<category name="org.apache">
<priority value="INFO"/>
</category>
<category name="org">
<priority value="INFO"/>
</category>
<category name="net">
<priority value="INFO"/>
</category>
<category name="apiserver.com.cloud">
<priority value="DEBUG"/>
</category>
<logger name="apiserver.com.cloud" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="APISERVER"/>
</logger>
<!-- ======================= -->
<!-- Setup the Root category -->
<!-- ======================= -->
<root>
<level value="INFO"/>
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE1"/>
<appender-ref ref="FILE2"/>
<appender-ref ref="FILE3"/>
</root>
</log4j:configuration>

View File

@ -1,107 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/" debug="false">
<!-- ================================= -->
<!-- Preserve messages in a local file -->
<!-- ================================= -->
<!-- A time/date based rolling appender -->
<appender name="FILE" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="Append" value="true"/>
<param name="Threshold" value="DEBUG"/>
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
<param name="FileNamePattern" value="@logdir@/cloud.log.%d{yyyy-MM-dd}{GMT}.gz"/>
<param name="ActiveFileName" value="@logdir@/cloud.log"/>
</rollingPolicy>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %-5p [%c{3}] (%t:%x) %m%n"/>
</layout>
</appender>
<appender name="APISERVER" class="org.apache.log4j.rolling.RollingFileAppender">
<param name="Append" value="true"/>
<param name="Threshold" value="DEBUG"/>
<rollingPolicy class="org.apache.log4j.rolling.TimeBasedRollingPolicy">
<param name="FileNamePattern" value="@logdir@/api-server.log.%d{yyyy-MM-dd}{GMT}.gz"/>
<param name="ActiveFileName" value="@logdir@/api-server.log"/>
</rollingPolicy>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ISO8601}{GMT} %m%n"/>
</layout>
</appender>
<!-- ============================== -->
<!-- Append messages to the console -->
<!-- ============================== -->
<appender name="CONSOLE" class="org.apache.log4j.ConsoleAppender">
<param name="Target" value="System.out"/>
<param name="Threshold" value="INFO"/>
<layout class="org.apache.log4j.EnhancedPatternLayout">
<param name="ConversionPattern" value="%d{ABSOLUTE}{GMT} %5p %c{1}:%L - %m%n"/>
</layout>
</appender>
<!-- ================ -->
<!-- Limit categories -->
<!-- ================ -->
<category name="com.cloud">
<priority value="DEBUG"/>
</category>
<!-- Limit the org.apache category to INFO as its DEBUG is verbose -->
<category name="org.apache">
<priority value="INFO"/>
</category>
<category name="org">
<priority value="INFO"/>
</category>
<category name="net">
<priority value="INFO"/>
</category>
<category name="apiserver.com.cloud">
<priority value="DEBUG"/>
</category>
<logger name="apiserver.com.cloud" additivity="false">
<level value="DEBUG"/>
<appender-ref ref="APISERVER"/>
</logger>
<!-- ======================= -->
<!-- Setup the Root category -->
<!-- ======================= -->
<root>
<level value="INFO"/>
<appender-ref ref="CONSOLE"/>
<appender-ref ref="FILE"/>
</root>
</log4j:configuration>

View File

@ -1,149 +0,0 @@
<?xml version='1.0' encoding='utf-8'?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Note: A "Server" is not itself a "Container", so you may not
define subcomponents such as "Valves" at this level.
Documentation at /docs/config/server.html
-->
<Server port="8005" shutdown="SHUTDOWN">
<!--APR library loader. Documentation at /docs/apr.html -->
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
<!--Initialize Jasper prior to webapps are loaded. Documentation at /docs/jasper-howto.html -->
<Listener className="org.apache.catalina.core.JasperListener" />
<!-- JMX Support for the Tomcat server. Documentation at /docs/non-existent.html -->
<Listener className="org.apache.catalina.mbeans.ServerLifecycleListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<!-- Global JNDI resources
Documentation at /docs/jndi-resources-howto.html
-->
<GlobalNamingResources>
<!-- Editable user database that can also be used by
UserDatabaseRealm to authenticate users
-->
<Resource name="UserDatabase" auth="Container"
type="org.apache.catalina.UserDatabase"
description="User database that can be updated and saved"
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
pathname="conf/tomcat-users.xml" />
</GlobalNamingResources>
<!-- A "Service" is a collection of one or more "Connectors" that share
a single "Container" Note: A "Service" is not itself a "Container",
so you may not define subcomponents such as "Valves" at this level.
Documentation at /docs/config/service.html
-->
<Service name="Catalina">
<!--The connectors can use a shared executor, you can define one or more named thread pools-->
<Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
maxThreads="150" minSpareThreads="25"/>
<!-- A "Connector" represents an endpoint by which requests are received
and responses are returned. Documentation at :
Java HTTP Connector: /docs/config/http.html (blocking & non-blocking)
Java AJP Connector: /docs/config/ajp.html
APR (HTTP/AJP) Connector: /docs/apr.html
Define a non-SSL HTTP/1.1 Connector on port 8080
-->
<!--
<Connector port="8080" protocol="HTTP/1.1"
connectionTimeout="20000"
redirectPort="8443" />
-->
<!-- A "Connector" using the shared thread pool-->
<Connector executor="tomcatThreadPool"
port="8080" protocol="org.apache.coyote.http11.Http11NioProtocol"
connectionTimeout="20000" disableUploadTimeout="true"
acceptCount="150" enableLookups="false" maxThreads="150"
maxHttpHeaderSize="8192" redirectPort="8443" />
<!-- Define a SSL HTTP/1.1 Connector on port 8443
This connector uses the JSSE configuration, when using APR, the
connector should be using the OpenSSL style configuration
described in the APR documentation -->
<!--
<Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
maxThreads="150" scheme="https" secure="true"
clientAuth="false" sslProtocol="TLS"
keystoreType="PKCS12"
keystoreFile="conf\cloud-localhost.pk12"
keystorePass="password"
/>
-->
<!-- Define an AJP 1.3 Connector on port 8009 -->
<Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />
<!-- An Engine represents the entry point (within Catalina) that processes
every request. The Engine implementation for Tomcat stand alone
analyzes the HTTP headers included with the request, and passes them
on to the appropriate Host (virtual host).
Documentation at /docs/config/engine.html -->
<!-- You should set jvmRoute to support load-balancing via AJP ie :
<Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
-->
<Engine name="Catalina" defaultHost="localhost">
<!--For clustering, please take a look at documentation at:
/docs/cluster-howto.html (simple how to)
/docs/config/cluster.html (reference documentation) -->
<!--
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
-->
<!-- The request dumper valve dumps useful debugging information about
the request and response data received and sent by Tomcat.
Documentation at: /docs/config/valve.html -->
<!--
<Valve className="org.apache.catalina.valves.RequestDumperValve"/>
-->
<!-- This Realm uses the UserDatabase configured in the global JNDI
resources under the key "UserDatabase". Any edits
that are performed against this UserDatabase are immediately
available for use by the Realm. -->
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
resourceName="UserDatabase"/>
<!-- Define the default virtual host
Note: XML Schema validation will not work with Xerces 2.2.
-->
<Host name="localhost" appBase="webapps"
unpackWARs="true" autoDeploy="true"
xmlValidation="false" xmlNamespaceAware="false">
<!-- SingleSignOn valve, share authentication between web applications
Documentation at: /docs/config/valve.html -->
<!--
<Valve className="org.apache.catalina.authenticator.SingleSignOn" />
-->
<!-- Access log valve: logs all requests processed by this virtual host.
Documentation at: /docs/config/valve.html -->
<Valve className="org.apache.catalina.valves.FastCommonAccessLogValve" directory="logs"
prefix="access_log." suffix=".txt" pattern="common" resolveHosts="false"/>
</Host>
</Engine>
</Service>
</Server>

View File

@ -272,29 +272,6 @@
<tstamp>
<format property="package.date" pattern="yyyy-MM-dd" />
</tstamp>
<zip destfile="${publish.dir}/cloud-${company.major.version}.${company.minor.version}.${company.patch.version}.${build.number}-${package.date}.zip" duplicate="preserve">
<zipfileset dir="${dist.dir}" prefix="cloud-${company.major.version}.${company.minor.version}.${company.patch.version}.${build.number}-${package.date}" filemode="777">
<include name="deploy-agent.sh" />
<include name="deploy-server.sh" />
</zipfileset>
<zipfileset dir="${db.dist.dir}" prefix="cloud-${company.major.version}.${company.minor.version}.${company.patch.version}.${build.number}-${package.date}/db" filemode="777">
<include name="deploy-db.sh" />
</zipfileset>
<zipfileset dir="${db.dist.dir}" prefix="cloud-${company.major.version}.${company.minor.version}.${company.patch.version}.${build.number}-${package.date}/db">
<include name="log4j.properties" />
<include name="create-schema.sql" />
<include name="init-data.sql" />
<include name="create-index-fk.sql" />
<include name="server-setup.xml" />
<include name="create-database.sql" />
<include name="templates.sql" />
</zipfileset>
<zipfileset dir="${dist.dir}" prefix="cloud-${company.major.version}.${company.minor.version}.${company.patch.version}.${build.number}-${package.date}">
<include name="agent.zip" />
<include name="client.zip" />
<include name="docs.zip" />
</zipfileset>
</zip>
</target>
<target name="package-kvm" depends="package-agent">

View File

@ -468,6 +468,7 @@ fi
%{_javadir}/commons-dbcp-1.4.jar
%{_javadir}/commons-pool-1.6.jar
%{_javadir}/gson-1.7.1.jar
%{_javadir}/CAStorSDK-*.jar
%{_javadir}/backport-util-concurrent-3.1.jar
%{_javadir}/ehcache-1.5.0.jar
%{_javadir}/httpcore-4.0.jar