Merge branch 'master' into ui-regions

This commit is contained in:
Brian Federle 2013-02-27 08:37:31 -08:00
commit 79bfbe32ce
133 changed files with 4124 additions and 2136 deletions

View File

@ -83,7 +83,7 @@
<filterreader
classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile"
value="${basedir}/../build/replace.properties" />
value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
@ -97,7 +97,7 @@
<filterreader
classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile"
value="${basedir}/../build/replace.properties" />
value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
@ -106,6 +106,24 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>2.5.1</version>
<executions>
<execution>
<id>copy-dependencies</id>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
<includeScope>runtime</includeScope>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -28,8 +28,20 @@ public class VirtualMachineTO {
private BootloaderType bootloader;
Type type;
int cpus;
/**
 * 'speed' is retained because 4.0.X/4.1.X management servers do not yet
 * support the CPU overcommit feature, which sends minSpeed and maxSpeed
 * instead. Keeping it preserves backwards compatibility between those
 * management servers and newer agents.
 */
Integer speed;
Integer minSpeed;
Integer maxSpeed;
long minRam;
long maxRam;
String hostName;
@ -48,6 +60,21 @@ public class VirtualMachineTO {
VolumeTO[] disks;
NicTO[] nics;
/**
 * Legacy constructor taking a single fixed CPU {@code speed}.
 * Kept for backwards compatibility with 4.0.X/4.1.X management servers,
 * which do not support the CPU overcommit feature (overcommit sends
 * minSpeed and maxSpeed via the other constructor instead).
 *
 * @param id           internal VM id
 * @param instanceName internal instance name (stored in {@code name})
 * @param type         VM type (user, system VM, ...)
 * @param cpus         number of CPUs
 * @param speed        single CPU speed value (pre-overcommit semantics)
 * @param minRam       minimum RAM in bytes -- TODO confirm unit against callers
 * @param maxRam       maximum RAM in bytes -- TODO confirm unit against callers
 * @param bootloader   bootloader type to use
 * @param os           guest OS description
 * @param enableHA     whether high availability is enabled for this VM
 * @param limitCpuUse  whether CPU usage should be capped
 * @param vncPassword  VNC console password
 */
public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
this.id = id;
this.name = instanceName;
this.type = type;
this.cpus = cpus;
this.speed = speed;
this.minRam = minRam;
this.maxRam = maxRam;
this.bootloader = bootloader;
this.os = os;
this.enableHA = enableHA;
this.limitCpuUse = limitCpuUse;
this.vncPassword = vncPassword;
}
public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer minSpeed, Integer maxSpeed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) {
this.id = id;
this.name = instanceName;
@ -103,6 +130,10 @@ public class VirtualMachineTO {
this.cpus = cpus;
}
/**
 * Returns the legacy single CPU 'speed' value, kept for backwards
 * compatibility with 4.0.X/4.1.X management servers that do not
 * support the overcommit feature (which uses minSpeed/maxSpeed).
 */
public Integer getSpeed() {
return speed;
}
/**
 * Returns the minimum CPU speed sent by the overcommit feature;
 * may be null when talking to pre-overcommit management servers.
 */
public Integer getMinSpeed() {
return minSpeed;
}

View File

@ -0,0 +1,36 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network;
import com.cloud.network.Networks.TrafficType;
/* While configuring a zone, a user can provide a label to specify the
 * physical network to be used for a traffic type defined by CloudStack
 * (see the TrafficType enum). This label is called a traffic label. It
 * may encapsulate physical-network-specific properties such as the VLAN
 * ID or the name of a virtual network object. That name depends on the
 * hypervisor type — for example, it is the bridge name on XenServer and
 * the virtual switch name on VMware.
 */
public interface TrafficLabel {

    /**
     * @return the CloudStack traffic type this label was configured for
     */
    TrafficType getTrafficType();

    /**
     * @return the label identifying the physical network; its meaning is
     *         hypervisor-specific (e.g. a XenServer bridge name or a
     *         VMware virtual switch name)
     */
    String getNetworkLabel();
}

View File

@ -360,7 +360,10 @@ public class ApiConstants {
public static final String CUSTOM_DISK_OFF_MAX_SIZE = "customdiskofferingmaxsize";
public static final String DEFAULT_ZONE_ID = "defaultzoneid";
public static final String GUID = "guid";
public static final String VSWITCH_TYPE_GUEST_TRAFFIC = "guestvswitchtype";
public static final String VSWITCH_TYPE_PUBLIC_TRAFFIC = "publicvswitchtype";
public static final String VSWITCH_NAME_GUEST_TRAFFIC = "guestvswitchname";
public static final String VSWITCH_NAME_PUBLIC_TRAFFIC = "publicvswitchname";
public static final String EXTERNAL_SWITCH_MGMT_DEVICE_ID = "vsmdeviceid";
public static final String EXTERNAL_SWITCH_MGMT_DEVICE_NAME = "vsmdevicename";
public static final String EXTERNAL_SWITCH_MGMT_DEVICE_STATE = "vsmdevicestate";

View File

@ -91,6 +91,34 @@ public class AddClusterCmd extends BaseCmd {
@Parameter(name = ApiConstants.MEMORY_OVERCOMMIT_RATIO, type = CommandType.STRING, required = false ,description = "value of the default ram overcommit ratio, defaults to 1")
private String memoryovercommitratio;
// Cluster-level VMware vSwitch settings. Each traffic type has a "type"
// parameter (vmwaresvs / vmwaredvs) and a "name" parameter that overrides
// the zone-wide traffic label.
@Parameter(name = ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, type = CommandType.STRING, required = false, description = "Type of virtual switch used for guest traffic in the cluster. Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)")
private String vSwitchTypeGuestTraffic;
@Parameter(name = ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, type = CommandType.STRING, required = false, description = "Type of virtual switch used for public traffic in the cluster. Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)")
private String vSwitchTypePublicTraffic;
// Fix: the two "name" parameters below previously reused the
// VSWITCH_TYPE_* constants, so their API parameter names collided with the
// "type" parameters above and the name parameters could never be supplied.
// They must use VSWITCH_NAME_GUEST_TRAFFIC / VSWITCH_NAME_PUBLIC_TRAFFIC
// ("guestvswitchname" / "publicvswitchname"), as declared in ApiConstants.
@Parameter(name = ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC, type = CommandType.STRING, required = false, description = "Name of virtual switch used for guest traffic in the cluster. This would override zone wide traffic label setting.")
private String vSwitchNameGuestTraffic;
@Parameter(name = ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC, type = CommandType.STRING, required = false, description = "Name of virtual switch used for public traffic in the cluster. This would override zone wide traffic label setting.")
private String vSwitchNamePublicTraffic;
/** @return type of virtual switch used for guest traffic (vmwaresvs or vmwaredvs), or null if not supplied. */
public String getVSwitchTypeGuestTraffic() {
return vSwitchTypeGuestTraffic;
}
/** @return type of virtual switch used for public traffic (vmwaresvs or vmwaredvs), or null if not supplied. */
public String getVSwitchTypePublicTraffic() {
return vSwitchTypePublicTraffic;
}
/** @return name of the virtual switch for guest traffic; overrides the zone-wide traffic label when set. */
public String getVSwitchNameGuestTraffic() {
return vSwitchNameGuestTraffic;
}
/** @return name of the virtual switch for public traffic; overrides the zone-wide traffic label when set. */
public String getVSwitchNamePublicTraffic() {
return vSwitchNamePublicTraffic;
}
/** @return the external switch management (VSM) device IP address. NOTE(review): vsmipaddress field is declared outside this hunk — confirm. */
public String getVSMIpaddress() {
return vsmipaddress;
}

View File

@ -49,7 +49,7 @@ public class CreateStoragePoolCmd extends BaseCmd {
/////////////////////////////////////////////////////
@Parameter(name=ApiConstants.CLUSTER_ID, type=CommandType.UUID, entityType = ClusterResponse.class,
required=true, description="the cluster ID for the storage pool")
description="the cluster ID for the storage pool")
private Long clusterId;
@Parameter(name=ApiConstants.DETAILS, type=CommandType.MAP, description="the details for the storage pool")
@ -59,7 +59,7 @@ public class CreateStoragePoolCmd extends BaseCmd {
private String storagePoolName;
@Parameter(name=ApiConstants.POD_ID, type=CommandType.UUID, entityType = PodResponse.class,
required=true, description="the Pod ID for the storage pool")
description="the Pod ID for the storage pool")
private Long podId;
@Parameter(name=ApiConstants.TAGS, type=CommandType.STRING, description="the tags for the storage pool")

View File

@ -53,6 +53,9 @@ public class ListZonesByCmd extends BaseListCmd {
description="the ID of the domain associated with the zone")
private Long domainId;
@Parameter(name=ApiConstants.NAME, type=CommandType.STRING, description="the name of the zone")
private String name;
@Parameter(name=ApiConstants.SHOW_CAPACITIES, type=CommandType.BOOLEAN, description="flag to display the capacity of the zones")
private Boolean showCapacities;
@ -72,6 +75,10 @@ public class ListZonesByCmd extends BaseListCmd {
return domainId;
}
/** @return the zone name filter supplied via the 'name' API parameter, or null if not given. */
public String getName(){
return name;
}
/** @return whether zone capacities should be displayed in the response, or null if not given. */
public Boolean getShowCapacities() {
return showCapacities;
}

View File

@ -354,7 +354,7 @@
<filterreader
classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile"
value="${basedir}/../build/replace.properties" />
value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>

View File

@ -275,7 +275,7 @@
<exec executable="cp">
<arg value="-r" />
<arg value="${basedir}/../scripts" />
<arg value="${basedir}/target/generated-webapp/WEB-INF/classes/scripts" />
<arg value="${basedir}/target/generated-webapp/WEB-INF/classes/" />
</exec>
<copy
todir="${basedir}/target/generated-webapp/WEB-INF/">
@ -308,7 +308,7 @@
<filterreader
classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile"
value="${basedir}/../build/replace.properties" />
value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
@ -319,7 +319,7 @@
<globmapper from="*.in" to="*" />
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${basedir}/../build/replace.properties" />
<param type="propertiesfile" value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
@ -330,7 +330,7 @@
<globmapper from="*.in" to="*" />
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${basedir}/../build/replace.properties" />
<param type="propertiesfile" value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
@ -341,7 +341,7 @@
<globmapper from="*.in" to="*" />
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${basedir}/../build/replace.properties" />
<param type="propertiesfile" value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
@ -351,10 +351,26 @@
</fileset>
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${basedir}/../build/replace.properties" />
<param type="propertiesfile" value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
<copy overwrite="true" todir="${basedir}/target/conf">
<fileset dir="${basedir}/tomcatconf">
<include name="*.in" />
</fileset>
<globmapper from="*.in" to="*" />
<filterchain>
<filterreader classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile" value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
<copy overwrite="true" todir="${basedir}/target/conf">
<fileset dir="${basedir}/tomcatconf">
<exclude name="*.in" />
</fileset>
</copy>
</target>
</configuration>
</execution>

View File

@ -16,23 +16,11 @@
# specific language governing permissions and limitations
# under the License.
SYSTEMJARS="@SYSTEMJARS@"
SCP=$(build-classpath $SYSTEMJARS 2>/dev/null) ; if [ $? != 0 ] ; then export SCP="@SYSTEMCLASSPATH@" ; fi
MCP="@MSCLASSPATH@"
DCP="@DEPSCLASSPATH@"
CLASSPATH=$SCP:$DCP:$MCP:@MSCONF@:@SETUPDATADIR@
for jarfile in "@PREMIUMJAVADIR@"/* ; do
if [ ! -e "$jarfile" ] ; then continue ; fi
CLASSPATH=$jarfile:$CLASSPATH
done
for plugin in "@PLUGINJAVADIR@"/* ; do
if [ ! -e "$plugin" ] ; then continue ; fi
CLASSPATH=$plugin:$CLASSPATH
done
for vendorconf in "@MSCONF@"/vendor/* ; do
if [ ! -d "$vendorconf" ] ; then continue ; fi
CLASSPATH=$vendorconf:$CLASSPATH
# Nowadays we include things on the classpath via the webapp's
# WEB-INF/lib directory.
for jar in /usr/share/tomcat6/lib/*.jar; do
CLASSPATH=$jar:$CLASSPATH
done
CLASSPATH=${CLASSPATH}/usr/share/cloudstack-management/webapps/client/WEB-INF/lib/mysql-connector-java-5.1.21.jar
export CLASSPATH
PATH=/sbin:/usr/sbin:$PATH
export PATH

View File

@ -101,16 +101,20 @@
<bean id="FirstFitRouting" class="com.cloud.agent.manager.allocator.impl.FirstFitRoutingAllocator">
<property name="name" value="FirstFitRouting"/>
</bean>
<!--
Storage pool allocators
-->
<bean id="LocalStoragePoolAllocator" class="com.cloud.storage.allocator.LocalStoragePoolAllocator">
<bean id="LocalStoragePoolAllocator" class="org.apache.cloudstack.storage.allocator.LocalStoragePoolAllocator">
<property name="name" value="LocalStorage"/>
</bean>
<bean id="FirstFitStoragePoolAllocator" class="com.cloud.storage.allocator.FirstFitStoragePoolAllocator">
<!--
<bean id="FirstFitStoragePoolAllocator" class="org.apache.cloudstack.storage.allocator.FirstFitStoragePoolAllocator">
<property name="name" value="Storage"/>
</bean>
-->
<bean id="UserConcentratedAllocator" class="com.cloud.agent.manager.allocator.impl.UserConcentratedAllocator">
<property name="name" value="User First"/>

View File

@ -66,10 +66,6 @@ under the License.
<!--adapter name="FirstFitRouting" class="com.cloud.agent.manager.allocator.impl.RecreateHostAllocator"/-->
<!--adapter name="FirstFit" class="com.cloud.agent.manager.allocator.impl.FirstFitAllocator"/-->
</adapters>
<adapters key="com.cloud.storage.allocator.StoragePoolAllocator">
<adapter name="LocalStorage" class="com.cloud.storage.allocator.LocalStoragePoolAllocator"/>
<adapter name="Storage" class="com.cloud.storage.allocator.FirstFitStoragePoolAllocator"/>
</adapters>
<adapters key="com.cloud.agent.manager.allocator.PodAllocator">
<adapter name="User First" class="com.cloud.agent.manager.allocator.impl.UserConcentratedAllocator"/>
</adapters>

View File

@ -311,4 +311,8 @@ public class DiskOfferingVO implements DiskOffering {
/** @return the sort key for this disk offering. */
public int getSortKey() {
return sortKey;
}
/** Sets the recreatable flag. NOTE(review): semantics of 'recreatable' are defined by the DiskOffering interface, not visible here — confirm. */
public void setRecreatable(boolean recreatable) {
this.recreatable = recreatable;
}
}

View File

@ -57,6 +57,7 @@ public class RawImageProcessor extends AdapterBase implements Processor {
String imgPath = templatePath + File.separator + templateName + "." + ImageFormat.RAW.getFileExtension();
if (!_storage.exists(imgPath)) {
s_logger.debug("Unable to find raw image:" + imgPath);
return null;
}
FormatInfo info = new FormatInfo();
info.format = ImageFormat.RAW;

6
debian/README vendored
View File

@ -1,6 +0,0 @@
The Debian Package
----------------------------
This is part of the Cloud Stack collection of packages.
-- Manuel Amador (Rudd-O) <manuel@vmops.com> Thu, 25 Mar 2010 15:12:06 -0700

6
debian/changelog vendored
View File

@ -1,3 +1,9 @@
cloudstack (4.1.0-incubating-0.0.snapshot) unstable; urgency=low
* Incorporate incubating into version, remove epoch
-- Noa Resare <noa@spotify.com> Tue, 05 Feb 2013 18:05:28 +0000
cloud (1:4.0.0-1) unstable; urgency=low
* Bumping the version to 4.0.0

View File

@ -1,17 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

View File

@ -1,21 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/etc/cloud/cli/commands.xml
/usr/bin/cloud-grab-dependent-library-versions
/usr/bin/cloud-tool
/usr/bin/cloudvoladm

View File

@ -1,19 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/share/cloud/management/webapps/client/*

View File

@ -1,58 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/etc/cloud/management/catalina.policy
/etc/cloud/management/catalina.properties
/etc/cloud/management/commands.properties
/etc/cloud/management/components.xml
/etc/cloud/management/context.xml
/etc/cloud/management/db.properties
/etc/cloud/management/environment.properties
/etc/cloud/management/ehcache.xml
/etc/cloud/management/log4j-cloud.xml
/etc/cloud/management/logging.properties
/etc/cloud/management/server.xml
/etc/cloud/management/tomcat6.conf
/etc/cloud/management/classpath.conf
/etc/cloud/management/tomcat-users.xml
/etc/cloud/management/web.xml
/etc/cloud/management/server-nonssl.xml
/etc/cloud/management/tomcat6-nonssl.conf
/etc/cloud/management/virtualrouter_commands.properties
/etc/cloud/management/f5bigip_commands.properties
/etc/cloud/management/junipersrx_commands.properties
/etc/cloud/management/netscalerloadbalancer_commands.properties
/etc/cloud/management/cisconexusvsm_commands.properties
/etc/cloud/management/Catalina
/etc/cloud/management/Catalina/localhost
/etc/cloud/management/Catalina/localhost/client
/etc/init.d/cloud-management
/usr/share/cloud/management/bin
/usr/share/cloud/management/conf
/usr/share/cloud/management/lib
/usr/share/cloud/management/logs
/usr/share/cloud/management/temp
/usr/share/cloud/management/work
/var/cache/cloud/management
/var/cache/cloud/management/work
/var/cache/cloud/management/temp
/var/log/cloud/management
/var/lib/cloud/mnt
/var/lib/cloud/management
/usr/bin/cloud-setup-management
/usr/bin/cloud-update-xenserver-licenses
/etc/cloud/management/commands-ext.properties

View File

@ -1,49 +0,0 @@
#!/bin/sh -e
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
case "$1" in
configure)
if ! id cloud > /dev/null 2>&1 ; then
adduser --system --home /var/lib/cloud/management --no-create-home \
--group --disabled-password --shell /bin/sh cloud
# update me in all the .postinst that you can find me in, as well
fi
for i in /var/lib/cloud/mnt /var/cache/cloud/management \
/var/cache/cloud/management/work /var/cache/cloud/management/temp \
/var/log/cloud/management /etc/cloud/management/Catalina \
/etc/cloud/management/Catalina/localhost /var/lib/cloud/management /etc/cloud/management/Catalina/localhost/client
do
chmod 0770 $i
chgrp cloud $i
done
for i in /etc/cloud/management/db.properties
do
chmod 0640 $i
chgrp cloud $i
done
if [ "$2" = "" ] ; then # no recently configured version, this is a first install
/usr/sbin/update-rc.d cloud-management defaults || true
fi
;;
esac
#DEBHELPER#

View File

@ -1,19 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/share/java/cloud-core.jar

View File

@ -1,34 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/share/java/ehcache-1.5.0.jar
/usr/share/java/mail-1.4.jar
/usr/share/java/httpcore-4.0.jar
/usr/share/java/log4j-*.jar
/usr/share/java/apache-log4j-extras-1.1.jar
/usr/share/java/trilead-ssh2-build213-svnkit-1.3-patch.jar
/usr/share/java/xmlrpc-common-3.*.jar
/usr/share/java/xmlrpc-client-3.*.jar
/usr/share/java/jstl-1.2.jar
/usr/share/java/axis2-1.5.1.jar
/usr/share/java/wsdl4j-1.6.2.jar
/usr/share/java/bcprov-*.jar
/usr/share/java/jasypt-1.*.jar
/usr/share/java/ejb-api-3.0.jar
/usr/share/java/javax.persistence-2.0.0.jar
/usr/share/java/gson-1.7.1.jar
/usr/share/java/xapi-5.6.100-1-SNAPSHOT.jar

View File

@ -1,18 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/lib/python*/dist-packages/cloud*

View File

@ -1,27 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/lib/cloud/common/scripts/installer/*
/usr/lib/cloud/common/scripts/network/*
/usr/lib/cloud/common/scripts/storage/*
/usr/lib/cloud/common/scripts/util/*
/usr/lib/cloud/common/scripts/vm/network/*
/usr/lib/cloud/common/scripts/vm/systemvm/*
/usr/lib/cloud/common/scripts/vm/pingtest.sh
/usr/lib/cloud/common/scripts/vm/hypervisor/kvm/*
/usr/lib/cloud/common/scripts/vm/hypervisor/versions.sh
/usr/lib/cloud/common/scripts/vm/hypervisor/xenserver/*

View File

@ -1,32 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/share/java/cloud-server.jar
/usr/share/java/cloud-ovm.jar
/etc/cloud/server/*
/usr/share/java/cloud-dp-user-concentrated-pod.jar
/usr/share/java/cloud-dp-user-dispersing.jar
/usr/share/java/cloud-host-allocator-random.jar
/usr/share/java/cloud-plugin-elb.jar
/usr/share/java/cloud-plugin-ovs.jar
/usr/share/java/cloud-plugin-nicira-nvp.jar
/usr/share/java/cloud-plugin-bigswitch-vns.jar
/usr/share/java/cloud-storage-allocator-random.jar
/usr/share/java/cloud-user-authenticator-ldap.jar
/usr/share/java/cloud-user-authenticator-md5.jar
/usr/share/java/cloud-user-authenticator-plaintext.jar
/usr/share/java/cloud-plugin-hypervisor-xen.jar

View File

@ -1,18 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/lib/cloud/common/vms/systemvm.iso

View File

@ -1,23 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/share/java/cloud-usage.jar
/etc/init.d/cloud-usage
/var/log/cloud/usage
/etc/cloud/usage/usage-components.xml
/etc/cloud/usage/log4j-cloud_usage.xml
/etc/cloud/usage/db.properties

View File

@ -1,47 +0,0 @@
#!/bin/sh -e
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
case "$1" in
configure)
if ! id cloud > /dev/null 2>&1 ; then
adduser --system --home /var/lib/cloud/management --no-create-home \
--group --disabled-password --shell /bin/sh cloud
# update me in cloud-client.postinst as well
fi
for i in /var/log/cloud/usage
do
chmod 0770 $i
chgrp cloud $i
done
for i in /etc/cloud/usage/db.properties
do
chmod 0640 $i
chgrp cloud $i
done
if [ "$2" = "" ] ; then # no recently configured version, this is a first install
/usr/sbin/update-rc.d cloud-usage defaults || true
fi
;;
esac
#DEBHELPER#

View File

@ -1,22 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
/usr/share/java/cloud-utils.jar
/usr/share/java/cloud-api.jar
/usr/share/doc/cloud/version-info
/usr/bin/cloud-sccs
/usr/bin/cloud-gitrevs

View File

@ -16,10 +16,10 @@
# under the License.
/etc/cloud/agent/agent.properties
/etc/cloud/agent/developer.properties.template
/etc/cloud/agent/environment.properties
/etc/cloud/agent/log4j-cloud.xml
/etc/init.d/cloud-agent
/usr/bin/cloud-setup-agent
/usr/bin/cloud-ssh
/var/log/cloud/agent
/var/log/cloudstack/agent
/usr/share/cloudstack-agent/lib/cloudstack-agent-kvm.jar

View File

@ -15,3 +15,4 @@
# specific language governing permissions and limitations
# under the License.
/var/log/cloudstack/awsapi

View File

@ -13,5 +13,4 @@
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# under the License.

View File

@ -15,5 +15,19 @@
# specific language governing permissions and limitations
# under the License.
/usr/share/java/cloud-agent.jar
/usr/share/java/cloud-plugin-hypervisor-kvm.jar
/usr/share/cloudstack-common/lib/cloudstack-core.jar
/usr/share/cloudstack-common/lib/cloudstack-api.jar
/usr/share/cloudstack-common/vms/systemvm.iso
/usr/share/cloudstack-common/scripts/installer/*
/usr/share/cloudstack-common/scripts/network/*
/usr/share/cloudstack-common/scripts/storage/*
/usr/share/cloudstack-common/scripts/util/*
/usr/share/cloudstack-common/scripts/vm/network/*
/usr/share/cloudstack-common/scripts/vm/systemvm/*
/usr/share/cloudstack-common/scripts/vm/pingtest.sh
/usr/share/cloudstack-common/scripts/vm/hypervisor/kvm/*
/usr/share/cloudstack-common/scripts/vm/hypervisor/versions.sh
/usr/share/cloudstack-common/scripts/vm/hypervisor/xenserver/*
/usr/bin/cloud-set-guest-password
/usr/bin/cloud-set-guest-sshkey
/usr/lib/python2.?/*-packages/*

View File

@ -13,5 +13,4 @@
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# under the License.

View File

@ -15,11 +15,17 @@
# specific language governing permissions and limitations
# under the License.
/etc/cloud/server/*
/etc/cloud/management/*
/etc/init.d/cloud-management
/var/cache/cloudstack/management
/var/cache/cloudstack/management/work
/var/cache/cloudstack/management/temp
/var/log/cloudstack/management
/var/lib/cloud/mnt
/var/lib/cloud/management
/usr/bin/cloud-update-xenserver-licenses
/usr/bin/cloud-setup-management
/usr/bin/cloud-setup-databases
/usr/bin/cloud-migrate-databases
/usr/bin/cloud-set-guest-password
/usr/bin/cloud-set-guest-sshkey
/usr/share/cloud/setup/*.sql
/usr/share/cloud/setup/*.sh
/usr/share/cloud/setup/server-setup.xml
/usr/share/cloud/setup/db/*.sql
/usr/share/cloudstack-management/*

View File

@ -16,19 +16,11 @@
# specific language governing permissions and limitations
# under the License.
case "$1" in
configure)
for i in /var/log/cloud/agent
do
chmod 0770 $i
done
if [ "$2" = "" ] ; then # no recently configured version, this is a first install
/usr/sbin/update-rc.d cloud-agent defaults || true
fi
;;
esac
if [ "$1" = configure ]; then
if ! getent passwd cloud >/dev/null; then
adduser --quiet --system --group --no-create-home --home /usr/share/cloudstack-manager cloud
fi
chown cloud /var/log/cloudstack/management
fi
#DEBHELPER#

View File

@ -5,9 +5,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@ -15,4 +15,7 @@
# specific language governing permissions and limitations
# under the License.
/usr/share/java/libvirt-0.4.9.jar
/usr/share/cloudstack-usage/lib/cloudstack-usage.jar
/etc/init.d/cloud-usage
/var/log/cloudstack/usage
/etc/cloud/usage/*

133
debian/control vendored
View File

@ -1,118 +1,51 @@
Source: cloud
Source: cloudstack
Section: libs
Priority: extra
Maintainer: Wido den Hollander <wido@widodh.nl>
Build-Depends: debhelper (>= 7), openjdk-6-jdk, tomcat6, libws-commons-util-java, libcommons-codec-java (>= 1.5), libcommons-httpclient-java (>= 3.1), libservlet2.5-java, genisoimage, python-mysqldb, maven3 | maven (>= 3), liblog4j1.2-java (>= 1.2.16)
Build-Depends: debhelper (>= 7), openjdk-6-jdk, tomcat6, genisoimage,
python-mysqldb, maven3 | maven (>= 3), python (>= 2.6.6-3~)
Standards-Version: 3.8.1
Homepage: http://www.cloudstack.org/
Package: cloud-deps
Architecture: any
Depends: openjdk-6-jre, libcommons-codec-java (>= 1.5), libcommons-httpclient-java (>= 3.1)
Description: CloudStack library dependencies
This package contains a number of third-party dependencies
not shipped by distributions, required to run the CloudStack
Management Server.
Package: cloudstack-common
Architecture: all
Depends: bash, genisoimage
Conflicts: cloud-scripts, cloud-utils, cloud-system-iso, cloud-console-proxy, cloud-daemonize, cloud-deps, cloud-python, cloud-setup
Description: A common package which contains files which are shared by several CloudStack packages
Package: cloud-agent-deps
Architecture: any
Depends: openjdk-6-jre, cloud-deps (= ${source:Version})
Description: CloudStack agent library dependencies
This package contains a number of third-party dependencies
not shipped by distributions, required to run the CloudStack
Agent.
Package: cloud-utils
Architecture: any
Depends: openjdk-6-jre, python, libcglib-java (>= 2.2.2), libjsch-java (>= 0.1.42), libbackport-util-concurrent-java (>= 3.1), libcommons-dbcp-java (>= 1.4), libcommons-pool-java (>= 1.5.6)
Description: CloudStack utility library
The CloudStack utility libraries provide a set of Java classes used
in the CloudStack environment.
Package: cloud-client-ui
Architecture: any
Depends: openjdk-6-jre, cloud-client (= ${source:Version})
Description: CloudStack management server UI
The CloudStack management server is the central point of coordination,
management, and intelligence in the CloudStack Cloud Stack. This package
is a requirement of the cloud-client package, which installs the
CloudStack management server.
Package: cloud-server
Architecture: any
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-scripts (= ${source:Version}), libservlet2.5-java
Package: cloudstack-management
Architecture: all
Depends: cloudstack-common (= ${source:Version}), tomcat6, sysvinit-utils, chkconfig, sudo, jsvc, python-mysqldb, python-paramiko, augeas-tools
Conflicts: cloud-server, cloud-client, cloud-client-ui
Description: CloudStack server library
The CloudStack server libraries provide a set of Java classes used
in the CloudStack management server.
The CloudStack management server
Package: cloud-scripts
Replaces: cloud-agent-scripts
Architecture: any
Depends: openjdk-6-jre, python, bash, bzip2, gzip, unzip, nfs-common, openssh-client, lsb-release
Description: CloudStack scripts
This package contains a number of scripts needed for the CloudStack Agent and Management Server.
Both the CloudStack Agent and Management server depend on this package
Package: cloud-core
Architecture: any
Depends: openjdk-6-jre, cloud-utils (= ${source:Version})
Description: CloudStack core library
The CloudStack core libraries provide a set of Java classes used
in the CloudStack Cloud Stack.
Package: cloud-client
Architecture: any
Depends: openjdk-6-jre, cloud-deps (= ${source:Version}), cloud-utils (= ${source:Version}), cloud-server (= ${source:Version}), cloud-client-ui (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-python (= ${source:Version}), tomcat6, libws-commons-util-java, sysvinit-utils, chkconfig, sudo, jsvc, python-mysqldb, python-paramiko, augeas-tools, genisoimage, cloud-system-iso, libmysql-java (>= 5.1)
Description: CloudStack client
The CloudStack management server is the central point of coordination,
management, and intelligence in the CloudStack Cloud Stack. This package
is required for the management server to work.
Package: cloud-setup
Architecture: any
Depends: openjdk-6-jre, python, cloud-utils (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-python (= ${source:Version}), python-mysqldb
Description: CloudStack client
The CloudStack setup tools let you set up your Management Server and Usage Server.
Package: cloud-python
Architecture: any
Depends: python
Description: CloudStack Python library
The CloudStack Python library contains a few Python modules that the
CloudStack uses.
Package: cloud-agent-libs
Architecture: any
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version})
Description: CloudStack agent libraries
The CloudStack agent libraries are used by the Cloud Agent.
Package: cloud-agent
Architecture: any
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-agent-deps (= ${source:Version}), cloud-python (= ${source:Version}), cloud-agent-libs (= ${source:Version}), cloud-scripts (= ${source:Version}), cloud-system-iso (= ${source:Version}), libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, liblog4j1.2-java (>= 1.2.16), libjna-java, wget, jsvc, lsb-base (>= 3.2)
Package: cloudstack-agent
Architecture: all
Depends: openjdk-6-jre, cloudstack-common (= ${source:Version}), lsb-base (>= 3.2), openssh-client, libvirt0, sysvinit-utils, chkconfig, qemu-kvm, libvirt-bin, uuid-runtime, rsync, grep, iproute, ebtables, vlan, wget, jsvc
Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
Description: CloudStack agent
The CloudStack agent is in charge of managing shared computing resources in
a CloudStack powered cloud. Install this package if this computer
will participate in your cloud as a KVM HyperVisor.
Package: cloud-system-iso
Architecture: any
Description: CloudStack system iso
The CloudStack agent is in charge of managing shared computing resources in
a CloudStack powered cloud. Install this package if this computer
will participate in your cloud.
Package: cloud-usage
Architecture: any
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-client (= ${source:Version}), jsvc
Package: cloudstack-usage
Architecture: all
Depends: openjdk-6-jre, cloudstack-common (= ${source:Version}), jsvc
Description: CloudStack usage monitor
The CloudStack usage monitor provides usage accounting across the entire cloud for
cloud operators to charge based on usage parameters.
Package: cloud-cli
Provides: cloud-cli
Architecture: any
Depends: openjdk-6-jre, cloud-utils (= ${source:Version}), cloud-core (= ${source:Version}), cloud-deps (= ${source:Version}), cloud-server (= ${source:Version}), cloud-setup (= ${source:Version}), cloud-client (= ${source:Version})
Description: CloudStack commandline tool
The CloudStack command-line tool for invoking the API
Package: cloudstack-awsapi
Architecture: all
Depends: cloudstack-common (= ${source:Version}), cloudstack-management (= ${source:Version})
Description: CloudStack Amazon EC2 API
Package: cloudstack-cli
Architecture: all
Depends: cloudstack-common (= ${source:Version})
Description: The CloudStack CLI called CloudMonkey
Package: cloudstack-docs
Architecture: all
Description: The CloudStack documentation

124
debian/rules vendored
View File

@ -10,6 +10,9 @@
# Modified to make a template file for a multi-binary package with separated
# build-arch and build-indep targets by Bill Allombert 2001
DEBVERS := $(shell dpkg-parsechangelog | sed -n -e 's/^Version: //p')
VERSION := $(shell echo '$(DEBVERS)' | sed -e 's/^[[:digit:]]*://' -e 's/[~-].*//')
# Uncomment this to turn on verbose mode.
export DH_VERBOSE=1
@ -19,55 +22,118 @@ export DH_OPTIONS
configure: configure-stamp
configure-stamp:
dh_testdir
cp packaging/debian/replace.properties build/replace.properties
echo VERSION=$VERSION >> build/replace.properties
cp packaging/debian/replace.properties replace.properties.tmp
echo VERSION=${VERSION} >> replace.properties.tmp
touch configure-stamp
build: build-arch
build: build-indep
build-arch: build-arch-stamp
build-arch-stamp: configure-stamp
mvn package -Dsystemvm
build-indep: build-indep-stamp
build-indep-stamp: configure
mvn package -DskipTests -Dsystemvm \
-Dcs.replace.properties=replace.properties.tmp
touch $@
clean:
dh_testdir
dh_testroot
rm -f build-arch-stamp build-indep-stamp configure-stamp
rm -f replace.properties.tmp
dh_clean
install:
dh_testdir
dh_testroot
dh_prep -s
mkdir -p debian/tmp/usr/bin
mkdir -p debian/tmp/usr/share/cloud/management
mkdir -p debian/tmp/var/log/cloud
mkdir debian/tmp/var/log/cloud/managament
mkdir debian/tmp/var/log/cloud/awsapi
mkdir debian/tmp/var/log/cloud/agent
mkdir debian/tmp/var/log/cloud/ipallocator
mkdir debian/tmp/var/log/cloud/usage
# Common packages
mkdir -p debian/tmp/etc/cloud
mkdir -p debian/tmp/etc/init.d
mkdir -p debian/tmp/var/cache/cloudstack
mkdir -p debian/tmp/var/log/cloudstack
mkdir -p debian/tmp/var/lib/cloud
mkdir -p debian/tmp/usr/bin
mkdir -p debian/tmp/usr/share/cloud
mkdir -p debian/tmp/usr/lib/cloud
# cloudstack-agent
mkdir debian/tmp/etc/cloud/agent
mkdir debian/tmp/var/log/cloudstack/agent
install -D plugins/hypervisors/kvm/target/cloud-plugin-hypervisor-kvm-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-agent/lib/cloudstack-agent-kvm.jar
install -D packaging/debian/init/cloud-agent debian/tmp/etc/init.d/
install -D agent/bindir/cloud-setup-agent.in debian/tmp/usr/bin/cloud-setup-agent
install -D agent/bindir/cloud-ssh.in debian/tmp/usr/bin/cloud-ssh
cp agent/target/transformed/* debian/tmp/etc/cloud/agent
# cloudstack-management
mkdir debian/tmp/etc/cloud/server
mkdir debian/tmp/etc/cloud/management
mkdir debian/tmp/etc/cloud/usage
mkdir -p debian/tmp/var/cache/cloud
mkdir debian/tmp/var/cache/cloud/management
mkdir -p debian/tmp/usr/share/cloud
mkdir debian/tmp/usr/share/cloud/setup
mkdir -p debian/tmp/usr/share/cloud/management/webapps/client
cp -r client/target/utilities/scripts/db/* debian/tmp/usr/share/cloud/setup/
cp -r client/target/cloud-client-ui-*-SNAPSHOT/* debian/tmp/usr/share/cloud/management/webapps/client/
dh_installdirs -s
dh_install -s
mkdir -p debian/tmp/usr/share/cloudstack-management
mkdir -p debian/tmp/usr/share/cloudstack-management/webapps/client
mkdir debian/tmp/usr/share/cloudstack-management/setup
mkdir debian/tmp/var/log/cloudstack/management
mkdir debian/tmp/var/cache/cloudstack/management
mkdir debian/tmp/var/cache/cloudstack/management/work
mkdir debian/tmp/var/cache/cloudstack/management/temp
mkdir debian/tmp/var/log/cloudstack/ipallocator
mkdir debian/tmp/var/lib/cloud/management
mkdir debian/tmp/var/lib/cloud/mnt
cp -r client/target/utilities/scripts/db/* debian/tmp/usr/share/cloudstack-management/setup/
cp -r client/target/cloud-client-ui-4.1.0-SNAPSHOT/* debian/tmp/usr/share/cloudstack-management/webapps/client/
cp server/target/conf/* debian/tmp/etc/cloud/server/
cp client/target/conf/* debian/tmp/etc/cloud/management/
ln -s tomcat6-nonssl.conf debian/tmp/etc/cloud/management/tomcat6.conf
mkdir -p debian/tmp/etc/cloud/management/Catalina/localhost/client
install -D packaging/debian/init/cloud-management debian/tmp/etc/init.d/cloud-management
install -D client/bindir/cloud-update-xenserver-licenses.in debian/tmp/usr/bin/cloud-update-xenserver-licenses
install -D server/target/cloud-server-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-management/lib/cloudstack-server.jar
ln -s /usr/share/tomcat6/bin debian/tmp/usr/share/cloudstack-management/bin
ln -s ../../../etc/cloud/management debian/tmp/usr/share/cloudstack-management/conf
ln -s /usr/share/tomcat6/lib debian/tmp/usr/share/cloudstack-management/lib
ln -s ../../../var/log/cloudstack/management debian/tmp/usr/share/cloudstack-management/logs
ln -s ../../../var/cache/cloudstack/management/temp debian/tmp/usr/share/cloudstack-management/temp
ln -s ../../../var/cache/cloudstack/management/work debian/tmp/usr/share/cloudstack-management/work
binary: binary-common
binary-common:
dh_testdir
dh_testroot
# cloudstack-common
mkdir -p debian/tmp/usr/share/cloudstack-common
mkdir debian/tmp/usr/share/cloudstack-common/scripts
mkdir debian/tmp/usr/share/cloudstack-common/setup
cp -r scripts/installer debian/tmp/usr/share/cloudstack-common/scripts
cp -r scripts/network debian/tmp/usr/share/cloudstack-common/scripts
cp -r scripts/storage debian/tmp/usr/share/cloudstack-common/scripts
cp -r scripts/util debian/tmp/usr/share/cloudstack-common/scripts
cp -r scripts/vm debian/tmp/usr/share/cloudstack-common/scripts
install -D client/target/utilities/bin/cloud-migrate-databases debian/tmp/usr/bin
install -D client/target/utilities/bin/cloud-set-guest-password debian/tmp/usr/bin
install -D client/target/utilities/bin/cloud-set-guest-sshkey debian/tmp/usr/bin
install -D client/target/utilities/bin/cloud-setup-databases debian/tmp/usr/bin
install -D client/target/utilities/bin/cloud-setup-management debian/tmp/usr/bin
install -D console-proxy/dist/systemvm.iso debian/tmp/usr/share/cloudstack-common/vms/systemvm.iso
install -D core/target/cloud-core-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-common/lib/cloudstack-core.jar
install -D api/target/cloud-api-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-common/lib/cloudstack-api.jar
# cloudstack-python
mkdir -p debian/tmp/usr/lib/python2.7/dist-packages
cp -r python/lib/cloud* debian/tmp/usr/lib/python2.7/dist-packages
# cloudstack-usage
mkdir debian/tmp/etc/cloud/usage
mkdir debian/tmp/var/log/cloudstack/usage
install -D usage/target/cloud-usage-4.1.0-SNAPSHOT.jar debian/tmp/usr/share/cloudstack-usage/lib/cloudstack-usage.jar
cp usage/target/transformed/* debian/tmp/etc/cloud/usage/
ln -s ../management/db.properties debian/tmp/etc/cloud/usage/db.properties
install -D packaging/debian/init/cloud-usage debian/tmp/etc/init.d
# cloudstack-awsapi
mkdir debian/tmp/var/log/cloudstack/awsapi
dh_installdirs
dh_install
dh_python2
binary: install
dh_install
dh_installchangelogs
dh_installdocs LICENSE
dh_installdocs DISCLAIMER

View File

@ -0,0 +1,110 @@
<?xml version='1.0' encoding='utf-8' ?>
<!DOCTYPE section PUBLIC "-//OASIS//DTD DocBook XML V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd" [
<!ENTITY % BOOK_ENTITIES SYSTEM "cloudstack.ent">
%BOOK_ENTITIES;
]>
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<section id="event-framework">
<title>Event Notification</title>
<para>Event notification framework provides a means for the Management Server components to
publish and subscribe to &PRODUCT; events. Event notification is achieved by implementing the
concept of event bus abstraction in the Management Server. An event bus is introduced in the
Management Server that allows the &PRODUCT; components and extension plug-ins to subscribe to the
events by using the Advanced Message Queuing Protocol (AMQP) client. In &PRODUCT;, a default
implementation of event bus is provided as a plug-in that uses the RabbitMQ AMQP client. The
AMQP client pushes the published events to a compatible AMQP server. Therefore all the &PRODUCT;
events are published to an exchange in the AMQP server. </para>
<para>A new event for state change, resource state change, is introduced as part of Event
notification framework. Every resource, such as user VM, volume, NIC, network, public IP,
snapshot, and template, is associated with a state machine and generates events as part of the
state change. That implies that a change in the state of a resource results in a state change
event, and the event is published in the corresponding state machine on the event bus. All the
&PRODUCT; events (alerts, action events, usage events) and the additional category of resource
state change events, are published on to the events bus.</para>
<formalpara>
<title>Use Cases</title>
<para>The following are some of the use cases:</para>
</formalpara>
<itemizedlist>
<listitem>
<para>Usage or Billing Engines: A third-party cloud usage solution can implement a plug-in
that can connect to &PRODUCT; to subscribe to &PRODUCT; events and generate usage data. The
usage data is consumed by their usage software.</para>
</listitem>
<listitem>
<para>AMQP plug-in can place all the events on the a message queue, then a AMQP message broker
can provide topic-based notification to the subscribers.</para>
</listitem>
<listitem>
<para>Publish and Subscribe notification service can be implemented as a pluggable service in
&PRODUCT; that can provide rich set of APIs for event notification, such as topics-based
subscription and notification. Additionally, the pluggable service can deal with
multi-tenancy, authentication, and authorization issues.</para>
</listitem>
</itemizedlist>
<formalpara>
<title>Configuration</title>
<para>As a &PRODUCT; administrator, perform the following one-time configuration to enable event
notification framework. At run time no changes can control the behaviour.</para>
</formalpara>
<orderedlist>
<listitem>
<para>Open <filename>'componentContext.xml</filename>.</para>
</listitem>
<listitem>
<para>Define a bean named <filename>eventNotificationBus</filename> as follows:</para>
<itemizedlist>
<listitem>
<para>name : Specify a name for the bean.</para>
</listitem>
<listitem>
<para>server : The name or the IP address of the RabbitMQ AMQP server.</para>
</listitem>
<listitem>
<para>port : The port on which RabbitMQ server is running.</para>
</listitem>
<listitem>
<para>username : The username associated with the account to access the RabbitMQ
server.</para>
</listitem>
<listitem>
<para>password : The password associated with the username of the account to access the
RabbitMQ server.</para>
</listitem>
<listitem>
<para>exchange : The exchange name on the RabbitMQ server where &PRODUCT; events are
published.</para>
<para>A sample bean is given below:</para>
<programlisting>&lt;bean id="eventNotificationBus" class="org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus"&gt;
&lt;property name="name" value="eventNotificationBus"/&gt;
&lt;property name="server" value="127.0.0.1"/&gt;
&lt;property name="port" value="5672"/&gt;
&lt;property name="username" value="guest"/&gt;
&lt;property name="password" value="guest"/&gt;
&lt;property name="exchange" value="cloudstack-events"/&gt;
&lt;/bean&gt;</programlisting>
<para>The <filename>eventNotificationBus</filename> bean represents the
<filename>org.apache.cloudstack.mom.rabbitmq.RabbitMQEventBus</filename> class.</para>
</listitem>
</itemizedlist>
</listitem>
<listitem>
<para>Restart the Management Server.</para>
</listitem>
</orderedlist>
</section>

View File

@ -21,11 +21,17 @@
specific language governing permissions and limitations
under the License.
-->
<section id="events">
<title>Events</title>
<xi:include href="events-log.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="standard-events.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="long-running-job-events.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<xi:include href="event-log-queries.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />
<title>Events</title>
<para>An event is essentially a significant or meaningful change in the state of both virtual and
physical resources associated with a cloud environment. Events are used by monitoring systems,
usage and billing systems, or any other event-driven workflow systems to discern a pattern and
make the right business decision. In &PRODUCT; an event could be a state change of virtual or
physical resources, an action performed by a user (action events), or policy-based events
(alerts).</para>
<xi:include href="events-log.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="event-framework.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="standard-events.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="long-running-job-events.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<xi:include href="event-log-queries.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
</section>

View File

@ -16,6 +16,6 @@
# specific language governing permissions and limitations
# under the License.
doc_url: "http://docs.cloudstack.org"
doc_url: "http://incubator.apache.org/cloudstack/docs"
prod_url: "http://cloudstack.org"

View File

@ -53,6 +53,15 @@
completeness or stability of the code, it does indicate that the project
has yet to be fully endorsed by the ASF.
</para>
<para>
CloudStack® is a registered trademark of the Apache Software Foundation.
</para>
<para>
Apache CloudStack, the CloudStack word design, the Apache CloudStack word design, and the cloud monkey logo are trademarks of the
Apache Software Foundation.
</para>
</legalnotice>

View File

@ -1,5 +1,6 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<!-- Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
@ -33,7 +34,7 @@
viewBox="0 0 299 100"
id="Layer_1"
xml:space="preserve"
inkscape:version="0.48.1 r9760"
inkscape:version="0.48.4 r9939"
sodipodi:docname="title_logo.svg"><sodipodi:namedview
pagecolor="#ffffff"
bordercolor="#666666"
@ -43,20 +44,20 @@
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
inkscape:window-width="1280"
inkscape:window-height="753"
inkscape:window-width="1366"
inkscape:window-height="748"
id="namedview59"
showgrid="false"
inkscape:zoom="4.4799236"
inkscape:cx="216.06083"
inkscape:cy="50"
inkscape:window-x="-3"
inkscape:window-x="-2"
inkscape:window-y="-3"
inkscape:window-maximized="1"
inkscape:current-layer="g3224" /><metadata
inkscape:current-layer="Layer_1" /><metadata
id="metadata3261"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs
rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title /></cc:Work></rdf:RDF></metadata><defs
id="defs3259" />
<pattern
overflow="visible"
@ -349,4 +350,21 @@
</svg>
<text
xml:space="preserve"
style="font-size:11.06608772px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#8a8a8a;fill-opacity:0.94901961;stroke:none;font-family:Sans"
x="267.11676"
y="14.821712"
id="text3035"
sodipodi:linespacing="125%"
transform="scale(1.0307429,0.97017406)"><tspan
sodipodi:role="line"
id="tspan3037"
x="267.11676"
y="14.821712">™</tspan></text>
</svg>

Before

Width:  |  Height:  |  Size: 14 KiB

After

Width:  |  Height:  |  Size: 14 KiB

View File

@ -18,7 +18,7 @@
Name: publican-cloudstack
Summary: Common documentation files for Apache %{brand}
Version: 0.4
Version: 0.5
Release: 1%{?dist}
License: ASLv2
Group: Applications/Text
@ -49,12 +49,14 @@ rm -rf $RPM_BUILD_ROOT
%files
%defattr(-,root,root,-)
%doc README
%doc LICENSE
%doc NOTICE
%{_datadir}/publican/Common_Content/%{brand}
%changelog
* Tue Feb 26 2013 David Nalley <david@gnsa.us> 0.5-1
- adding trademark information
* Sun Sep 22 2012 David Nalley <david@gnsa.us> 0.4-1
- added 'Apache' to the description
- moved the issue tracker url to the ASF jira instance

View File

@ -19,6 +19,7 @@
package org.apache.cloudstack.engine.cloud.entity.api;
import java.util.List;
import java.util.Map;
import javax.ws.rs.BeanParam;
import javax.ws.rs.GET;
@ -36,6 +37,7 @@ import com.cloud.exception.CloudException;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.vm.VirtualMachineProfile;
/**
@ -104,7 +106,7 @@ public interface VirtualMachineEntity extends CloudStackEntity {
* @param reservationId reservation id from reserve call.
*
*/
void deploy(String reservationId, String caller) throws InsufficientCapacityException, ResourceUnavailableException;
void deploy(String reservationId, String caller, Map<VirtualMachineProfile.Param, Object> params) throws InsufficientCapacityException, ResourceUnavailableException;
/**
* Stop the virtual machine

View File

@ -14,14 +14,12 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.allocator;
package org.apache.cloudstack.engine.subsystem.api.storage;
import java.util.List;
import java.util.Set;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.host.Host;
import com.cloud.storage.StoragePool;
import com.cloud.utils.component.Adapter;
import com.cloud.vm.DiskProfile;
@ -31,12 +29,6 @@ import com.cloud.vm.VirtualMachineProfile;
/**
*/
public interface StoragePoolAllocator extends Adapter {
//keeping since storageMgr is using this API for some existing functionalities
List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, long dcId, long podId, Long clusterId, Long hostId, Set<? extends StoragePool> avoids, int returnUpTo);
String chooseStorageIp(VirtualMachine vm, Host host, Host storage);
/**
* Determines which storage pools are suitable for the guest virtual machine
*

View File

@ -25,6 +25,9 @@ import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.vm.VirtualMachineProfile;
import java.util.Map;
public interface VMEntityManager {
@ -34,7 +37,7 @@ public interface VMEntityManager {
String reserveVirtualMachine(VMEntityVO vmEntityVO, String plannerToUse, DeploymentPlan plan, ExcludeList exclude) throws InsufficientCapacityException, ResourceUnavailableException;
void deployVirtualMachine(String reservationId, String caller) throws InsufficientCapacityException, ResourceUnavailableException;
void deployVirtualMachine(String reservationId, String caller, Map<VirtualMachineProfile.Param, Object> params) throws InsufficientCapacityException, ResourceUnavailableException;
boolean stopvirtualmachine(VMEntityVO vmEntityVO, String caller) throws ResourceUnavailableException;

View File

@ -58,6 +58,7 @@ import com.cloud.user.dao.UserDao;
import com.cloud.utils.component.ComponentContext;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.VirtualMachineProfile;
import com.cloud.vm.VirtualMachineProfileImpl;
import com.cloud.vm.dao.VMInstanceDao;
@ -188,7 +189,7 @@ public class VMEntityManagerImpl implements VMEntityManager {
}
@Override
public void deployVirtualMachine(String reservationId, String caller) throws InsufficientCapacityException, ResourceUnavailableException{
public void deployVirtualMachine(String reservationId, String caller, Map<VirtualMachineProfile.Param, Object> params) throws InsufficientCapacityException, ResourceUnavailableException{
//grab the VM Id and destination using the reservationId.
VMReservationVO vmReservation = _reservationDao.findByReservationId(reservationId);
@ -208,7 +209,7 @@ public class VMEntityManagerImpl implements VMEntityManager {
DataCenterDeployment plan = new DataCenterDeployment(vm.getDataCenterId(), vmReservation.getPodId(), vmReservation.getClusterId(),
vmReservation.getHostId(), poolId , null);
VMInstanceVO vmDeployed = _itMgr.start(vm, null, _userDao.findById(new Long(caller)), _accountDao.findById(vm.getAccountId()), plan);
VMInstanceVO vmDeployed = _itMgr.start(vm, params, _userDao.findById(new Long(caller)), _accountDao.findById(vm.getAccountId()), plan);
}

View File

@ -33,6 +33,7 @@ import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.vm.VirtualMachineProfile;
@Component
public class VirtualMachineEntityImpl implements VirtualMachineEntity {
@ -40,14 +41,14 @@ public class VirtualMachineEntityImpl implements VirtualMachineEntity {
@Inject private VMEntityManager manager;
private VMEntityVO vmEntityVO;
public VirtualMachineEntityImpl() {
}
public void init(String vmId) {
this.vmEntityVO = this.manager.loadVirtualMachine(vmId);
this.vmEntityVO = this.manager.loadVirtualMachine(vmId);
}
public void init(String vmId, String owner, String hostName, String displayName, int cpu, int speed, long memory, List<String> computeTags, List<String> rootDiskTags, List<String> networks) {
init(vmId);
this.vmEntityVO.setOwner(owner);
@ -57,7 +58,7 @@ public class VirtualMachineEntityImpl implements VirtualMachineEntity {
this.vmEntityVO.setComputeTags(computeTags);
this.vmEntityVO.setRootDiskTags(rootDiskTags);
this.vmEntityVO.setNetworkIds(networks);
manager.saveVirtualMachine(vmEntityVO);
}
@ -204,8 +205,8 @@ public class VirtualMachineEntityImpl implements VirtualMachineEntity {
}
@Override
public void deploy(String reservationId, String caller) throws InsufficientCapacityException, ResourceUnavailableException{
manager.deployVirtualMachine(reservationId, caller);
public void deploy(String reservationId, String caller, Map<VirtualMachineProfile.Param, Object> params) throws InsufficientCapacityException, ResourceUnavailableException{
manager.deployVirtualMachine(reservationId, caller, params);
}
@Override

View File

@ -105,6 +105,29 @@
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.7</version>
<executions>
<execution>
<id>generate-resource</id>
<phase>generate-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<copy
todir="${basedir}/target/test-classes/">
<fileset dir="${basedir}/../../../utils/conf/">
<include name="db.properties" />
</fileset>
</copy>
</target>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>

View File

@ -0,0 +1,439 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.allocator;
import static org.junit.Assert.fail;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import javax.inject.Inject;
import junit.framework.Assert;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProvider;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreProviderManager;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mockito;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.DataCenter.NetworkType;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.deploy.DataCenterDeployment;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.org.Cluster.ClusterType;
import com.cloud.org.Managed.ManagedState;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolDetailVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.StoragePoolDetailsDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.utils.component.ComponentContext;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachineProfile;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(locations = "classpath:/storageContext.xml")
public class StorageAllocatorTest {
    @Inject
    StoragePoolDao storagePoolDao;
    @Inject
    StorageManager storageMgr;
    @Inject
    DiskOfferingDao diskOfferingDao;
    @Inject
    VolumeDao volumeDao;
    @Inject
    HostPodDao podDao;
    @Inject
    ClusterDao clusterDao;
    @Inject
    DataCenterDao dcDao;
    @Inject
    StoragePoolDetailsDao poolDetailsDao;
    @Inject
    DataStoreProviderManager providerMgr;
    @Inject
    List<StoragePoolAllocator> allocators;

    // Ids of the fixture rows created by createDb(); cleanDb() removes the rows
    // and nulls the ids again so each test starts from an empty database.
    Long dcId = 1l;
    Long podId = 1l;
    Long clusterId = 1l;
    Long volumeId = null;
    Long diskOfferingId = null;
    Long storagePoolId = null;
    VolumeVO volume = null;
    DiskOfferingVO diskOffering = null;
    StoragePoolVO storage = null;

    @Before
    public void setup() throws Exception {
        ComponentContext.initComponentsLifeCycle();
    }

    /**
     * Creates the fixture the allocator tests operate on: one zone/pod/cluster,
     * one Up cluster-scoped NFS storage pool, a disk offering and a ROOT volume.
     */
    protected void createDb() {
        DataCenterVO dc = new DataCenterVO(UUID.randomUUID().toString(), "test", "8.8.8.8", null, "10.0.0.1", null, "10.0.0.1/24",
                null, null, NetworkType.Basic, null, null, true, true, null, null);
        dc = dcDao.persist(dc);
        dcId = dc.getId();

        HostPodVO pod = new HostPodVO(UUID.randomUUID().toString(), dc.getId(), "255.255.255.255", "", 8, "test");
        pod = podDao.persist(pod);
        podId = pod.getId();

        ClusterVO cluster = new ClusterVO(dc.getId(), pod.getId(), "devcloud cluster");
        cluster.setHypervisorType(HypervisorType.XenServer.toString());
        cluster.setClusterType(ClusterType.CloudManaged);
        cluster.setManagedState(ManagedState.Managed);
        cluster = clusterDao.persist(cluster);
        clusterId = cluster.getId();

        DataStoreProvider provider = providerMgr.getDataStoreProvider("ancient primary data store provider");
        storage = createClusterScopedPool(provider);
        storage = storagePoolDao.persist(storage);
        storagePoolId = storage.getId();
        storageMgr.createCapacityEntry(storage.getId());

        diskOffering = new DiskOfferingVO();
        diskOffering.setDiskSize(500);
        diskOffering.setName("test-disk");
        diskOffering.setSystemUse(false);
        diskOffering.setUseLocalStorage(false);
        diskOffering.setCustomized(false);
        diskOffering.setRecreatable(false);
        diskOffering = diskOfferingDao.persist(diskOffering);
        diskOfferingId = diskOffering.getId();

        volume = new VolumeVO(Volume.Type.ROOT, "volume", dcId, 1, 1, diskOffering.getId(), diskOffering.getDiskSize());
        volume = volumeDao.persist(volume);
        volumeId = volume.getId();
    }

    /** Builds (but does not persist) an Up, cluster-scoped NFS pool for the fixture cluster. */
    private StoragePoolVO createClusterScopedPool(DataStoreProvider provider) {
        StoragePoolVO pool = new StoragePoolVO();
        pool.setDataCenterId(dcId);
        pool.setPodId(podId);
        pool.setPoolType(StoragePoolType.NetworkFilesystem);
        pool.setClusterId(clusterId);
        pool.setStatus(StoragePoolStatus.Up);
        pool.setScope(ScopeType.CLUSTER);
        pool.setAvailableBytes(1000);
        pool.setCapacityBytes(20000);
        pool.setHostAddress(UUID.randomUUID().toString());
        pool.setPath(UUID.randomUUID().toString());
        pool.setStorageProviderId(provider.getId());
        return pool;
    }

    /**
     * Mocks a VM profile and stubs the (mocked) StorageManager so every pool
     * reports enough free space — capacity accounting is not under test here.
     */
    private VirtualMachineProfile mockVmProfileWithEnoughSpace() {
        VirtualMachineProfile vmProfile = Mockito.mock(VirtualMachineProfile.class);
        Mockito.when(storageMgr.storagePoolHasEnoughSpace(
                Mockito.anyListOf(Volume.class), Mockito.any(StoragePool.class))).thenReturn(true);
        return vmProfile;
    }

    @Test
    public void testClusterAllocatorMultiplePools() {
        Long newStorageId = null;
        try {
            createDb();

            // Add a second, identical pool in the same cluster.
            DataStoreProvider provider = providerMgr.getDataStoreProvider("ancient primary data store provider");
            storage = createClusterScopedPool(provider);
            StoragePoolVO newStorage = storagePoolDao.persist(storage);
            newStorageId = newStorage.getId();

            DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
            VirtualMachineProfile vmProfile = mockVmProfileWithEnoughSpace();
            DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);

            int foundAcct = 0;
            for (StoragePoolAllocator allocator : allocators) {
                List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
                if (!pools.isEmpty()) {
                    // We asked for at most one pool, so at most one may come back.
                    Assert.assertEquals(1, pools.size());
                    foundAcct++;
                }
            }
            Assert.assertEquals("exactly one allocator should find a pool", 1, foundAcct);
        } catch (Exception e) {
            // Do not swallow the cause: include it in the failure message.
            Assert.fail("unexpected exception: " + e);
        } finally {
            // Clean up on success as well, so later tests start from an empty db.
            if (newStorageId != null) {
                storagePoolDao.remove(newStorageId);
            }
            cleanDb();
        }
    }

    @Test
    public void testClusterAllocator() {
        try {
            createDb();

            DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
            VirtualMachineProfile vmProfile = mockVmProfileWithEnoughSpace();
            DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);

            int foundAcct = 0;
            for (StoragePoolAllocator allocator : allocators) {
                List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
                if (!pools.isEmpty()) {
                    Assert.assertEquals(storage.getId(), pools.get(0).getId());
                    foundAcct++;
                }
            }
            Assert.assertEquals("exactly one allocator should find the pool", 1, foundAcct);
        } catch (Exception e) {
            Assert.fail("unexpected exception: " + e);
        } finally {
            cleanDb();
        }
    }

    @Test
    public void testClusterAllocatorWithTags() {
        try {
            createDb();

            // Tag the pool "high" and require the same tag on the disk offering.
            StoragePoolDetailVO detailVO = new StoragePoolDetailVO(this.storagePoolId, "high", "true");
            poolDetailsDao.persist(detailVO);
            DiskOfferingVO diskOff = this.diskOfferingDao.findById(diskOffering.getId());
            List<String> tags = new ArrayList<String>();
            tags.add("high");
            diskOff.setTagsArray(tags);
            diskOfferingDao.update(diskOff.getId(), diskOff);

            DiskProfile profile = new DiskProfile(volume, diskOff, HypervisorType.XenServer);
            VirtualMachineProfile vmProfile = mockVmProfileWithEnoughSpace();
            DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);

            int foundAcct = 0;
            for (StoragePoolAllocator allocator : allocators) {
                List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
                if (!pools.isEmpty()) {
                    Assert.assertEquals(storage.getId(), pools.get(0).getId());
                    foundAcct++;
                }
            }
            Assert.assertEquals("exactly one allocator should find the tagged pool", 1, foundAcct);
        } catch (Exception e) {
            Assert.fail("unexpected exception: " + e);
        } finally {
            cleanDb();
        }
    }

    @Test
    public void testClusterAllocatorWithWrongTag() {
        try {
            createDb();

            // Pool is tagged "high" but the offering demands "low": no match expected.
            StoragePoolDetailVO detailVO = new StoragePoolDetailVO(this.storagePoolId, "high", "true");
            poolDetailsDao.persist(detailVO);
            DiskOfferingVO diskOff = this.diskOfferingDao.findById(diskOffering.getId());
            List<String> tags = new ArrayList<String>();
            tags.add("low");
            diskOff.setTagsArray(tags);
            diskOfferingDao.update(diskOff.getId(), diskOff);

            DiskProfile profile = new DiskProfile(volume, diskOff, HypervisorType.XenServer);
            VirtualMachineProfile vmProfile = mockVmProfileWithEnoughSpace();
            DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);

            int foundAcct = 0;
            for (StoragePoolAllocator allocator : allocators) {
                List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
                if (!pools.isEmpty()) {
                    foundAcct++;
                }
            }
            Assert.assertEquals("no allocator should match a pool lacking the requested tag", 0, foundAcct);
        } catch (Exception e) {
            Assert.fail("unexpected exception: " + e);
        } finally {
            cleanDb();
        }
    }

    @Test
    public void testZoneWideStorageAllocator() {
        try {
            createDb();

            StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
            pool.setScope(ScopeType.ZONE);
            storagePoolDao.update(pool.getId(), pool);

            DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.KVM);
            VirtualMachineProfile vmProfile = mockVmProfileWithEnoughSpace();
            // Zone-wide allocation filters on the VM's hypervisor type.
            Mockito.when(vmProfile.getHypervisorType()).thenReturn(HypervisorType.KVM);
            DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);

            int foundAcct = 0;
            for (StoragePoolAllocator allocator : allocators) {
                List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
                if (!pools.isEmpty()) {
                    Assert.assertEquals(storage.getId(), pools.get(0).getId());
                    foundAcct++;
                }
            }
            Assert.assertEquals("exactly one allocator should find the zone-wide pool", 1, foundAcct);
        } catch (Exception e) {
            Assert.fail("unexpected exception: " + e);
        } finally {
            cleanDb();
        }
    }

    @Test
    public void testPoolStateIsNotUp() {
        try {
            createDb();

            StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
            pool.setScope(ScopeType.ZONE);
            pool.setStatus(StoragePoolStatus.Maintenance);
            storagePoolDao.update(pool.getId(), pool);

            DiskProfile profile = new DiskProfile(volume, diskOffering, HypervisorType.XenServer);
            VirtualMachineProfile vmProfile = mockVmProfileWithEnoughSpace();
            DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);

            int foundAcct = 0;
            for (StoragePoolAllocator allocator : allocators) {
                List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
                if (!pools.isEmpty()) {
                    Assert.assertEquals(storage.getId(), pools.get(0).getId());
                    foundAcct++;
                }
            }
            if (foundAcct == 1) {
                Assert.fail("a pool in Maintenance state must not be allocated");
            }
        } catch (Exception e) {
            Assert.fail("unexpected exception: " + e);
        } finally {
            cleanDb();
        }
    }

    @Test
    public void testLocalStorageAllocator() {
        try {
            createDb();

            // Host-scoped pool plus a local-storage offering exercises the local allocator.
            StoragePoolVO pool = storagePoolDao.findById(storagePoolId);
            pool.setScope(ScopeType.HOST);
            storagePoolDao.update(pool.getId(), pool);

            DiskOfferingVO diskOff = diskOfferingDao.findById(diskOfferingId);
            diskOff.setUseLocalStorage(true);
            diskOfferingDao.update(diskOfferingId, diskOff);

            DiskProfile profile = new DiskProfile(volume, diskOff, HypervisorType.XenServer);
            VirtualMachineProfile vmProfile = mockVmProfileWithEnoughSpace();
            DeploymentPlan plan = new DataCenterDeployment(dcId, podId, clusterId, null, null, null);

            int foundAcct = 0;
            for (StoragePoolAllocator allocator : allocators) {
                List<StoragePool> pools = allocator.allocateToPool(profile, vmProfile, plan, new ExcludeList(), 1);
                if (!pools.isEmpty()) {
                    Assert.assertEquals(storage.getId(), pools.get(0).getId());
                    foundAcct++;
                }
            }
            Assert.assertEquals("exactly one allocator should find the local pool", 1, foundAcct);
        } catch (Exception e) {
            Assert.fail("unexpected exception: " + e);
        } finally {
            cleanDb();
        }
    }

    /** Removes every row created by createDb() (reverse dependency order) and resets the ids. */
    protected void cleanDb() {
        if (volumeId != null) {
            volumeDao.remove(volumeId);
            volumeId = null;
        }
        if (diskOfferingId != null) {
            diskOfferingDao.remove(diskOfferingId);
            diskOfferingId = null;
        }
        if (storagePoolId != null) {
            storagePoolDao.remove(storagePoolId);
            storagePoolId = null;
        }
        if (clusterId != null) {
            clusterDao.remove(clusterId);
            clusterId = null;
        }
        if (podId != null) {
            podDao.remove(podId);
            podId = null;
        }
        if (dcId != null) {
            dcDao.remove(dcId);
            dcId = null;
        }
    }
}

View File

@ -0,0 +1,79 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.allocator;
import java.io.IOException;
import org.apache.cloudstack.storage.allocator.StorageAllocatorTestConfiguration.Library;
import org.mockito.Mockito;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.ComponentScan.Filter;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.FilterType;
import org.springframework.core.type.classreading.MetadataReader;
import org.springframework.core.type.classreading.MetadataReaderFactory;
import org.springframework.core.type.filter.TypeFilter;
import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl;
import com.cloud.configuration.dao.ConfigurationDaoImpl;
import com.cloud.dc.dao.ClusterDaoImpl;
import com.cloud.dc.dao.DataCenterDaoImpl;
import com.cloud.domain.dao.DomainDaoImpl;
import com.cloud.host.dao.HostDaoImpl;
import com.cloud.host.dao.HostDetailsDaoImpl;
import com.cloud.host.dao.HostTagsDaoImpl;
import com.cloud.storage.StorageManager;
import com.cloud.storage.dao.StoragePoolDaoImpl;
import com.cloud.storage.dao.StoragePoolDetailsDaoImpl;
import com.cloud.storage.dao.VMTemplateDaoImpl;
import com.cloud.utils.component.SpringComponentScanUtils;
import com.cloud.vm.UserVmManager;
/**
 * Spring test context for the storage-allocator tests: scans only the DAOs
 * listed in basePackageClasses (via the custom Library filter) and replaces
 * the heavyweight managers with Mockito mocks.
 */
@Configuration
@ComponentScan(basePackageClasses={
        StoragePoolDetailsDaoImpl.class,
        StoragePoolDaoImpl.class,
        VMTemplateDaoImpl.class,
        HostDaoImpl.class,
        DomainDaoImpl.class,
        DataCenterDaoImpl.class,
        },
        includeFilters={@Filter(value=Library.class, type=FilterType.CUSTOM)},
        useDefaultFilters=false
        )
public class StorageAllocatorTestConfiguration {
    // NOTE: the @Bean method names deliberately keep their UpperCamelCase form —
    // Spring derives the bean name from the method name, so renaming them would
    // change the bean names other test wiring may refer to.
    @Bean
    public UserVmManager UserVmManager() {
        return Mockito.mock(UserVmManager.class);
    }

    @Bean
    public StorageManager StorageManager() {
        return Mockito.mock(StorageManager.class);
    }

    /**
     * Component-scan filter that admits a candidate class only when it is one
     * of the basePackageClasses declared on this configuration's @ComponentScan.
     */
    public static class Library implements TypeFilter {
        @Override
        public boolean match(MetadataReader mdr, MetadataReaderFactory metadataReaderFactory) throws IOException {
            ComponentScan cs = StorageAllocatorTestConfiguration.class.getAnnotation(ComponentScan.class);
            return SpringComponentScanUtils.includedInBasePackageClasses(mdr.getClassMetadata().getClassName(), cs);
        }
    }
}

View File

@ -20,6 +20,7 @@ import java.io.IOException;
import org.apache.cloudstack.acl.APIChecker;
import org.apache.cloudstack.engine.service.api.OrchestrationService;
import org.apache.cloudstack.framework.rpc.RpcProvider;
import org.apache.cloudstack.storage.HostEndpointRpcServer;
import org.apache.cloudstack.storage.endpoint.EndPointSelector;
import org.apache.cloudstack.storage.test.ChildTestConfiguration.Library;
@ -35,46 +36,55 @@ import org.springframework.core.type.filter.TypeFilter;
import com.cloud.agent.AgentManager;
import com.cloud.alert.AlertManager;
import com.cloud.capacity.dao.CapacityDaoImpl;
import com.cloud.cluster.ClusteredAgentRebalanceService;
import com.cloud.cluster.agentlb.dao.HostTransferMapDao;
import com.cloud.cluster.agentlb.dao.HostTransferMapDaoImpl;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.configuration.dao.ConfigurationDaoImpl;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.ClusterDetailsDaoImpl;
import com.cloud.dc.dao.ClusterDaoImpl;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.DataCenterDaoImpl;
import com.cloud.dc.dao.DataCenterIpAddressDaoImpl;
import com.cloud.dc.dao.DataCenterLinkLocalIpAddressDaoImpl;
import com.cloud.dc.dao.DataCenterVnetDaoImpl;
import com.cloud.dc.dao.DcDetailsDaoImpl;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.dc.dao.HostPodDaoImpl;
import com.cloud.dc.dao.PodVlanDaoImpl;
import com.cloud.domain.dao.DomainDao;
import com.cloud.domain.dao.DomainDaoImpl;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDetailsDao;
import com.cloud.host.dao.HostDaoImpl;
import com.cloud.host.dao.HostDetailsDaoImpl;
import com.cloud.host.dao.HostTagsDao;
import com.cloud.host.dao.HostTagsDaoImpl;
import com.cloud.resource.ResourceManager;
import com.cloud.server.ManagementServer;
import com.cloud.server.auth.UserAuthenticator;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.service.dao.ServiceOfferingDaoImpl;
import com.cloud.storage.OCFS2ManagerImpl;
import com.cloud.storage.StorageManager;
import com.cloud.storage.VolumeManager;
import com.cloud.storage.dao.DiskOfferingDaoImpl;
import com.cloud.storage.dao.SnapshotDaoImpl;
import com.cloud.storage.dao.StoragePoolHostDaoImpl;
import com.cloud.storage.dao.StoragePoolWorkDaoImpl;
import com.cloud.storage.dao.VMTemplateDaoImpl;
import com.cloud.storage.dao.VMTemplateDetailsDao;
import com.cloud.storage.dao.VMTemplateDetailsDaoImpl;
import com.cloud.storage.dao.VMTemplateHostDaoImpl;
import com.cloud.storage.dao.VMTemplatePoolDaoImpl;
import com.cloud.storage.dao.VMTemplateZoneDao;
import com.cloud.storage.dao.VMTemplateZoneDaoImpl;
import com.cloud.storage.dao.VolumeDaoImpl;
import com.cloud.storage.dao.VolumeHostDaoImpl;
import com.cloud.storage.s3.S3Manager;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.storage.swift.SwiftManager;
import com.cloud.tags.dao.ResourceTagsDaoImpl;
import com.cloud.template.TemplateManager;
import com.cloud.user.dao.UserDaoImpl;
import com.cloud.utils.component.SpringComponentScanUtils;
import com.cloud.vm.VirtualMachineManager;
import com.cloud.vm.dao.ConsoleProxyDaoImpl;
import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.NicDaoImpl;
import com.cloud.vm.dao.SecondaryStorageVmDaoImpl;
import com.cloud.vm.dao.UserVmDaoImpl;
import com.cloud.vm.dao.UserVmDetailsDaoImpl;
import com.cloud.vm.dao.VMInstanceDaoImpl;
import com.cloud.vm.snapshot.dao.VMSnapshotDaoImpl;
@Configuration
@ComponentScan(basePackageClasses={
NicDaoImpl.class,
@ -85,88 +95,62 @@ import com.cloud.vm.dao.VMInstanceDaoImpl;
VMTemplatePoolDaoImpl.class,
ResourceTagsDaoImpl.class,
VMTemplateDaoImpl.class,
MockStorageMotionStrategy.class
MockStorageMotionStrategy.class,
ConfigurationDaoImpl.class,
ClusterDaoImpl.class,
HostPodDaoImpl.class,
VMTemplateZoneDaoImpl.class,
VMTemplateDetailsDaoImpl.class,
HostDaoImpl.class,
HostDetailsDaoImpl.class,
HostTagsDaoImpl.class,
HostTransferMapDaoImpl.class,
DataCenterIpAddressDaoImpl.class,
DataCenterLinkLocalIpAddressDaoImpl.class,
DataCenterVnetDaoImpl.class,
PodVlanDaoImpl.class,
DcDetailsDaoImpl.class,
DiskOfferingDaoImpl.class,
StoragePoolHostDaoImpl.class,
UserVmDaoImpl.class,
UserVmDetailsDaoImpl.class,
ServiceOfferingDaoImpl.class,
CapacityDaoImpl.class,
SnapshotDaoImpl.class,
VMSnapshotDaoImpl.class,
OCFS2ManagerImpl.class,
ClusterDetailsDaoImpl.class,
SecondaryStorageVmDaoImpl.class,
ConsoleProxyDaoImpl.class,
StoragePoolWorkDaoImpl.class,
UserDaoImpl.class
},
includeFilters={@Filter(value=Library.class, type=FilterType.CUSTOM)},
useDefaultFilters=false
)
public class ChildTestConfiguration extends TestConfiguration {
@Override
@Bean
public HostDao hostDao() {
HostDao dao = super.hostDao();
HostDao nDao = Mockito.spy(dao);
return nDao;
}
@Bean
public EndPointSelector selector() {
return Mockito.mock(EndPointSelector.class);
}
@Bean
public DataCenterDao dcDao() {
return new DataCenterDaoImpl();
}
@Bean
public HostDetailsDao hostDetailsDao() {
return new HostDetailsDaoImpl();
}
@Bean
public HostTagsDao hostTagsDao() {
return new HostTagsDaoImpl();
}
@Bean ClusterDao clusterDao() {
return new ClusterDaoImpl();
}
@Bean HostTransferMapDao hostTransferDao() {
return new HostTransferMapDaoImpl();
}
@Bean DataCenterIpAddressDaoImpl dataCenterIpAddressDaoImpl() {
return new DataCenterIpAddressDaoImpl();
}
@Bean DataCenterLinkLocalIpAddressDaoImpl dataCenterLinkLocalIpAddressDaoImpl() {
return new DataCenterLinkLocalIpAddressDaoImpl();
}
@Bean DataCenterVnetDaoImpl dataCenterVnetDaoImpl() {
return new DataCenterVnetDaoImpl();
}
@Bean PodVlanDaoImpl podVlanDaoImpl() {
return new PodVlanDaoImpl();
}
@Bean DcDetailsDaoImpl dcDetailsDaoImpl() {
return new DcDetailsDaoImpl();
}
@Bean HostPodDao hostPodDao() {
return new HostPodDaoImpl();
}
@Bean StoragePoolHostDao storagePoolHostDao() {
return new StoragePoolHostDaoImpl();
}
@Bean VMTemplateZoneDao templateZoneDao() {
return new VMTemplateZoneDaoImpl();
}
@Bean VMTemplateDetailsDao templateDetailsDao() {
return new VMTemplateDetailsDaoImpl();
}
@Bean ConfigurationDao configDao() {
return new ConfigurationDaoImpl();
}
@Bean
public AgentManager agentMgr() {
return new DirectAgentManagerSimpleImpl();
}
@Bean DomainDao domainDao() {
return new DomainDaoImpl();
}
@Bean
public HostEndpointRpcServer rpcServer() {
return new MockHostEndpointRpcServerDirectCallResource();
}
@Bean
public RpcProvider rpcProvider() {
return Mockito.mock(RpcProvider.class);
}
@Bean
public ClusteredAgentRebalanceService _rebalanceService() {
return Mockito.mock(ClusteredAgentRebalanceService.class);
@ -183,12 +167,50 @@ public class ChildTestConfiguration extends TestConfiguration {
public APIChecker apiChecker() {
return Mockito.mock(APIChecker.class);
}
@Bean
public TemplateManager templateMgr() {
return Mockito.mock(TemplateManager.class);
}
@Bean
public VolumeManager volumeMgr() {
return Mockito.mock(VolumeManager.class);
}
@Bean
public SwiftManager switfMgr() {
return Mockito.mock(SwiftManager.class);
}
@Bean
public ManagementServer server() {
return Mockito.mock(ManagementServer.class);
}
@Bean
public VirtualMachineManager vmMgr() {
return Mockito.mock(VirtualMachineManager.class);
}
@Bean
public S3Manager s3Mgr() {
return Mockito.mock(S3Manager.class);
}
@Bean
public SnapshotManager snapshotMgr() {
return Mockito.mock(SnapshotManager.class);
}
@Bean
public ResourceManager resourceMgr() {
return Mockito.mock(ResourceManager.class);
}
@Bean
public DomainRouterDao domainRouterDao() {
return Mockito.mock(DomainRouterDao.class);
}
@Bean
public StorageManager storageMgr() {
return Mockito.mock(StorageManager.class);
}
@Bean
public AlertManager alertMgr() {
return Mockito.mock(AlertManager.class);
@ -204,9 +226,5 @@ public class ChildTestConfiguration extends TestConfiguration {
}
}
/* @Override
@Bean
public PrimaryDataStoreDao primaryDataStoreDao() {
return Mockito.mock(PrimaryDataStoreDaoImpl.class);
}*/
}

View File

@ -16,16 +16,8 @@
// under the License.
package org.apache.cloudstack.storage.test;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import com.cloud.host.dao.HostDao;
import com.cloud.host.dao.HostDaoImpl;
@Configuration
public class TestConfiguration {
@Bean
public HostDao hostDao() {
return new HostDaoImpl();
}
}

View File

@ -0,0 +1,45 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. The ASF licenses this file to
you under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required
by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
OF ANY KIND, either express or implied. See the License for the specific
language governing permissions and limitations under the License. -->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
xmlns:tx="http://www.springframework.org/schema/tx" xmlns:aop="http://www.springframework.org/schema/aop"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
http://www.springframework.org/schema/tx
http://www.springframework.org/schema/tx/spring-tx-3.0.xsd
http://www.springframework.org/schema/aop
http://www.springframework.org/schema/aop/spring-aop-3.0.xsd
http://www.springframework.org/schema/context
http://www.springframework.org/schema/context/spring-context-3.0.xsd">
<context:annotation-config />
<context:component-scan base-package="org.apache.cloudstack.storage" />
<!-- @DB support -->
<aop:config proxy-target-class="true">
<aop:aspect id="dbContextBuilder" ref="transactionContextBuilder">
<aop:pointcut id="captureAnyMethod" expression="execution(* *(..))" />
<aop:around pointcut-ref="captureAnyMethod" method="AroundAnyMethod" />
</aop:aspect>
</aop:config>
<bean id="transactionContextBuilder" class="com.cloud.utils.db.TransactionContextBuilder" />
<bean id="componentContext" class="com.cloud.utils.component.ComponentContext"/>
<bean class="org.apache.cloudstack.storage.test.ChildTestConfiguration" />
<bean class="org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor">
<property name="requiredParameterValue" value="false" />
</bean>
</beans>

View File

@ -23,10 +23,8 @@
<context:annotation-config />
<context:component-scan
base-package="org.apache.cloudstack.storage" />
<context:component-scan base-package="com.cloud.utils.db" />
<context:component-scan base-package="com.cloud.utils.component" />
<!-- @DB support -->
<aop:config proxy-target-class="true">
<aop:aspect id="dbContextBuilder" ref="transactionContextBuilder">
<aop:pointcut id="captureAnyMethod" expression="execution(* *(..))" />
@ -34,50 +32,12 @@
<aop:around pointcut-ref="captureAnyMethod" method="AroundAnyMethod" />
</aop:aspect>
<aop:aspect id="actionEventInterceptorAspect" ref="actionEventInterceptor">
<aop:pointcut id="captureEventMethod"
expression="execution(* *(..)) and @annotation(com.cloud.event.ActionEvent)" />
<aop:around pointcut-ref="captureEventMethod" method="AroundAnyMethod" />
</aop:aspect>
</aop:config>
<bean id="transactionContextBuilder" class="com.cloud.utils.db.TransactionContextBuilder" />
<bean id="actionEventInterceptor" class="com.cloud.event.ActionEventInterceptor" />
<bean id="componentContext" class="com.cloud.utils.component.ComponentContext"/>
<bean class="org.apache.cloudstack.storage.test.ChildTestConfiguration" />
<bean id="onwireRegistry"
class="org.apache.cloudstack.framework.serializer.OnwireClassRegistry"
init-method="scan">
<property name="packages">
<list>
<value>org.apache.cloudstack.framework</value>
</list>
</property>
<bean class="org.springframework.beans.factory.annotation.AutowiredAnnotationBeanPostProcessor">
<property name="requiredParameterValue" value="false" />
</bean>
<bean id="messageSerializer"
class="org.apache.cloudstack.framework.serializer.JsonMessageSerializer">
<property name="onwireClassRegistry" ref="onwireRegistry" />
</bean>
<bean id="transportProvider"
class="org.apache.cloudstack.framework.server.ServerTransportProvider"
init-method="initialize">
<property name="workerPoolSize" value="5" />
<property name="nodeId" value="Node1" />
<property name="messageSerializer" ref="messageSerializer" />
</bean>
<bean id="rpcProvider" class="org.apache.cloudstack.framework.rpc.RpcProviderImpl"
init-method="initialize">
<constructor-arg ref="transportProvider" />
<property name="messageSerializer" ref="messageSerializer" />
</bean>
<bean id="eventBus" class="org.apache.cloudstack.framework.eventbus.EventBusBase" />
</beans>

View File

@ -40,24 +40,24 @@ public class HypervsiorHostEndPointRpcServer implements HostEndpointRpcServer {
private static final Logger s_logger = Logger.getLogger(HypervsiorHostEndPointRpcServer.class);
@Inject
private RpcProvider _rpcProvider;
private RpcProvider rpcProvider;
public HypervsiorHostEndPointRpcServer() {
}
public HypervsiorHostEndPointRpcServer(RpcProvider rpcProvider) {
_rpcProvider = rpcProvider;
_rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
rpcProvider = rpcProvider;
rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
}
@PostConstruct
public void Initialize() {
_rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
rpcProvider.registerRpcServiceEndpoint(RpcServiceDispatcher.getDispatcher(this));
}
@Override
public void sendCommandAsync(HypervisorHostEndPoint host, final Command command, final AsyncCompletionCallback<Answer> callback) {
_rpcProvider.newCall(host.getHostAddr()).addCallbackListener(new RpcCallbackListener<Answer>() {
rpcProvider.newCall(host.getHostAddr()).addCallbackListener(new RpcCallbackListener<Answer>() {
@Override
public void onSuccess(Answer result) {
callback.complete(result);

View File

@ -0,0 +1,192 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.allocator;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.log4j.Logger;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.Volume.Type;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.storage.dao.VolumeDao;
import com.cloud.user.Account;
import com.cloud.utils.NumbersUtil;
import com.cloud.utils.component.AdapterBase;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
/**
 * Base class for storage pool allocators. Concrete subclasses implement
 * {@link #select} to produce candidate pools; this class then re-orders the
 * candidates according to the configured allocation algorithm and provides a
 * common suitability filter.
 */
public abstract class AbstractStoragePoolAllocator extends AdapterBase implements StoragePoolAllocator {
    private static final Logger s_logger = Logger.getLogger(AbstractStoragePoolAllocator.class);
    @Inject StorageManager storageMgr;
    protected @Inject StoragePoolDao _storagePoolDao;
    @Inject VolumeDao _volumeDao;
    @Inject ConfigurationDao _configDao;
    @Inject ClusterDao _clusterDao;
    protected @Inject DataStoreManager dataStoreMgr;
    // Overprovisioning multiplier; overwritten from "storage.overprovisioning.factor" in configure().
    protected BigDecimal _storageOverprovisioningFactor = new BigDecimal(1);
    // Slack added per volume during capacity checks; subclasses may override in configure().
    long _extraBytesPerVolume = 0;
    Random _rand;
    // Read from "storage.overwrite.provisioning"; not referenced elsewhere in this
    // class — presumably consumed by a subclass or was for one. TODO confirm.
    boolean _dontMatter;
    // Pool-ordering strategy used by reOrder(): "random", "userconcentratedpod_random",
    // or "userdispersing". Loaded from "vm.allocation.algorithm".
    protected String _allocationAlgorithm = "random";
    @Inject
    DiskOfferingDao _diskOfferingDao;

    /**
     * Loads allocator tuning values from the global configuration.
     *
     * @throws ConfigurationException propagated from {@link AdapterBase#configure}
     */
    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        super.configure(name, params);
        Map<String, String> configs = _configDao.getConfiguration(null, params);
        String globalStorageOverprovisioningFactor = configs.get("storage.overprovisioning.factor");
        // Defaults to 2.0 when the key is absent or unparseable.
        _storageOverprovisioningFactor = new BigDecimal(NumbersUtil.parseFloat(globalStorageOverprovisioningFactor, 2.0f));
        _extraBytesPerVolume = 0;
        _rand = new Random(System.currentTimeMillis());
        _dontMatter = Boolean.parseBoolean(configs.get("storage.overwrite.provisioning"));
        String allocationAlgorithm = configs.get("vm.allocation.algorithm");
        if (allocationAlgorithm != null) {
            _allocationAlgorithm = allocationAlgorithm;
        }
        return true;
    }

    /**
     * Subclass hook: return up to returnUpTo pools that can host the disk,
     * honoring the exclude list. Ordering is applied afterwards by reOrder().
     */
    protected abstract List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo);

    /**
     * Template method: delegate candidate selection to the subclass, then apply
     * the configured ordering strategy before returning the list to the planner.
     */
    @Override
    public
    List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
        List<StoragePool> pools = select(dskCh, vmProfile, plan, avoid, returnUpTo);
        return reOrder(pools, vmProfile, plan);
    }

    /**
     * Orders the given pools by ascending volume count for the owning account
     * ("userdispersing" strategy), restricted to the pools actually present in
     * the candidate list. Returns the input unchanged when account is null.
     */
    protected List<StoragePool> reorderPoolsByNumberOfVolumes(DeploymentPlan plan, List<StoragePool> pools, Account account) {
        if(account == null){
            return pools;
        }
        long dcId = plan.getDataCenterId();
        Long podId = plan.getPodId();
        Long clusterId = plan.getClusterId();
        List<Long> poolIdsByVolCount = _volumeDao.listPoolIdsByVolumeCount(dcId, podId, clusterId, account.getAccountId());
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("List of pools in ascending order of number of volumes for account id: "+ account.getAccountId() + " is: "+ poolIdsByVolCount);
        }
        //now filter the given list of Pools by this ordered list
        Map<Long, StoragePool> poolMap = new HashMap<Long, StoragePool>();
        for (StoragePool pool : pools) {
            poolMap.put(pool.getId(), pool);
        }
        List<Long> matchingPoolIds = new ArrayList<Long>(poolMap.keySet());
        // Keep only ids that are in the candidate set, preserving the volume-count order.
        poolIdsByVolCount.retainAll(matchingPoolIds);
        List<StoragePool> reorderedPools = new ArrayList<StoragePool>();
        for(Long id: poolIdsByVolCount){
            reorderedPools.add(poolMap.get(id));
        }
        return reorderedPools;
    }

    /**
     * Applies the configured allocation algorithm to the candidate list:
     * shuffles for the random strategies (or when no owner account is known),
     * or disperses by per-account volume count for "userdispersing".
     */
    protected List<StoragePool> reOrder(List<StoragePool> pools,
            VirtualMachineProfile<? extends VirtualMachine> vmProfile,
            DeploymentPlan plan) {
        Account account = null;
        if(vmProfile.getVirtualMachine() != null){
            account = vmProfile.getOwner();
        }
        if(_allocationAlgorithm.equals("random") || _allocationAlgorithm.equals("userconcentratedpod_random") || (account == null)) {
            // Shuffle this so that we don't check the pools in the same order.
            Collections.shuffle(pools);
        }else if(_allocationAlgorithm.equals("userdispersing")){
            pools = reorderPoolsByNumberOfVolumes(plan, pools, account);
        }
        return pools;
    }

    /**
     * Common suitability check for a single pool: rejects pools on the exclude
     * list, iSCSI pools for ROOT volumes, RBD pools for system-use offerings,
     * pools whose cluster runs a different hypervisor, and pools without
     * enough free capacity for the volume.
     *
     * NOTE(review): _clusterDao.findById(clusterId) is dereferenced without a
     * null check — assumes every pool reaching this filter has a cluster
     * (i.e. is cluster-scoped). Zone-wide pools would need a different filter.
     */
    protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
            DeploymentPlan plan) {
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("Checking if storage pool is suitable, name: " + pool.getName()+ " ,poolId: "+ pool.getId());
        }
        if (avoid.shouldAvoid(pool)) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("StoragePool is in avoid set, skipping this pool");
            }
            return false;
        }
        if(dskCh.getType().equals(Type.ROOT) && pool.getPoolType().equals(StoragePoolType.Iscsi)){
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("Disk needed for ROOT volume, but StoragePoolType is Iscsi, skipping this and trying other available pools");
            }
            return false;
        }
        DiskOfferingVO diskOffering = _diskOfferingDao.findById(dskCh.getDiskOfferingId());
        if (diskOffering.getSystemUse() && pool.getPoolType() == StoragePoolType.RBD) {
            s_logger.debug("Skipping RBD pool " + pool.getName() + " as a suitable pool. RBD is not supported for System VM's");
            return false;
        }
        Long clusterId = pool.getClusterId();
        ClusterVO cluster = _clusterDao.findById(clusterId);
        if (!(cluster.getHypervisorType() == dskCh.getHypersorType())) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("StoragePool's Cluster does not have required hypervisorType, skipping this pool");
            }
            return false;
        }
        // check capacity
        Volume volume = _volumeDao.findById(dskCh.getVolumeId());
        List<Volume> requestVolumes = new ArrayList<Volume>();
        requestVolumes.add(volume);
        return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
    }
}

View File

@ -0,0 +1,105 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.allocator;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.offering.ServiceOffering;
import com.cloud.storage.StoragePool;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@Component
@Local(value=StoragePoolAllocator.class)
/**
 * Allocator for cluster-scoped primary storage: finds pools matching the
 * requested tags within the deployment plan's zone/pod/cluster and returns up
 * to returnUpTo pools that pass the common suitability filter.
 */
public class ClusterScopeStoragePoolAllocator extends AbstractStoragePoolAllocator {
    private static final Logger s_logger = Logger.getLogger(ClusterScopeStoragePoolAllocator.class);

    // BUG FIX: this class previously re-declared _allocationAlgorithm and
    // _diskOfferingDao, hiding the identically named fields inherited from
    // AbstractStoragePoolAllocator. Field access is not polymorphic in Java,
    // so configure() below wrote a shadow copy that the parent's reOrder()
    // never read — the "vm.allocation.algorithm" setting was silently ignored.
    // The duplicate declarations are removed; the inherited fields are used.

    /**
     * Finds tag-matching pools in the plan's dc/pod/cluster and returns those
     * that pass {@link #filter}, stopping once returnUpTo pools are collected.
     *
     * @return suitable pools; empty list when none match (never null)
     */
    @Override
    protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
        List<StoragePool> suitablePools = new ArrayList<StoragePool>();
        long dcId = plan.getDataCenterId();
        Long podId = plan.getPodId();
        Long clusterId = plan.getClusterId();
        if (dskCh.getTags() != null && dskCh.getTags().length != 0) {
            s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId + " having tags:" + Arrays.toString(dskCh.getTags()));
        } else {
            s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
        }
        List<StoragePoolVO> pools = _storagePoolDao.findPoolsByTags(dcId, podId, clusterId, dskCh.getTags());
        if (pools.size() == 0) {
            if (s_logger.isDebugEnabled()) {
                String storageType = dskCh.useLocalStorage() ? ServiceOffering.StorageType.local.toString() : ServiceOffering.StorageType.shared.toString();
                s_logger.debug("No storage pools available for " + storageType + " volume allocation, returning");
            }
            return suitablePools;
        }
        for (StoragePoolVO pool : pools) {
            if (suitablePools.size() == returnUpTo) {
                break;
            }
            // Resolve the DB row to the runtime StoragePool via the data store manager.
            StoragePool pol = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(pool.getId());
            if (filter(avoid, pol, dskCh, plan)) {
                suitablePools.add(pol);
            }
        }
        if (s_logger.isDebugEnabled()) {
            // BUG FIX: log message previously said "FirstFitStoragePoolAllocator",
            // a leftover from before this class was renamed.
            s_logger.debug("ClusterScopeStoragePoolAllocator returning " + suitablePools.size() + " suitable storage pools");
        }
        return suitablePools;
    }

    /**
     * Re-reads "vm.allocation.algorithm" into the inherited field. The null
     * check guards unit-test contexts where the config DAO is not injected.
     */
    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        super.configure(name, params);
        if (_configDao != null) {
            Map<String, String> configs = _configDao.getConfiguration(params);
            String allocationAlgorithm = configs.get("vm.allocation.algorithm");
            if (allocationAlgorithm != null) {
                _allocationAlgorithm = allocationAlgorithm;
            }
        }
        return true;
    }
}

View File

@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.allocator;
package org.apache.cloudstack.storage.allocator;
import java.util.List;
import java.util.Map;
@ -23,8 +23,8 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.deploy.DeploymentPlan;
@ -36,32 +36,18 @@ import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@Component
@Local(value=StoragePoolAllocator.class)
public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(GarbageCollectingStoragePoolAllocator.class);
StoragePoolAllocator _firstFitStoragePoolAllocator;
StoragePoolAllocator _localStoragePoolAllocator;
@Inject StorageManager _storageMgr;
@Inject StorageManager storageMgr;
@Inject ConfigurationDao _configDao;
boolean _storagePoolCleanupEnabled;
@Override
public boolean allocatorIsCorrectType(DiskProfile dskCh) {
return true;
}
public Integer getStorageOverprovisioningFactor() {
return null;
}
public Long getExtraBytesPerVolume() {
return null;
}
@Override
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
if (!_storagePoolCleanupEnabled) {
s_logger.debug("Storage pool cleanup is not enabled, so GarbageCollectingStoragePoolAllocator is being skipped.");
@ -69,10 +55,10 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
}
// Clean up all storage pools
_storageMgr.cleanupStorage(false);
storageMgr.cleanupStorage(false);
// Determine what allocator to use
StoragePoolAllocator allocator;
if (localStorageAllocationNeeded(dskCh)) {
if (dskCh.useLocalStorage()) {
allocator = _localStoragePoolAllocator;
} else {
allocator = _firstFitStoragePoolAllocator;
@ -88,7 +74,7 @@ public class GarbageCollectingStoragePoolAllocator extends AbstractStoragePoolAl
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
_firstFitStoragePoolAllocator = ComponentContext.inject(FirstFitStoragePoolAllocator.class);
_firstFitStoragePoolAllocator = ComponentContext.inject(ClusterScopeStoragePoolAllocator.class);
_firstFitStoragePoolAllocator.configure("GCFirstFitStoragePoolAllocator", params);
_localStoragePoolAllocator = ComponentContext.inject(LocalStoragePoolAllocator.class);
_localStoragePoolAllocator.configure("GCLocalStoragePoolAllocator", params);

View File

@ -0,0 +1,126 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.allocator;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.utils.NumbersUtil;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
@Component
@Local(value = StoragePoolAllocator.class)
/**
 * Allocator for host-local primary storage. Two paths: when attaching a data
 * disk to a VM whose host is already known, only that host's local pools are
 * considered; otherwise all tag-matching local pools in the plan's scope are
 * candidates. Candidates must still pass the common suitability filter.
 */
public class LocalStoragePoolAllocator extends AbstractStoragePoolAllocator {
    private static final Logger s_logger = Logger.getLogger(LocalStoragePoolAllocator.class);
    @Inject
    StoragePoolHostDao _poolHostDao;
    @Inject
    VMInstanceDao _vmInstanceDao;
    @Inject
    UserVmDao _vmDao;
    @Inject
    ServiceOfferingDao _offeringDao;
    @Inject
    CapacityDao _capacityDao;
    @Inject
    ConfigurationDao _configDao;

    public LocalStoragePoolAllocator() {
    }

    /**
     * Collects up to returnUpTo suitable local storage pools for the disk.
     *
     * @return matching pools; empty list when none qualify (never null)
     */
    @Override
    protected List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
        List<StoragePool> candidates = new ArrayList<StoragePool>();
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("LocalStoragePoolAllocator trying to find storage pool to fit the vm");
        }
        // Attach-volume case: a data disk destined for an already-placed VM is
        // restricted to local pools visible from that VM's host.
        boolean attachToKnownHost = dskCh.getType() == Volume.Type.DATADISK && plan.getHostId() != null;
        if (attachToKnownHost) {
            for (StoragePoolHostVO mapping : _poolHostDao.listByHostId(plan.getHostId())) {
                StoragePoolVO poolVo = _storagePoolDao.findById(mapping.getPoolId());
                if (poolVo != null && poolVo.isLocal()) {
                    StoragePool candidate = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(poolVo.getId());
                    if (filter(avoid, candidate, dskCh, plan)) {
                        s_logger.debug("Found suitable local storage pool " + poolVo.getId() + ", adding to list");
                        candidates.add(candidate);
                    }
                }
                if (candidates.size() == returnUpTo) {
                    break;
                }
            }
        } else {
            // General case: scan every tag-matching local pool in the plan's scope.
            for (StoragePoolVO poolVo : _storagePoolDao.findLocalStoragePoolsByTags(plan.getDataCenterId(), plan.getPodId(), plan.getClusterId(), dskCh.getTags())) {
                if (candidates.size() == returnUpTo) {
                    break;
                }
                StoragePool candidate = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(poolVo.getId());
                if (filter(avoid, candidate, dskCh, plan)) {
                    candidates.add(candidate);
                }
            }
        }
        if (s_logger.isDebugEnabled()) {
            s_logger.debug("LocalStoragePoolAllocator returning " + candidates.size() + " suitable storage pools");
        }
        return candidates;
    }

    /**
     * Local storage is never overprovisioned (factor pinned to 1); per-volume
     * slack defaults to 50 MiB unless "extra.bytes.per.volume" is supplied.
     */
    @Override
    public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
        super.configure(name, params);
        _storageOverprovisioningFactor = new BigDecimal(1);
        _extraBytesPerVolume = NumbersUtil.parseLong((String) params.get("extra.bytes.per.volume"), 50 * 1024L * 1024L);
        return true;
    }
}

View File

@ -14,7 +14,7 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.allocator;
package org.apache.cloudstack.storage.allocator;
import java.util.List;
import java.util.Map;
@ -23,23 +23,17 @@ import javax.ejb.Local;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.springframework.stereotype.Component;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.host.Host;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume.Type;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@Component
@Local(value=StoragePoolAllocator.class)
public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implements StoragePoolAllocator {
@ -55,29 +49,13 @@ public class UseLocalForRootAllocator extends LocalStoragePoolAllocator implemen
return super.allocateToPool(dskCh, vmProfile, plan, avoid, returnUpTo);
}
@Override
public String chooseStorageIp(VirtualMachine vm, Host host, Host storage) {
return null;
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
super.configure(name, params);
return true;
}
@Override
protected boolean localStorageAllocationNeeded(DiskProfile dskCh) {
if (dskCh.getType() == Type.ROOT) {
return true;
} else if (dskCh.getType() == Type.DATADISK) {
return false;
} else {
return super.localStorageAllocationNeeded(dskCh);
}
}
protected UseLocalForRootAllocator() {
}
}

View File

@ -0,0 +1,80 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.allocator;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.StoragePool;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.StoragePoolDao;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@Component
/**
 * Allocator for zone-wide primary storage pools. Overrides the base filter
 * because zone-wide pools have no cluster, so the base class's cluster
 * hypervisor check (which dereferences the pool's cluster) does not apply;
 * only the exclude list and capacity are checked here.
 */
public class ZoneWideStoragePoolAllocator extends AbstractStoragePoolAllocator {
    private static final Logger s_logger = Logger.getLogger(ZoneWideStoragePoolAllocator.class);

    // BUG FIX: this class previously re-declared _storagePoolDao and
    // dataStoreMgr, shadowing the protected @Inject fields already provided by
    // AbstractStoragePoolAllocator. The duplicates are removed; the inherited
    // fields are used directly.

    /**
     * Zone-wide suitability check: reject pools on the planner's exclude list,
     * then verify capacity for the requested volume.
     */
    @Override
    protected boolean filter(ExcludeList avoid, StoragePool pool, DiskProfile dskCh,
            DeploymentPlan plan) {
        // BUG FIX: the exclude list was previously ignored by this override,
        // so pools the planner had already ruled out could be selected again.
        if (avoid.shouldAvoid(pool)) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug("StoragePool is in avoid set, skipping this pool");
            }
            return false;
        }
        Volume volume = _volumeDao.findById(dskCh.getVolumeId());
        List<Volume> requestVolumes = new ArrayList<Volume>();
        requestVolumes.add(volume);
        return storageMgr.storagePoolHasEnoughSpace(requestVolumes, pool);
    }

    /**
     * Returns up to returnUpTo suitable zone-wide pools. Only KVM guests are
     * eligible; any other hypervisor type yields an empty list.
     */
    @Override
    protected List<StoragePool> select(DiskProfile dskCh,
            VirtualMachineProfile<? extends VirtualMachine> vmProfile,
            DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
        List<StoragePool> suitablePools = new ArrayList<StoragePool>();
        HypervisorType hypervisor = vmProfile.getHypervisorType();
        if (hypervisor != null) {
            if (hypervisor != HypervisorType.KVM) {
                s_logger.debug("Only kvm supports zone wide storage");
                return suitablePools;
            }
        }
        List<StoragePoolVO> storagePools = _storagePoolDao.findZoneWideStoragePoolsByTags(plan.getDataCenterId(), dskCh.getTags());
        for (StoragePoolVO storage : storagePools) {
            if (suitablePools.size() == returnUpTo) {
                break;
            }
            StoragePool pol = (StoragePool) this.dataStoreMgr.getPrimaryDataStore(storage.getId());
            if (filter(avoid, pol, dskCh, plan)) {
                suitablePools.add(pol);
            }
        }
        return suitablePools;
    }
}

View File

@ -100,7 +100,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
@Inject
ConfigurationDao configDao;
@Inject
StorageManager storagMgr;
StorageManager storageMgr;
@Inject
VolumeDao volDao;
@Inject
@ -149,7 +149,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
CopyVolumeAnswer cvAnswer = null;
String errMsg = null;
try {
cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(destPool,
cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool,
cvCmd);
} catch (StorageUnavailableException e1) {
s_logger.debug("Failed to copy volume " + srcData.getId() + " to "
@ -231,7 +231,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
"2.1");
Answer answer = null;
try {
answer = this.storagMgr.sendToPool(pool, cmd);
answer = this.storageMgr.sendToPool(pool, cmd);
} catch (StorageUnavailableException e) {
} finally {
snapshotDao.unlockFromLockTable(snapshotId.toString());
@ -268,7 +268,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
+ snapshotId
+ " due to this snapshot is being used, try it later ");
}
answer = (CreateVolumeFromSnapshotAnswer) this.storagMgr
answer = (CreateVolumeFromSnapshotAnswer) this.storageMgr
.sendToPool(pool, createVolumeFromSnapshotCommand);
if (answer != null && answer.getResult()) {
vdiUUID = answer.getVdi();
@ -306,7 +306,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
StoragePool pool = (StoragePool)volume.getDataStore();
String errMsg = null;
try {
answer = storagMgr.sendToPool(pool, null, cmd);
answer = storageMgr.sendToPool(pool, null, cmd);
} catch (StorageUnavailableException e) {
s_logger.debug("Failed to send to storage pool", e);
throw new CloudRuntimeException("Failed to send to storage pool", e);
@ -358,7 +358,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
_copyvolumewait);
CopyVolumeAnswer cvAnswer;
try {
cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(srcPool, cvCmd);
cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(srcPool, cvCmd);
} catch (StorageUnavailableException e1) {
throw new CloudRuntimeException(
"Failed to copy the volume from the source primary storage pool to secondary storage.",
@ -376,7 +376,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
secondaryStorageVolumePath, destPool,
secondaryStorageURL, false, _copyvolumewait);
try {
cvAnswer = (CopyVolumeAnswer) this.storagMgr.sendToPool(destPool, cvCmd);
cvAnswer = (CopyVolumeAnswer) this.storageMgr.sendToPool(destPool, cvCmd);
} catch (StorageUnavailableException e1) {
throw new CloudRuntimeException(
"Failed to copy the volume from secondary storage to the destination primary storage pool.");
@ -464,7 +464,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
Long volumeId = snapshot.getVolumeId();
String origTemplateInstallPath = null;
List<StoragePoolVO> pools = this.storagMgr
List<StoragePoolVO> pools = this.storageMgr
.ListByDataCenterHypervisor(zoneId,
snapshot.getHypervisorType());
if (pools == null || pools.size() == 0) {
@ -516,7 +516,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
}
Answer answer = null;
try {
answer = this.storagMgr.sendToPool(pool, cmd);
answer = this.storageMgr.sendToPool(pool, cmd);
cmd = null;
} catch (StorageUnavailableException e) {
} finally {
@ -557,7 +557,7 @@ public class AncientDataMotionStrategy implements DataMotionStrategy {
CreatePrivateTemplateAnswer answer = null;
try {
answer = (CreatePrivateTemplateAnswer) this.storagMgr.sendToPool(
answer = (CreatePrivateTemplateAnswer) this.storageMgr.sendToPool(
pool, cmd);
} catch (StorageUnavailableException e) {
throw new CloudRuntimeException(

View File

@ -56,6 +56,7 @@ import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.Host;
import com.cloud.host.HostVO;
import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.resource.ResourceManager;
import com.cloud.server.ManagementServer;
import com.cloud.storage.OCFS2Manager;
@ -94,10 +95,10 @@ import com.cloud.vm.dao.SecondaryStorageVmDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.VMInstanceDao;
public class AncientPrimaryDataStoreLifeCyclImpl implements
public class AncientPrimaryDataStoreLifeCycleImpl implements
PrimaryDataStoreLifeCycle {
private static final Logger s_logger = Logger
.getLogger(AncientPrimaryDataStoreLifeCyclImpl.class);
.getLogger(AncientPrimaryDataStoreLifeCycleImpl.class);
@Inject
protected ResourceManager _resourceMgr;
protected List<StoragePoolDiscoverer> _discoverers;
@ -134,9 +135,6 @@ public class AncientPrimaryDataStoreLifeCyclImpl implements
protected StoragePoolHostDao _storagePoolHostDao;
@Inject
protected AlertManager _alertMgr;
@Inject
protected ConsoleProxyDao _consoleProxyDao;
@ -223,10 +221,6 @@ public class AncientPrimaryDataStoreLifeCyclImpl implements
}
pool = new StoragePoolVO(StoragePoolType.NetworkFilesystem,
storageHost, port, hostPath);
if (clusterId == null) {
throw new IllegalArgumentException(
"NFS need to have clusters specified for XenServers");
}
} else if (scheme.equalsIgnoreCase("file")) {
if (port == -1) {
port = 0;
@ -466,7 +460,18 @@ public class AncientPrimaryDataStoreLifeCyclImpl implements
@Override
public boolean attachZone(DataStore dataStore, ZoneScope scope) {
StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId());
List<HostVO> hosts = _resourceMgr.listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType.KVM, scope.getScopeId());
for (HostVO host : hosts) {
try {
this.storageMgr.connectHostToSharedPool(host.getId(),
dataStore.getId());
} catch (Exception e) {
s_logger.warn("Unable to establish a connection between " + host
+ " and " + dataStore, e);
}
}
StoragePoolVO pool = this.primaryDataStoreDao.findById(dataStore.getId());
pool.setScope(ScopeType.ZONE);
pool.setStatus(StoragePoolStatus.Up);
this.primaryDataStoreDao.update(pool.getId(), pool);

View File

@ -27,7 +27,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.HypervisorHostListener
import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreDriver;
import org.apache.cloudstack.storage.datastore.PrimaryDataStoreProviderManager;
import org.apache.cloudstack.storage.datastore.driver.AncientPrimaryDataStoreDriverImpl;
import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCyclImpl;
import org.apache.cloudstack.storage.datastore.lifecycle.AncientPrimaryDataStoreLifeCycleImpl;
import org.springframework.stereotype.Component;
import com.cloud.utils.component.ComponentContext;
@ -55,7 +55,7 @@ public class AncientPrimaryDataStoreProviderImpl implements
@Override
public boolean configure(Map<String, Object> params) {
lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCyclImpl.class);
lifecyle = ComponentContext.inject(AncientPrimaryDataStoreLifeCycleImpl.class);
driver = ComponentContext.inject(AncientPrimaryDataStoreDriverImpl.class);
uuid = (String)params.get("uuid");
id = (Long)params.get("id");

View File

@ -19,15 +19,13 @@
package org.apache.cloudstack.framework.events;
import com.cloud.utils.component.Adapter;
import java.util.UUID;
/**
* Interface to publish and subscribe to CloudStack events
*
*/
public interface EventBus extends Adapter{
public interface EventBus {
/**
* publish an event on to the event bus

View File

@ -67,6 +67,10 @@
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-server</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.geronimo.specs</groupId>
<artifactId>geronimo-servlet_3.0_spec</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>

View File

@ -317,7 +317,7 @@ fi
%post awsapi
if [ -d "%{_datadir}/%{name}-management" ] ; then
ln %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080
ln -s %{_datadir}/%{name}-bridge/webapps %{_datadir}/%{name}-management/webapps7080
fi
#No default permission as the permission setup is complex

167
packaging/debian/init/cloud-agent Executable file
View File

@ -0,0 +1,167 @@
#!/bin/bash
### BEGIN INIT INFO
# Provides: cloud agent
# Required-Start: $network $local_fs
# Required-Stop: $network $local_fs
# Default-Start: 3 4 5
# Default-Stop: 0 1 2 6
# Short-Description: Start/stop Apache CloudStack Agent
# Description: This scripts Starts/Stops the Apache CloudStack agent
## The CloudStack Agent is a part of the Apache CloudStack project and is used
## for managing KVM-based Hypervisors and performing secondary storage tasks inside
## the Secondary Storage System Virtual Machine.
## JSVC (Java daemonizing) is used for starting and stopping the agent
### END INIT INFO
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
. /lib/lsb/init-functions
SHORTNAME="cloud-agent"
PIDFILE=/var/run/"$SHORTNAME".pid
LOCKFILE=/var/lock/subsys/"$SHORTNAME"
PROGNAME="CloudStack Agent"
CLASS="com.cloud.agent.AgentShell"
PROG="jsvc"
DAEMON="/usr/bin/jsvc"
SHUTDOWN_WAIT="30"
unset OPTIONS
[ -r /etc/default/"$SHORTNAME" ] && source /etc/default/"$SHORTNAME"
# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT)
JDK_DIRS="/usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-sun /usr/lib/jvm/java-1.5.0-sun /usr/lib/j2sdk1.5-sun /usr/lib/j2sdk1.5-ibm"
for jdir in $JDK_DIRS; do
if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
JAVA_HOME="$jdir"
fi
done
export JAVA_HOME
# We need to append the JSVC daemon JAR to the classpath
# AgentShell implements the JSVC daemon methods
export CLASSPATH="/usr/share/java/commons-daemon.jar:/usr/share/java/cloud-agent-kvm.jar:/etc/cloud/agent"
wait_for_network() {
i=1
while [ $i -lt 10 ]
do
# Under Ubuntu and Debian libvirt by default creates a bridge called virbr0.
# That's why we want more then 3 lines back from brctl, so that there is a manually created bridge
if [ "$(brctl show|wc -l)" -gt 2 ]; then
break
else
sleep 1
let i=$i+1
continue
fi
done
}
start() {
if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
log_daemon_msg "$PROGNAME apparently already running"
log_end_msg 0
exit 0
fi
log_daemon_msg "Starting $PROGNAME" "$SHORTNAME"
if hostname --fqdn >/dev/null 2>&1 ; then
true
else
log_failure_msg "The host name does not resolve properly to an IP address. Cannot start $PROGNAME"
log_end_msg 1
exit 1
fi
wait_for_network
if start_daemon -p $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -errfile SYSLOG -D jna.nosys=true $CLASS
RETVAL=$?
then
rc=0
sleep 1
if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
log_failure_msg "$PROG failed to start"
rc=1
fi
else
rc=1
fi
if [ $rc -eq 0 ]; then
log_end_msg 0
else
log_end_msg 1
rm -f "$PIDFILE"
fi
}
stop() {
count="0"
log_daemon_msg "Stopping $PROGNAME" "$SHORTNAME"
killproc -p $PIDFILE $DAEMON
until [ "$count" -gt "$SHUTDOWN_WAIT" ]
do
agentPid=$(ps aux|grep [j]svc|grep $SHORTNAME)
if [ "$?" -gt "0" ];then
break
fi
sleep 1
let count="${count}+1"
done
agentPid=$(ps aux|grep [j]svc|grep $SHORTNAME)
if [ "$?" -eq "0" ]; then
agentPid=$(ps aux|grep [j]svc|awk '{print $2}')
if [ "$agentPid" != "" ]; then
log_warning_msg "$PROG still running, forcing kill"
kill -9 $agentPid
fi
fi
log_end_msg $?
rm -f "$PIDFILE"
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME"
RETVAL=$?
;;
restart | force-reload)
stop
sleep 3
start
;;
*)
echo "Usage: $0 {start|stop|restart|force-reload|status}"
RETVAL=3
esac
exit $RETVAL

View File

@ -0,0 +1,244 @@
#!/bin/sh
#
# /etc/init.d/tomcat6 -- startup script for the Tomcat 6 servlet engine
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
### BEGIN INIT INFO
# Provides: tomcat-vmops
# Required-Start: $local_fs $remote_fs $network
# Required-Stop: $local_fs $remote_fs $network
# Should-Start: $named
# Should-Stop: $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start Tomcat (CloudStack).
# Description: Start the Tomcat servlet engine that runs the CloudStack Management Server.
### END INIT INFO
PATH=/bin:/usr/bin:/sbin:/usr/sbin
NAME=cloud-management
DESC="CloudStack-specific Tomcat servlet engine"
DAEMON=/usr/bin/jsvc
CATALINA_HOME=/usr/share/cloud/management
DEFAULT=/etc/cloud/management/tomcat6.conf
JVM_TMP=/tmp/$NAME-temp
# We have to explicitly set the HOME variable to the homedir from the user "cloud"
# This is because various scripts run by the management server read the HOME variable
# and fail when this init script is run manually.
HOME=$(echo ~cloud)
if [ `id -u` -ne 0 ]; then
echo "You need root privileges to run this script"
exit 1
fi
# Make sure tomcat is started with system locale
if [ -r /etc/default/locale ]; then
. /etc/default/locale
export LANG
fi
. /lib/lsb/init-functions
. /etc/default/rcS
# The following variables can be overwritten in $DEFAULT
# Run Tomcat 6 as this user ID
TOMCAT6_USER=tomcat6
# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not
# defined in $DEFAULT)
JDK_DIRS="/usr/lib/jvm/java-1.6.0-openjdk-amd64/ /usr/lib/jvm/java-1.6.0-openjdk-i386/ /usr/lib/jvm/java-1.6.0-openjdk/ /usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-sun"
# Look for the right JVM to use
for jdir in $JDK_DIRS; do
if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
JAVA_HOME="$jdir"
fi
done
export JAVA_HOME
# Directory for per-instance configuration files and webapps
CATALINA_BASE=/usr/share/cloud/management
# Use the Java security manager? (yes/no)
TOMCAT6_SECURITY=no
# Default Java options
# Set java.awt.headless=true if JAVA_OPTS is not set so the
# Xalan XSL transformer can work without X11 display on JDK 1.4+
# It also looks like the default heap size of 64M is not enough for most cases
# so the maximum heap size is set to 128M
if [ -z "$JAVA_OPTS" ]; then
JAVA_OPTS="-Djava.awt.headless=true -Xmx128M"
fi
# End of variables that can be overwritten in $DEFAULT
# overwrite settings from default file
if [ -f "$DEFAULT" ]; then
. "$DEFAULT"
fi
if [ ! -f "$CATALINA_HOME/bin/bootstrap.jar" ]; then
log_failure_msg "$NAME is not installed"
exit 1
fi
[ -f "$DAEMON" ] || exit 0
POLICY_CACHE="$CATALINA_BASE/work/catalina.policy"
JAVA_OPTS="$JAVA_OPTS -Djava.endorsed.dirs=$CATALINA_HOME/endorsed -Dcatalina.base=$CATALINA_BASE -Dcatalina.home=$CATALINA_HOME -Djava.io.tmpdir=$JVM_TMP"
# Set the JSP compiler if set in the tomcat6.default file
if [ -n "$JSP_COMPILER" ]; then
JAVA_OPTS="$JAVA_OPTS -Dbuild.compiler=$JSP_COMPILER"
fi
if [ "$TOMCAT6_SECURITY" = "yes" ]; then
JAVA_OPTS="$JAVA_OPTS -Djava.security.manager -Djava.security.policy=$POLICY_CACHE"
fi
# Set juli LogManager if logging.properties is provided
if [ -r "$CATALINA_BASE"/conf/logging.properties ]; then
JAVA_OPTS="$JAVA_OPTS "-Djava.util.logging.manager=org.apache.juli.ClassLoaderLogManager" "-Djava.util.logging.config.file="$CATALINA_BASE/conf/logging.properties"
fi
# Define other required variables
CATALINA_PID="/var/run/$NAME.pid"
BOOTSTRAP_CLASS=org.apache.catalina.startup.Bootstrap
JSVC_CLASSPATH="/usr/share/java/commons-daemon.jar:$CATALINA_HOME/bin/bootstrap.jar"
JSVC_CLASSPATH=$CLASSPATH:$JSVC_CLASSPATH
# Look for Java Secure Sockets Extension (JSSE) JARs
if [ -z "${JSSE_HOME}" -a -r "${JAVA_HOME}/jre/lib/jsse.jar" ]; then
JSSE_HOME="${JAVA_HOME}/jre/"
fi
export JSSE_HOME
case "$1" in
start)
if [ -z "$JAVA_HOME" ]; then
log_failure_msg "no JDK found - please set JAVA_HOME"
exit 1
fi
if [ ! -d "$CATALINA_BASE/conf" ]; then
log_failure_msg "invalid CATALINA_BASE: $CATALINA_BASE"
exit 1
fi
log_daemon_msg "Starting $DESC" "$NAME"
if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
--user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
>/dev/null; then
# Regenerate POLICY_CACHE file
umask 022
echo "// AUTO-GENERATED FILE from /etc/tomcat6/policy.d/" \
> "$POLICY_CACHE"
echo "" >> "$POLICY_CACHE"
if ls $CATALINA_BASE/conf/policy.d/*.policy > /dev/null 2>&1 ; then
cat $CATALINA_BASE/conf/policy.d/*.policy \
>> "$POLICY_CACHE"
fi
# Remove / recreate JVM_TMP directory
rm -rf "$JVM_TMP"
mkdir "$JVM_TMP" || {
log_failure_msg "could not create JVM temporary directory"
exit 1
}
chown $TOMCAT6_USER "$JVM_TMP"
cd "$JVM_TMP"
# fix storage issues on nfs mounts
umask 000
$DAEMON -user "$TOMCAT6_USER" -cp "$JSVC_CLASSPATH" \
-outfile SYSLOG -errfile SYSLOG \
-pidfile "$CATALINA_PID" $JAVA_OPTS "$BOOTSTRAP_CLASS"
sleep 5
if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
--user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
>/dev/null; then
log_end_msg 1
else
log_end_msg 0
fi
else
log_progress_msg "(already running)"
log_end_msg 0
fi
;;
stop)
log_daemon_msg "Stopping $DESC" "$NAME"
if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
--user "$TOMCAT6_USER" --startas "$JAVA_HOME/bin/java" \
>/dev/null; then
log_progress_msg "(not running)"
else
$DAEMON -cp "$JSVC_CLASSPATH" -pidfile "$CATALINA_PID" \
-stop "$BOOTSTRAP_CLASS"
fi
rm -rf "$JVM_TMP"
log_end_msg 0
;;
status)
if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
--user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
>/dev/null; then
if [ -f "$CATALINA_PID" ]; then
log_success_msg "$DESC is not running, but pid file exists."
exit 1
else
log_success_msg "$DESC is not running."
exit 3
fi
else
log_success_msg "$DESC is running with pid `cat $CATALINA_PID`"
fi
;;
restart|force-reload)
if start-stop-daemon --test --stop --pidfile "$CATALINA_PID" \
--user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
>/dev/null; then
$0 stop
sleep 1
fi
$0 start
;;
try-restart)
if start-stop-daemon --test --start --pidfile "$CATALINA_PID" \
--user $TOMCAT6_USER --startas "$JAVA_HOME/bin/java" \
>/dev/null; then
$0 start
fi
;;
*)
log_success_msg "Usage: $0 {start|stop|restart|try-restart|force-reload|status}"
exit 1
;;
esac
exit 0

131
packaging/debian/init/cloud-usage Executable file
View File

@ -0,0 +1,131 @@
#!/bin/bash
### BEGIN INIT INFO
# Provides: cloud usage
# Required-Start: $network $local_fs
# Required-Stop: $network $local_fs
# Default-Start: 3 4 5
# Default-Stop: 0 1 2 6
# Short-Description: Start/stop Apache CloudStack Usage Monitor
# Description: This scripts Starts/Stops the Apache CloudStack Usage Monitor
## The CloudStack Usage Monitor is a part of the Apache CloudStack project and is used
## for storing usage statistics from instances.
## JSVC (Java daemonizing) is used for starting and stopping the usage monitor.
### END INIT INFO
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
. /lib/lsb/init-functions
SHORTNAME="cloud-usage"
PIDFILE=/var/run/"$SHORTNAME".pid
LOGFILE=/var/log/cloud/usage/usage-server.log
PROGNAME="CloudStack Usage Monitor"
CLASS="com.cloud.usage.UsageServer"
PROG="jsvc"
DAEMON="/usr/bin/jsvc"
USER=@MSUSER@
unset OPTIONS
[ -r @SYSCONFDIR@/default/"$SHORTNAME" ] && source @SYSCONFDIR@/default/"$SHORTNAME"
# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT)
JDK_DIRS="/usr/lib/jvm/java-6-openjdk /usr/lib/jvm/java-6-openjdk-i386 /usr/lib/jvm/java-6-openjdk-amd64 /usr/lib/jvm/java-6-sun /usr/lib/jvm/jre-1.6.0 /usr/lib/j2sdk1.5-sun /usr/lib/jre-openjdk"
for jdir in $JDK_DIRS; do
if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
JAVA_HOME="$jdir"
fi
done
export JAVA_HOME
SCP="@SYSTEMCLASSPATH@"
DCP="@DEPSCLASSPATH@"
UCP="@USAGECLASSPATH@"
JCP="/usr/share/java/commons-daemon.jar"
# We need to append the JSVC daemon JAR to the classpath
# AgentShell implements the JSVC daemon methods
export CLASSPATH="$SCP:$DCP:$UCP:$JCP:@USAGESYSCONFDIR@"
start() {
if [ -s "$PIDFILE" ] && kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
log_daemon_msg "$PROGNAME apparently already running"
log_end_msg 0
exit 0
fi
log_daemon_msg "Starting $PROGNAME" "$SHORTNAME"
if hostname --fqdn >/dev/null 2>&1 ; then
true
else
log_failure_msg "The host name does not resolve properly to an IP address. Cannot start $PROGNAME"
log_end_msg 1
exit 1
fi
if start_daemon -p $PIDFILE $DAEMON -cp "$CLASSPATH" -pidfile "$PIDFILE" -user "$USER" -outfile SYSLOG -errfile SYSLOG -Dpid=$$ $CLASS
RETVAL=$?
then
rc=0
sleep 1
if ! kill -0 $(cat "$PIDFILE") >/dev/null 2>&1; then
log_failure_msg "$PROG failed to start"
rc=1
fi
else
rc=1
fi
if [ $rc -eq 0 ]; then
log_end_msg 0
else
log_end_msg 1
rm -f "$PIDFILE"
fi
}
stop() {
log_daemon_msg "Stopping $PROGNAME" "$SHORTNAME"
killproc -p $PIDFILE $DAEMON
log_end_msg $?
rm -f "$PIDFILE"
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status_of_proc -p "$PIDFILE" "$PROG" "$SHORTNAME"
RETVAL=$?
;;
restart | force-reload)
stop
sleep 3
start
;;
*)
echo "Usage: $0 {start|stop|restart|force-reload|status}"
RETVAL=3
esac
exit $RETVAL

View File

@ -37,12 +37,12 @@ CONFIGUREVARS=
DEPSCLASSPATH=
DOCDIR=
IPALOCATORLOG=/var/log/cloud/management/ipallocator.log
JAVADIR=/usr/share/java
JAVADIR=/usr/share/cloudstack-management/webapps/client/WEB-INF/lib
LIBEXECDIR=/usr/libexec
LOCKDIR=/var/lock
MSCLASSPATH=
MSCONF=/etc/cloud/management
MSENVIRON=/usr/share/cloud/management
MSENVIRON=/usr/share/cloudstack-management
MSLOG=/var/log/cloud/management/management-server.log
MSLOGDIR=/var/log/cloud/management/
MSMNTDIR=/var/lib/cloud/mnt
@ -52,7 +52,7 @@ PLUGINJAVADIR=
PREMIUMJAVADIR=
PYTHONDIR=/usr/lib/python2.6/site-packages/
SERVERSYSCONFDIR=/etc/cloud/server
SETUPDATADIR=/usr/share/cloud/setup
SETUPDATADIR=/usr/share/cloudstack-management/setup
SYSCONFDIR=/etc
SYSTEMCLASSPATH=
SYSTEMJARS=

View File

@ -22,6 +22,8 @@ do
while read line
do
ip=`echo $line|cut -d " " -f 2|cut -d "/" -f 1`
arping -I $i -A $ip -c 2 >> [RROUTER_LOG] 2>&1
arping -I $i -A $ip -c 1 >> [RROUTER_LOG] 2>&1
arping -I $i -A $ip -c 1 >> [RROUTER_LOG] 2>&1
done < /tmp/iplist_$i
done < /tmp/iflist
sleep 1

View File

@ -40,13 +40,17 @@ import java.util.concurrent.Executors;
public class RabbitMQEventBus extends ManagerBase implements EventBus {
// details of AMQP server
private static String _amqpHost;
private static Integer _port;
private static String _username;
private static String _password;
private static String amqpHost;
private static Integer port;
private static String username;
private static String password;
// AMQP exchange name where all CloudStack events will be published
private static String _amqpExchangeName;
private static String amqpExchangeName;
private String name;
private static Integer retryInterval;
// hashmap to book keep the registered subscribers
private static ConcurrentHashMap<String, Ternary<String, Channel, EventSubscriber>> _subscribers;
@ -58,59 +62,76 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
private static boolean _autoAck = true;
private ExecutorService executorService;
private String _name;
private static DisconnectHandler disconnectHandler;
private static Integer _retryInterval;
private static final Logger s_logger = Logger.getLogger(RabbitMQEventBus.class);
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
_amqpHost = (String) params.get("server");
if (_amqpHost == null || _amqpHost.isEmpty()) {
throw new ConfigurationException("Unable to get the AMQP server details");
}
_username = (String) params.get("username");
if (_username == null || _username.isEmpty()) {
throw new ConfigurationException("Unable to get the username details");
}
_password = (String) params.get("password");
if (_password == null || _password.isEmpty()) {
throw new ConfigurationException("Unable to get the password details");
}
_amqpExchangeName = (String) params.get("exchangename");
if (_amqpExchangeName == null || _amqpExchangeName.isEmpty()) {
throw new ConfigurationException("Unable to get the _exchange details on the AMQP server");
}
try {
String portStr = (String) params.get("port");
if (portStr == null || portStr.isEmpty()) {
if (amqpHost == null || amqpHost.isEmpty()) {
throw new ConfigurationException("Unable to get the AMQP server details");
}
if (username == null || username.isEmpty()) {
throw new ConfigurationException("Unable to get the username details");
}
if (password == null || password.isEmpty()) {
throw new ConfigurationException("Unable to get the password details");
}
if (amqpExchangeName == null || amqpExchangeName.isEmpty()) {
throw new ConfigurationException("Unable to get the _exchange details on the AMQP server");
}
if (port == null) {
throw new ConfigurationException("Unable to get the port details of AMQP server");
}
_port = Integer.parseInt(portStr);
String retryIntervalStr = (String) params.get("retryinterval");
if (retryIntervalStr == null || retryIntervalStr.isEmpty()) {
// default to 10s to try out reconnect
retryIntervalStr = "10000";
if (retryInterval == null) {
retryInterval = 10000;// default to 10s to try out reconnect
}
_retryInterval = Integer.parseInt(retryIntervalStr);
} catch (NumberFormatException e) {
throw new ConfigurationException("Invalid port number/retry interval");
}
_subscribers = new ConcurrentHashMap<String, Ternary<String, Channel, EventSubscriber>>();
executorService = Executors.newCachedThreadPool();
disconnectHandler = new DisconnectHandler();
_name = name;
return true;
}
public void setServer(String amqpHost) {
this.amqpHost = amqpHost;
}
public void setUsername(String username) {
this.username = username;
}
public void setPassword(String password) {
this.password = password;
}
public void setPort(Integer port) {
this.port = port;
}
public void setName(String name) {
this.name = name;
}
public void setExchange(String exchange) {
this.amqpExchangeName = exchange;
}
public void setRetryInterval(Integer retryInterval) {
this.retryInterval = retryInterval;
}
/** Call to subscribe to interested set of events
*
* @param topic defines category and type of the events being subscribed to
@ -141,9 +162,9 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
Channel channel = createChannel(connection);
// create a queue and bind it to the exchange with binding key formed from event topic
createExchange(channel, _amqpExchangeName);
createExchange(channel, amqpExchangeName);
channel.queueDeclare(queueName, false, false, false, null);
channel.queueBind(queueName, _amqpExchangeName, bindingKey);
channel.queueBind(queueName, amqpExchangeName, bindingKey);
// register a callback handler to receive the events that a subscriber subscribed to
channel.basicConsume(queueName, _autoAck, queueName,
@ -216,8 +237,8 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
try {
Connection connection = getConnection();
Channel channel = createChannel(connection);
createExchange(channel, _amqpExchangeName);
publishEventToExchange(channel, _amqpExchangeName, routingKey, eventDescription);
createExchange(channel, amqpExchangeName);
publishEventToExchange(channel, amqpExchangeName, routingKey, eventDescription);
channel.close();
} catch (AlreadyClosedException e) {
closeConnection();
@ -315,11 +336,11 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
private synchronized Connection createConnection() throws Exception {
try {
ConnectionFactory factory = new ConnectionFactory();
factory.setUsername(_username);
factory.setPassword(_password);
factory.setUsername(username);
factory.setPassword(password);
factory.setVirtualHost("/");
factory.setHost(_amqpHost);
factory.setPort(_port);
factory.setHost(amqpHost);
factory.setPort(port);
Connection connection = factory.newConnection();
connection.addShutdownListener(disconnectHandler);
_connection = connection;
@ -481,7 +502,7 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
while (!connected) {
try {
Thread.sleep(_retryInterval);
Thread.sleep(retryInterval);
} catch (InterruptedException ie) {
// ignore timer interrupts
}
@ -504,9 +525,9 @@ public class RabbitMQEventBus extends ManagerBase implements EventBus {
* with binding key formed from event topic
*/
Channel channel = createChannel(connection);
createExchange(channel, _amqpExchangeName);
createExchange(channel, amqpExchangeName);
channel.queueDeclare(subscriberId, false, false, false, null);
channel.queueBind(subscriberId, _amqpExchangeName, bindingKey);
channel.queueBind(subscriberId, amqpExchangeName, bindingKey);
// register a callback handler to receive the events that a subscriber subscribed to
channel.basicConsume(subscriberId, _autoAck, subscriberId,

View File

@ -43,6 +43,11 @@
<artifactId>libvirt</artifactId>
<version>0.4.9</version>
</dependency>
<dependency>
<groupId>net.java.dev.jna</groupId>
<artifactId>jna</artifactId>
<version>3.5.1</version>
</dependency>
</dependencies>
<build>
<defaultGoal>install</defaultGoal>
@ -50,68 +55,23 @@
<testSourceDirectory>test</testSourceDirectory>
<plugins>
<plugin>
<artifactId>maven-assembly-plugin</artifactId>
<version>2.3</version>
<configuration>
<finalName>kvm-agent</finalName>
<appendAssemblyId>false</appendAssemblyId>
<descriptors>
<descriptor>agent-descriptor.xml</descriptor>
</descriptors>
</configuration>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>2.5.1</version>
<executions>
<execution>
<id>make-agent</id>
<id>copy-dependencies</id>
<phase>package</phase>
<goals>
<goal>single</goal>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>${project.build.directory}/dependencies</outputDirectory>
<includeScope>runtime</includeScope>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<artifactId>maven-resources-plugin</artifactId>
<version>2.6</version>
<executions>
<execution>
<id>copy-resources</id>
<!-- here the phase you need -->
<phase>package</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<configuration>
<outputDirectory>dist</outputDirectory>
<resources>
<resource>
<directory>target</directory>
<includes>
<include>kvm-agent.zip</include>
</includes>
</resource>
</resources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-dependency-plugin</artifactId>
<version>2.5.1</version>
<executions>
<execution>
<id>copy-dependencies</id>
<phase>package</phase>
<goals>
<goal>copy-dependencies</goal>
</goals>
<configuration>
<outputDirectory>target/dependencies</outputDirectory>
<includeScope>runtime</includeScope>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -2934,7 +2934,21 @@ ServerResource {
vm.addComp(grd);
CpuTuneDef ctd = new CpuTuneDef();
ctd.setShares(vmTO.getCpus() * vmTO.getMinSpeed());
/**
A 4.0.X/4.1.X management server doesn't send the correct JSON
command for getMinSpeed, it only sends a 'speed' field.
So if getMinSpeed() returns null we fall back to getSpeed().
This way a >4.1 agent can work communicate a <=4.1 management server
This change is due to the overcommit feature in 4.2
*/
if (vmTO.getMinSpeed() != null) {
ctd.setShares(vmTO.getCpus() * vmTO.getMinSpeed());
} else {
ctd.setShares(vmTO.getCpus() * vmTO.getSpeed());
}
vm.addComp(ctd);
FeaturesDef features = new FeaturesDef();

View File

@ -0,0 +1,184 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.cloud.hypervisor.kvm.resource;
import org.junit.Test;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef;
import com.cloud.template.VirtualMachineTemplate.BootloaderType;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.Type;
import java.util.Random;
import static org.junit.Assert.assertEquals;
public class LibvirtComputingResourceTest {
String _hyperVisorType = "kvm";
Random _random = new Random();
/**
This test tests if the Agent can handle a vmSpec coming
from a <=4.1 management server.
The overcommit feature has not been merged in there and thus
only 'speed' is set.
*/
@Test
public void testCreateVMFromSpecLegacy() {
int id = _random.nextInt(65534);
String name = "test-instance-1";
int cpus = _random.nextInt(7) + 1;
int speed = 1024;
int minRam = 256 * 1024;
int maxRam = 512 * 1024;
String os = "Ubuntu";
boolean haEnabled = false;
boolean limitCpuUse = false;
String vncAddr = "1.2.3.4";
String vncPassword = "mySuperSecretPassword";
LibvirtComputingResource lcr = new LibvirtComputingResource();
VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, speed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword);
to.setVncAddr(vncAddr);
LibvirtVMDef vm = lcr.createVMFromSpec(to);
vm.setHvsType(_hyperVisorType);
String vmStr = "<domain type='" + _hyperVisorType + "'>\n";
vmStr += "<name>" + name + "</name>\n";
vmStr += "<uuid>b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9</uuid>\n";
vmStr += "<description>" + os + "</description>\n";
vmStr += "<clock offset='utc'>\n";
vmStr += "</clock>\n";
vmStr += "<features>\n";
vmStr += "<pae/>\n";
vmStr += "<apic/>\n";
vmStr += "<acpi/>\n";
vmStr += "</features>\n";
vmStr += "<devices>\n";
vmStr += "<serial type='pty'>\n";
vmStr += "<target port='0'/>\n";
vmStr += "</serial>\n";
vmStr += "<graphics type='vnc' autoport='yes' listen='" + vncAddr + "'/>\n";
vmStr += "<console type='pty'>\n";
vmStr += "<target port='0'/>\n";
vmStr += "</console>\n";
vmStr += "<input type='tablet' bus='usb'/>\n";
vmStr += "</devices>\n";
vmStr += "<memory>" + maxRam / 1024 + "</memory>\n";
vmStr += "<currentMemory>" + minRam / 1024 + "</currentMemory>\n";
vmStr += "<devices>\n";
vmStr += "<memballoon model='virtio'/>\n";
vmStr += "</devices>\n";
vmStr += "<vcpu>" + cpus + "</vcpu>\n";
vmStr += "<os>\n";
vmStr += "<type machine='pc'>hvm</type>\n";
vmStr += "<boot dev='cdrom'/>\n";
vmStr += "<boot dev='hd'/>\n";
vmStr += "</os>\n";
vmStr += "<cputune>\n";
vmStr += "<shares>" + (cpus * speed) + "</shares>\n";
vmStr += "</cputune>\n";
vmStr += "<on_reboot>restart</on_reboot>\n";
vmStr += "<on_poweroff>destroy</on_poweroff>\n";
vmStr += "<on_crash>destroy</on_crash>\n";
vmStr += "</domain>\n";
assertEquals(vmStr, vm.toString());
}
/**
This test tests if the Agent can handle a vmSpec coming
from a >4.1 management server.
It tests if the Agent can handle a vmSpec with overcommit
data like minSpeed and maxSpeed in there
*/
@Test
public void testCreateVMFromSpec() {
int id = _random.nextInt(65534);
String name = "test-instance-1";
int cpus = _random.nextInt(7) + 1;
int minSpeed = 1024;
int maxSpeed = 2048;
int minRam = 256 * 1024;
int maxRam = 512 * 1024;
String os = "Ubuntu";
boolean haEnabled = false;
boolean limitCpuUse = false;
String vncAddr = "1.2.3.4";
String vncPassword = "mySuperSecretPassword";
LibvirtComputingResource lcr = new LibvirtComputingResource();
VirtualMachineTO to = new VirtualMachineTO(id, name, VirtualMachine.Type.User, cpus, minSpeed, maxSpeed, minRam, maxRam, BootloaderType.HVM, os, false, false, vncPassword);
to.setVncAddr(vncAddr);
LibvirtVMDef vm = lcr.createVMFromSpec(to);
vm.setHvsType(_hyperVisorType);
String vmStr = "<domain type='" + _hyperVisorType + "'>\n";
vmStr += "<name>" + name + "</name>\n";
vmStr += "<uuid>b0f0a72d-7efb-3cad-a8ff-70ebf30b3af9</uuid>\n";
vmStr += "<description>" + os + "</description>\n";
vmStr += "<clock offset='utc'>\n";
vmStr += "</clock>\n";
vmStr += "<features>\n";
vmStr += "<pae/>\n";
vmStr += "<apic/>\n";
vmStr += "<acpi/>\n";
vmStr += "</features>\n";
vmStr += "<devices>\n";
vmStr += "<serial type='pty'>\n";
vmStr += "<target port='0'/>\n";
vmStr += "</serial>\n";
vmStr += "<graphics type='vnc' autoport='yes' listen='" + vncAddr + "'/>\n";
vmStr += "<console type='pty'>\n";
vmStr += "<target port='0'/>\n";
vmStr += "</console>\n";
vmStr += "<input type='tablet' bus='usb'/>\n";
vmStr += "</devices>\n";
vmStr += "<memory>" + maxRam / 1024 + "</memory>\n";
vmStr += "<currentMemory>" + minRam / 1024 + "</currentMemory>\n";
vmStr += "<devices>\n";
vmStr += "<memballoon model='virtio'/>\n";
vmStr += "</devices>\n";
vmStr += "<vcpu>" + cpus + "</vcpu>\n";
vmStr += "<os>\n";
vmStr += "<type machine='pc'>hvm</type>\n";
vmStr += "<boot dev='cdrom'/>\n";
vmStr += "<boot dev='hd'/>\n";
vmStr += "</os>\n";
vmStr += "<cputune>\n";
vmStr += "<shares>" + (cpus * minSpeed) + "</shares>\n";
vmStr += "</cputune>\n";
vmStr += "<on_reboot>restart</on_reboot>\n";
vmStr += "<on_poweroff>destroy</on_poweroff>\n";
vmStr += "<on_crash>destroy</on_crash>\n";
vmStr += "</domain>\n";
assertEquals(vmStr, vm.toString());
}
}

View File

@ -29,10 +29,13 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import org.apache.cloudstack.api.ApiConstants;
import com.cloud.agent.api.StartupCommand;
import com.cloud.agent.api.StartupRoutingCommand;
import com.cloud.alert.AlertManager;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter.NetworkType;
@ -41,6 +44,7 @@ import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.exception.DiscoveredWithErrorException;
import com.cloud.exception.DiscoveryException;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.host.HostVO;
import com.cloud.host.dao.HostDao;
import com.cloud.hypervisor.Hypervisor;
@ -49,10 +53,14 @@ import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao;
import com.cloud.hypervisor.vmware.manager.VmwareManager;
import com.cloud.hypervisor.vmware.mo.ClusterMO;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.mo.VirtualSwitchType;
import com.cloud.hypervisor.vmware.resource.VmwareContextFactory;
import com.cloud.hypervisor.vmware.resource.VmwareResource;
import com.cloud.hypervisor.vmware.util.VmwareContext;
import com.cloud.network.NetworkModel;
import com.cloud.network.Networks.TrafficType;
import com.cloud.network.PhysicalNetwork;
import com.cloud.network.VmwareTrafficLabel;
import com.cloud.network.dao.CiscoNexusVSMDeviceDao;
import com.cloud.resource.Discoverer;
import com.cloud.resource.DiscovererBase;
@ -60,9 +68,9 @@ import com.cloud.resource.ResourceManager;
import com.cloud.resource.ResourceStateAdapter;
import com.cloud.resource.ServerResource;
import com.cloud.resource.UnableDeleteHostException;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.TemplateType;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.user.Account;
import com.cloud.utils.UriUtils;
@ -98,6 +106,9 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
NetworkModel _netmgr;
@Inject
HypervisorCapabilitiesDao _hvCapabilitiesDao;
protected Map<String, String> _urlParams;
protected boolean useDVS = false;
protected boolean nexusDVS = false;
public VmwareServerDiscoverer() {
s_logger.info("VmwareServerDiscoverer is constructed");
@ -137,13 +148,85 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
String publicTrafficLabel = null;
String guestTrafficLabel = null;
Map<String, String> vsmCredentials = null;
VirtualSwitchType defaultVirtualSwitchType = VirtualSwitchType.StandardVirtualSwitch;
String paramGuestVswitchType = null;
String paramGuestVswitchName = null;
String paramPublicVswitchType = null;
String paramPublicVswitchName = null;
VmwareTrafficLabel guestTrafficLabelObj = new VmwareTrafficLabel(TrafficType.Guest);
VmwareTrafficLabel publicTrafficLabelObj = new VmwareTrafficLabel(TrafficType.Public);
Map<String, String> clusterDetails = _clusterDetailsDao.findDetails(clusterId);
_readGlobalConfigParameters();
// Set default physical network end points for public and guest traffic
// Private traffic will be only on standard vSwitch for now. See below TODO.
if (useDVS) {
// Parse url parameters for type of vswitch and name of vswitch specified at cluster level
paramGuestVswitchType = _urlParams.get(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC);
paramGuestVswitchName = _urlParams.get(ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC);
paramPublicVswitchType = _urlParams.get(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC);
paramPublicVswitchName = _urlParams.get(ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC);
defaultVirtualSwitchType = getDefaultVirtualSwitchType(nexusDVS);
}
// Get zone wide traffic labels for Guest traffic and Public traffic
guestTrafficLabel = _netmgr.getDefaultGuestTrafficLabel(dcId, HypervisorType.VMware);
publicTrafficLabel = _netmgr.getDefaultPublicTrafficLabel(dcId, HypervisorType.VMware);
// Process traffic label information provided at zone level and cluster level
guestTrafficLabelObj = getTrafficInfo(TrafficType.Guest, guestTrafficLabel, defaultVirtualSwitchType, paramGuestVswitchType, paramGuestVswitchName, clusterId);
publicTrafficLabelObj = getTrafficInfo(TrafficType.Public, publicTrafficLabel, defaultVirtualSwitchType, paramPublicVswitchType, paramPublicVswitchName, clusterId);
// Zone level vSwitch Type depends on zone level traffic labels
//
// User can override Zone wide vswitch type (for public and guest) by providing following optional parameters in addClusterCmd
// param "guestvswitchtype" with valid values vmwaredvs, vmwaresvs, nexusdvs
// param "publicvswitchtype" with valid values vmwaredvs, vmwaresvs, nexusdvs
//
// Format of label is <VSWITCH>,<VLANID>,<VSWITCHTYPE>
// If a field <VLANID> OR <VSWITCHTYPE> is not present leave it empty.
// Ex: 1) vswitch0
// 2) dvswitch0,200,vmwaredvs
// 3) nexusepp0,300,nexusdvs
// 4) vswitch1,400,vmwaresvs
// 5) vswitch0
// default vswitchtype is 'vmwaresvs'.
// <VSWITCHTYPE> 'vmwaresvs' is for vmware standard vswitch
// <VSWITCHTYPE> 'vmwaredvs' is for vmware distributed virtual switch
// <VSWITCHTYPE> 'nexusdvs' is for cisco nexus distributed virtual switch
// Configuration Check: A physical network cannot be shared by different types of virtual switches.
//
// Check if different vswitch types are chosen for same physical network
// 1. Get physical network for guest traffic - multiple networks
// 2. Get physical network for public traffic - single network
// See if 2 is in 1
// if no - pass
// if yes - compare publicTrafficLabelObj.getVirtualSwitchType() == guestTrafficLabelObj.getVirtualSwitchType()
// true - pass
// false - throw exception - fail cluster add operation
List<? extends PhysicalNetwork> pNetworkListGuestTraffic = _netmgr.getPhysicalNtwksSupportingTrafficType(dcId, TrafficType.Guest);
List<? extends PhysicalNetwork> pNetworkListPublicTraffic = _netmgr.getPhysicalNtwksSupportingTrafficType(dcId, TrafficType.Public);
// Public network would be on single physical network hence getting first object of the list would suffice.
PhysicalNetwork pNetworkPublic = pNetworkListPublicTraffic.get(0);
if (pNetworkListGuestTraffic.contains(pNetworkPublic)) {
if (publicTrafficLabelObj.getVirtualSwitchType() != guestTrafficLabelObj.getVirtualSwitchType()) {
String msg = "Both public traffic and guest traffic is over same physical network " + pNetworkPublic +
". And virtual switch type chosen for each traffic is different" +
". A physical network cannot be shared by different types of virtual switches.";
s_logger.error(msg);
throw new InvalidParameterValueException(msg);
}
}
privateTrafficLabel = _netmgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.VMware);
if (privateTrafficLabel != null) {
s_logger.info("Detected private network label : " + privateTrafficLabel);
}
if (_vmwareMgr.getNexusVSwitchGlobalParameter()) {
if (nexusDVS) {
DataCenterVO zone = _dcDao.findById(dcId);
NetworkType zoneType = zone.getNetworkType();
if (zoneType != NetworkType.Basic) {
@ -168,7 +251,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
context.registerStockObject("privateTrafficLabel",
privateTrafficLabel);
if (_vmwareMgr.getNexusVSwitchGlobalParameter()) {
if (nexusDVS) {
if (vsmCredentials != null) {
s_logger.info("Stocking credentials of Nexus VSM");
context.registerStockObject("vsmcredentials",
@ -190,8 +273,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
}
ManagedObjectReference morCluster = null;
Map<String, String> clusterDetails = _clusterDetailsDao
.findDetails(clusterId);
clusterDetails = _clusterDetailsDao.findDetails(clusterId);
if (clusterDetails.get("url") != null) {
URI uriFromCluster = new URI(
UriUtils.encodeURIComponent(clusterDetails.get("url")));
@ -247,13 +329,8 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
params.put("private.network.vswitch.name",
privateTrafficLabel);
}
if (publicTrafficLabel != null) {
params.put("public.network.vswitch.name",
publicTrafficLabel);
}
if (guestTrafficLabel != null) {
params.put("guest.network.vswitch.name", guestTrafficLabel);
}
params.put("guestTrafficInfo", guestTrafficLabelObj);
params.put("publicTrafficInfo", publicTrafficLabelObj);
VmwareResource resource = new VmwareResource();
try {
@ -415,4 +492,153 @@ public class VmwareServerDiscoverer extends DiscovererBase implements
.getSimpleName());
return super.stop();
}
/**
 * Resolves the traffic label for one traffic type, combining the zone-wide
 * label with optional cluster-level overrides supplied via addCluster URL
 * parameters. Any override actually used is persisted to the cluster details.
 *
 * @param trafficType              Guest or Public traffic
 * @param zoneWideTrafficLabel     physical-network traffic label defined at zone scope
 * @param defaultVirtualSwitchType switch type implied by the global dvSwitch flags
 * @param vSwitchType              cluster-level switch-type override, or null
 * @param vSwitchName              cluster-level switch-name override, or null
 * @param clusterId                cluster whose details receive the overrides
 * @return the resolved traffic label
 * @throws InvalidParameterValueException if the zone label or the override
 *         names an unrecognized virtual switch type
 */
private VmwareTrafficLabel getTrafficInfo(TrafficType trafficType, String zoneWideTrafficLabel, VirtualSwitchType defaultVirtualSwitchType, String vSwitchType, String vSwitchName, Long clusterId) {
    final VmwareTrafficLabel label;
    try {
        label = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defaultVirtualSwitchType);
    } catch (InvalidParameterValueException e) {
        s_logger.error("Failed to recognize virtual switch type specified for " + trafficType +
                " traffic due to " + e.getMessage());
        throw e;
    }

    // No cluster-level override to handle when the default is the standard
    // vSwitch (global dvSwitch flag off) or when the caller supplied neither
    // a switch type nor a switch name: the zone-wide label alone decides.
    if (defaultVirtualSwitchType.equals(VirtualSwitchType.StandardVirtualSwitch) || (vSwitchType == null && vSwitchName == null)) {
        return label;
    }

    // Cluster-level overrides were supplied; merge them into the label and
    // record them in the cluster details for later reloads.
    final Map<String, String> details = _clusterDetailsDao.findDetails(clusterId);
    final boolean isGuest = trafficType == TrafficType.Guest;

    if (vSwitchName != null) {
        label.setVirtualSwitchName(vSwitchName);
        details.put(isGuest ? ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC
                            : ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC, vSwitchName);
    }

    if (vSwitchType != null) {
        validateVswitchType(vSwitchType);
        label.setVirtualSwitchType(VirtualSwitchType.getType(vSwitchType));
        details.put(isGuest ? ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC
                            : ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, vSwitchType);
    }

    // Persist cluster-level override configuration.
    _clusterDetailsDao.persist(clusterId, details);
    return label;
}
/**
 * Resolves the traffic label for one traffic type from the zone-wide label,
 * applying any cluster-level overrides already persisted in the cluster
 * details. Used when rebuilding resource configuration (e.g. on reload).
 *
 * @param trafficType          Guest or Public traffic (others pass through unmodified)
 * @param zoneWideTrafficLabel physical-network traffic label defined at zone scope
 * @param clusterDetails       persisted cluster detail map that may hold overrides
 * @param defVirtualSwitchType switch type implied by the global dvSwitch flags
 * @return the resolved traffic label
 * @throws InvalidParameterValueException if the zone label names an
 *         unrecognized virtual switch type
 */
private VmwareTrafficLabel getTrafficInfo(TrafficType trafficType, String zoneWideTrafficLabel, Map<String, String> clusterDetails, VirtualSwitchType defVirtualSwitchType) {
    final VmwareTrafficLabel label;
    try {
        label = new VmwareTrafficLabel(zoneWideTrafficLabel, trafficType, defVirtualSwitchType);
    } catch (InvalidParameterValueException e) {
        s_logger.error("Failed to recognize virtual switch type specified for " + trafficType +
                " traffic due to " + e.getMessage());
        throw e;
    }

    // Cluster-level overrides only apply when a distributed switch is in use.
    if (defVirtualSwitchType.equals(VirtualSwitchType.StandardVirtualSwitch)) {
        return label;
    }

    // Pick the detail keys matching this traffic type; anything other than
    // Guest/Public carries no overrides.
    String nameKey = null;
    String typeKey = null;
    if (trafficType == TrafficType.Guest) {
        nameKey = ApiConstants.VSWITCH_NAME_GUEST_TRAFFIC;
        typeKey = ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC;
    } else if (trafficType == TrafficType.Public) {
        nameKey = ApiConstants.VSWITCH_NAME_PUBLIC_TRAFFIC;
        typeKey = ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC;
    }

    if (nameKey != null && clusterDetails.containsKey(nameKey)) {
        label.setVirtualSwitchName(clusterDetails.get(nameKey));
    }
    if (typeKey != null && clusterDetails.containsKey(typeKey)) {
        label.setVirtualSwitchType(VirtualSwitchType.getType(clusterDetails.get(typeKey)));
    }
    return label;
}
/**
 * Refreshes the cached dvSwitch flags ({@code useDVS}, {@code nexusDVS})
 * from the global configuration. Leaves current values untouched when no
 * configuration DAO is wired in.
 */
private void _readGlobalConfigParameters() {
    if (_configDao == null) {
        return;
    }
    useDVS = Boolean.parseBoolean(_configDao.getValue(Config.VmwareUseDVSwitch.key()));
    nexusDVS = Boolean.parseBoolean(_configDao.getValue(Config.VmwareUseNexusVSwitch.key()));
}
/**
 * Builds the resource configuration parameters for a host, augmenting the
 * base parameters with resolved guest and public traffic-label information
 * (zone-wide labels merged with persisted cluster-level overrides).
 *
 * @param host the host whose resource is being (re)configured
 * @return parameter map including {@code guestTrafficInfo} and {@code publicTrafficInfo}
 */
@Override
protected HashMap<String, Object> buildConfigParams(HostVO host) {
    HashMap<String, Object> params = super.buildConfigParams(host);

    Map<String, String> clusterDetails = _clusterDetailsDao.findDetails(host.getClusterId());
    long zoneId = host.getDataCenterId();

    // Zone-wide traffic labels for guest and public traffic.
    String guestLabel = _netmgr.getDefaultGuestTrafficLabel(zoneId, HypervisorType.VMware);
    String publicLabel = _netmgr.getDefaultPublicTrafficLabel(zoneId, HypervisorType.VMware);

    _readGlobalConfigParameters();
    VirtualSwitchType defaultSwitchType = getDefaultVirtualSwitchType(nexusDVS);

    params.put("guestTrafficInfo", getTrafficInfo(TrafficType.Guest, guestLabel, clusterDetails, defaultSwitchType));
    params.put("publicTrafficInfo", getTrafficInfo(TrafficType.Public, publicLabel, clusterDetails, defaultSwitchType));

    return params;
}
/**
 * Chooses the default distributed virtual switch type: Nexus 1000v when its
 * global flag is set, otherwise the VMware distributed virtual switch.
 *
 * @param nexusDVS whether the Nexus dvSwitch global flag is enabled
 * @return the default virtual switch type for dvSwitch deployments
 */
private VirtualSwitchType getDefaultVirtualSwitchType(boolean nexusDVS) {
    if (nexusDVS) {
        return VirtualSwitchType.NexusDistributedVirtualSwitch;
    }
    return VirtualSwitchType.VMwareDistributedVirtualSwitch;
}
/**
 * Re-instantiates and configures the server resource for an existing host.
 *
 * @param host the host whose resource should be rebuilt
 * @return the configured and started resource, or {@code null} when the
 *         resource could not be instantiated, configured, or started
 */
@Override
public ServerResource reloadResource(HostVO host) {
    String resourceName = host.getResource();
    ServerResource resource = getResource(resourceName);

    if (resource == null) {
        // Fix: previously this path returned null silently; every other
        // failure path logs a warning, so do the same here for diagnosability.
        s_logger.warn("Unable to instantiate resource of type " + resourceName + " for host " + host.getId());
        return null;
    }

    _hostDao.loadDetails(host);
    HashMap<String, Object> params = buildConfigParams(host);
    try {
        resource.configure(host.getName(), params);
    } catch (ConfigurationException e) {
        s_logger.warn("Unable to configure resource due to " + e.getMessage());
        return null;
    }
    if (!resource.start()) {
        s_logger.warn("Unable to start the resource");
        return null;
    }
    return resource;
}
/**
 * Validates that a user-supplied virtual switch type string maps to a known
 * VMware virtual switch type.
 *
 * @param inputVswitchType the switch type string to validate
 * @throws InvalidParameterValueException if the string does not resolve to a
 *         valid virtual switch type
 */
private void validateVswitchType(String inputVswitchType) {
    if (VirtualSwitchType.getType(inputVswitchType) == VirtualSwitchType.None) {
        s_logger.error("Unable to resolve " + inputVswitchType + " to a valid virtual switch type in VMware environment.");
        throw new InvalidParameterValueException("Invalid virtual switch type : " + inputVswitchType);
    }
}
/**
 * Accumulates URL parameters (e.g. from the addCluster command) into the
 * discoverer's parameter cache, creating the cache on first use.
 *
 * @param params parameters to merge into the cache
 */
@Override
public void putParam(Map<String, String> params) {
    if (_urlParams == null) {
        // First call: copy-construct the cache from the supplied entries.
        _urlParams = new HashMap<String, String>(params);
    } else {
        _urlParams.putAll(params);
    }
}
}

View File

@ -59,17 +59,11 @@ public interface VmwareManager {
boolean beginExclusiveOperation(int timeOutSeconds);
void endExclusiveOperation();
boolean getNexusVSwitchGlobalParameter();
boolean getFullCloneFlag();
Map<String, String> getNexusVSMCredentialsByClusterId(Long clusterId);
String getPrivateVSwitchName(long dcId, HypervisorType hypervisorType);
String getPublicVSwitchName(long dcId, HypervisorType hypervisorType);
String getGuestVSwitchName(long dcId, HypervisorType hypervisorType);
public String getRootDiskController();
}

View File

@ -126,10 +126,9 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
String _mountParent;
StorageLayer _storage;
String _privateNetworkVSwitchName = "vSwitch0";
String _privateNetworkVSwitchName;
String _publicNetworkVSwitchName;
String _guestNetworkVSwitchName;
int _portsPerDvPortGroup = 256;
boolean _nexusVSwitchActive;
boolean _fullCloneFlag;
String _serviceConsoleName;
@ -195,13 +194,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
_storage = new JavaStorageLayer();
_storage.configure("StorageLayer", params);
}
value = _configDao.getValue(Config.VmwareUseNexusVSwitch.key());
if(value == null) {
_nexusVSwitchActive = false;
}
else {
_nexusVSwitchActive = Boolean.parseBoolean(value);
}
value = _configDao.getValue(Config.VmwareCreateFullClone.key());
if (value == null) {
@ -210,36 +202,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
_fullCloneFlag = Boolean.parseBoolean(value);
}
_privateNetworkVSwitchName = _configDao.getValue(Config.VmwarePrivateNetworkVSwitch.key());
if (_privateNetworkVSwitchName == null) {
if (_nexusVSwitchActive) {
_privateNetworkVSwitchName = "privateEthernetPortProfile";
} else {
_privateNetworkVSwitchName = "vSwitch0";
}
}
_publicNetworkVSwitchName = _configDao.getValue(Config.VmwarePublicNetworkVSwitch.key());
if (_publicNetworkVSwitchName == null) {
if (_nexusVSwitchActive) {
_publicNetworkVSwitchName = "publicEthernetPortProfile";
} else {
_publicNetworkVSwitchName = "vSwitch0";
}
}
_guestNetworkVSwitchName = _configDao.getValue(Config.VmwareGuestNetworkVSwitch.key());
if (_guestNetworkVSwitchName == null) {
if (_nexusVSwitchActive) {
_guestNetworkVSwitchName = "guestEthernetPortProfile";
} else {
_guestNetworkVSwitchName = "vSwitch0";
}
}
_serviceConsoleName = _configDao.getValue(Config.VmwareServiceConsole.key());
if(_serviceConsoleName == null) {
_serviceConsoleName = "Service Console";
@ -318,11 +280,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
return true;
}
@Override
public boolean getNexusVSwitchGlobalParameter() {
return _nexusVSwitchActive;
}
@Override
public boolean getFullCloneFlag() {
return _fullCloneFlag;
@ -338,15 +295,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
return _netMgr.getDefaultManagementTrafficLabel(dcId, hypervisorType);
}
@Override
public String getPublicVSwitchName(long dcId, HypervisorType hypervisorType) {
return _netMgr.getDefaultPublicTrafficLabel(dcId, hypervisorType);
}
@Override
public String getGuestVSwitchName(long dcId, HypervisorType hypervisorType) {
return _netMgr.getDefaultGuestTrafficLabel(dcId, hypervisorType);
}
private void prepareHost(HostMO hostMo, String privateTrafficLabel) throws Exception {
// For ESX host, we need to enable host firewall to allow VNC access
@ -368,12 +316,8 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
}
s_logger.info("Preparing network on host " + hostMo.getContext().toString() + " for " + privateTrafficLabel);
if(!_nexusVSwitchActive) {
HypervisorHostHelper.prepareNetwork(vSwitchName, "cloud.private", hostMo, vlanId, null, null, 180000, false);
}
else {
HypervisorHostHelper.prepareNetwork(vSwitchName, "cloud.private", hostMo, vlanId, null, null, 180000);
}
HypervisorHostHelper.prepareNetwork(vSwitchName, "cloud.private", hostMo, vlanId, null, null, 180000, false);
}
@Override
@ -504,10 +448,6 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
@Override
public void setupResourceStartupParams(Map<String, Object> params) {
params.put("private.network.vswitch.name", _privateNetworkVSwitchName);
params.put("public.network.vswitch.name", _publicNetworkVSwitchName);
params.put("guest.network.vswitch.name", _guestNetworkVSwitchName);
params.put("vmware.use.nexus.vswitch", _nexusVSwitchActive);
params.put("vmware.create.full.clone", _fullCloneFlag);
params.put("service.console.name", _serviceConsoleName);
params.put("management.portgroup.name", _managemetPortGroupName);
@ -515,6 +455,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
params.put("vmware.reserve.mem", _reserveMem);
params.put("vmware.root.disk.controller", _rootDiskController);
params.put("vmware.recycle.hung.wokervm", _recycleHungWorker);
params.put("ports.per.dvportgroup", _portsPerDvPortGroup);
}
@Override

View File

@ -200,6 +200,8 @@ import com.cloud.network.HAProxyConfigurator;
import com.cloud.network.LoadBalancerConfigurator;
import com.cloud.network.Networks;
import com.cloud.network.Networks.BroadcastDomainType;
import com.cloud.network.Networks.TrafficType;
import com.cloud.network.VmwareTrafficLabel;
import com.cloud.network.rules.FirewallRule;
import com.cloud.resource.ServerResource;
import com.cloud.serializer.GsonHelper;
@ -286,10 +288,9 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
protected String _vCenterAddress;
protected String _privateNetworkVSwitchName;
protected String _publicNetworkVSwitchName;
protected String _guestNetworkVSwitchName;
protected VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch;
protected boolean _nexusVSwitch = false;
protected VmwareTrafficLabel _guestTrafficInfo = new VmwareTrafficLabel(TrafficType.Guest);
protected VmwareTrafficLabel _publicTrafficInfo = new VmwareTrafficLabel(TrafficType.Public);
protected int _portsPerDvPortGroup;
protected boolean _fullCloneFlag = false;
protected boolean _reserveCpu = false;
@ -1319,7 +1320,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
NicTO nicTo = cmd.getNic();
VirtualDevice nic;
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo);
if (mgr.getNexusVSwitchGlobalParameter()) {
if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
String dvSwitchUuid;
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor);
@ -1550,13 +1551,16 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
private void plugPublicNic(VirtualMachineMO vmMo, final String vlanId, final String vifMacAddress) throws Exception {
// TODO : probably need to set traffic shaping
Pair<ManagedObjectReference, String> networkInfo = null;
if (!_nexusVSwitch) {
networkInfo = HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public",
VirtualSwitchType vSwitchType = VirtualSwitchType.StandardVirtualSwitch;
if (_publicTrafficInfo != null) {
vSwitchType = _publicTrafficInfo.getVirtualSwitchType();
}
if (VirtualSwitchType.StandardVirtualSwitch == vSwitchType) {
networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public",
vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, true);
} else {
networkInfo = HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public",
vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout);
networkInfo = HypervisorHostHelper.prepareNetwork(this._publicTrafficInfo.getVirtualSwitchName(), "cloud.public",
vmMo.getRunningHost(), vlanId, null, null, this._ops_timeout, vSwitchType, _portsPerDvPortGroup);
}
int nicIndex = allocPublicNicIndex(vmMo);
@ -1566,7 +1570,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
VirtualEthernetCard device = (VirtualEthernetCard) nicDevices[nicIndex];
if (!_nexusVSwitch) {
if (VirtualSwitchType.StandardVirtualSwitch == vSwitchType) {
VirtualEthernetCardNetworkBackingInfo nicBacking = new VirtualEthernetCardNetworkBackingInfo();
nicBacking.setDeviceName(networkInfo.second());
nicBacking.setNetwork(networkInfo.first());
@ -2265,7 +2269,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
s_logger.info("Prepare NIC device based on NicTO: " + _gson.toJson(nicTo));
Pair<ManagedObjectReference, String> networkInfo = prepareNetworkFromNicInfo(vmMo.getRunningHost(), nicTo);
if (mgr.getNexusVSwitchGlobalParameter()) {
if (VmwareHelper.isDvPortGroup(networkInfo.first())) {
String dvSwitchUuid;
ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter();
DatacenterMO dataCenterMo = new DatacenterMO(context, dcMor);
@ -2464,21 +2468,36 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
private Pair<ManagedObjectReference, String> prepareNetworkFromNicInfo(HostMO hostMo, NicTO nicTo) throws Exception {
Pair<String, String> switchName;
TrafficType trafficType;
VirtualSwitchType switchType;
switchName = getTargetSwitch(nicTo);
trafficType = nicTo.getType();
// Get switch type from resource property which is dictated by cluster property
// If a virtual switch type is specified while adding cluster that will be used.
// Else If virtual switch type is specified in physical traffic label that will be used
// Else use standard vSwitch
switchType = VirtualSwitchType.StandardVirtualSwitch;
if (trafficType == TrafficType.Guest && _guestTrafficInfo != null) {
switchType = _guestTrafficInfo.getVirtualSwitchType();
} else if (trafficType == TrafficType.Public && _publicTrafficInfo != null) {
switchType = _publicTrafficInfo.getVirtualSwitchType();
}
Pair<String, String> switchName = getTargetSwitch(nicTo);
String namePrefix = getNetworkNamePrefix(nicTo);
Pair<ManagedObjectReference, String> networkInfo = null;
s_logger.info("Prepare network on vSwitch: " + switchName + " with name prefix: " + namePrefix);
s_logger.info("Prepare network on " + switchType + " " + switchName + " with name prefix: " + namePrefix);
if(!_nexusVSwitch) {
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()),
nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout,
if (VirtualSwitchType.StandardVirtualSwitch == switchType) {
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()),
nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout,
!namePrefix.startsWith("cloud.private"));
}
else {
networkInfo = HypervisorHostHelper.prepareNetwork(switchName.first(), namePrefix, hostMo, getVlanInfo(nicTo, switchName.second()),
nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout);
nicTo.getNetworkRateMbps(), nicTo.getNetworkRateMulticastMbps(), _ops_timeout, switchType, _portsPerDvPortGroup);
}
return networkInfo;
@ -2488,8 +2507,10 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
private Pair<String, String> getTargetSwitch(NicTO nicTo) throws Exception {
if(nicTo.getName() != null && !nicTo.getName().isEmpty()) {
String[] tokens = nicTo.getName().split(",");
if(tokens.length == 2) {
// Format of network traffic label is <VSWITCH>,<VLANID>,<VSWITCHTYPE>
// If all 3 fields are mentioned then number of tokens would be 3.
// If only <VSWITCH>,<VLANID> are mentioned then number of tokens would be 2.
if(tokens.length == 2 || tokens.length == 3) {
return new Pair<String, String>(tokens[0], tokens[1]);
} else {
return new Pair<String, String>(nicTo.getName(), Vlan.UNTAGGED);
@ -2497,11 +2518,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
}
if (nicTo.getType() == Networks.TrafficType.Guest) {
return new Pair<String, String>(this._guestNetworkVSwitchName, Vlan.UNTAGGED);
return new Pair<String, String>(this._guestTrafficInfo.getVirtualSwitchName(), Vlan.UNTAGGED);
} else if (nicTo.getType() == Networks.TrafficType.Control || nicTo.getType() == Networks.TrafficType.Management) {
return new Pair<String, String>(this._privateNetworkVSwitchName, Vlan.UNTAGGED);
} else if (nicTo.getType() == Networks.TrafficType.Public) {
return new Pair<String, String>(this._publicNetworkVSwitchName, Vlan.UNTAGGED);
return new Pair<String, String>(this._publicTrafficInfo.getVirtualSwitchName(), Vlan.UNTAGGED);
} else if (nicTo.getType() == Networks.TrafficType.Storage) {
return new Pair<String, String>(this._privateNetworkVSwitchName, Vlan.UNTAGGED);
} else if (nicTo.getType() == Networks.TrafficType.Vpn) {
@ -4542,7 +4563,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if(!"untagged".equalsIgnoreCase(tokens[2]))
vlanId = tokens[2];
HypervisorHostHelper.prepareNetwork(this._publicNetworkVSwitchName, "cloud.public",
HypervisorHostHelper.prepareNetwork(_publicTrafficInfo.getVirtualSwitchName(), "cloud.public",
hostMo, vlanId, networkRateMbps, null, this._ops_timeout, false);
} else {
s_logger.info("Skip suspecious cloud network " + networkName);
@ -4559,7 +4580,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if(!"untagged".equalsIgnoreCase(tokens[2]))
vlanId = tokens[2];
HypervisorHostHelper.prepareNetwork(this._guestNetworkVSwitchName, "cloud.guest",
HypervisorHostHelper.prepareNetwork(_guestTrafficInfo.getVirtualSwitchName(), "cloud.guest",
hostMo, vlanId, networkRateMbps, null, this._ops_timeout, false);
} else {
s_logger.info("Skip suspecious cloud network " + networkName);
@ -4875,6 +4896,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
_morHyperHost.setType(hostTokens[0]);
_morHyperHost.set_value(hostTokens[1]);
_guestTrafficInfo = (VmwareTrafficLabel) params.get("guestTrafficInfo");
_publicTrafficInfo = (VmwareTrafficLabel) params.get("publicTrafficInfo");
VmwareContext context = getServiceContext();
try {
VmwareManager mgr = context.getStockObject(VmwareManager.CONTEXT_STOCK_NAME);
@ -4882,12 +4905,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
CustomFieldsManagerMO cfmMo = new CustomFieldsManagerMO(context, context.getServiceContent().getCustomFieldsManager());
cfmMo.ensureCustomFieldDef("Datastore", CustomFieldConstants.CLOUD_UUID);
if (mgr.getNexusVSwitchGlobalParameter()) {
if (_publicTrafficInfo != null && _publicTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch ||
_guestTrafficInfo != null && _guestTrafficInfo.getVirtualSwitchType() != VirtualSwitchType.StandardVirtualSwitch) {
cfmMo.ensureCustomFieldDef("DistributedVirtualPortgroup", CustomFieldConstants.CLOUD_GC_DVP);
} else {
cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC);
}
cfmMo.ensureCustomFieldDef("Network", CustomFieldConstants.CLOUD_GC);
cfmMo.ensureCustomFieldDef("VirtualMachine", CustomFieldConstants.CLOUD_UUID);
cfmMo.ensureCustomFieldDef("VirtualMachine", CustomFieldConstants.CLOUD_NIC_MASK);
@ -4895,15 +4917,14 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
_hostName = hostMo.getHyperHostName();
Map<String, String> vsmCredentials;
if (mgr.getNexusVSwitchGlobalParameter()) {
if (_guestTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch ||
_publicTrafficInfo.getVirtualSwitchType() == VirtualSwitchType.NexusDistributedVirtualSwitch) {
vsmCredentials = mgr.getNexusVSMCredentialsByClusterId(Long.parseLong(_cluster));
if (vsmCredentials != null) {
s_logger.info("Stocking credentials while configuring resource.");
context.registerStockObject("vsmcredentials", vsmCredentials);
}
_privateNetworkVSwitchName = mgr.getPrivateVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware);
_publicNetworkVSwitchName = mgr.getPublicVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware);
_guestNetworkVSwitchName = mgr.getGuestVSwitchName(Long.parseLong(_dcId), HypervisorType.VMware);
}
} catch (Exception e) {
@ -4912,12 +4933,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if(_privateNetworkVSwitchName == null) {
_privateNetworkVSwitchName = (String) params.get("private.network.vswitch.name");
}
if(_publicNetworkVSwitchName == null) {
_publicNetworkVSwitchName = (String) params.get("public.network.vswitch.name");
}
if(_guestNetworkVSwitchName == null) {
_guestNetworkVSwitchName = (String) params.get("guest.network.vswitch.name");
}
String value = (String) params.get("vmware.reserve.cpu");
@ -4938,9 +4953,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
else
_rootDiskController = DiskControllerType.ide;
value = params.get("vmware.use.nexus.vswitch").toString();
if(value != null && value.equalsIgnoreCase("true"))
_nexusVSwitch = true;
Integer intObj = (Integer) params.get("ports.per.dvportgroup");
if (intObj != null)
_portsPerDvPortGroup = intObj.intValue();
s_logger.info("VmwareResource network configuration info." +
" private traffic over vSwitch: " + _privateNetworkVSwitchName + ", public traffic over " +
this._publicTrafficInfo.getVirtualSwitchType() + " : " + this._publicTrafficInfo.getVirtualSwitchName() +
", guest traffic over " + this._guestTrafficInfo.getVirtualSwitchType() + " : " +
this._guestTrafficInfo.getVirtualSwitchName());
value = params.get("vmware.create.full.clone").toString();
if (value != null && value.equalsIgnoreCase("true")) {
@ -4949,9 +4970,6 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
_fullCloneFlag = false;
}
s_logger.info("VmwareResource network configuration info. private vSwitch: " + _privateNetworkVSwitchName + ", public vSwitch: " + _publicNetworkVSwitchName + ", guest network: "
+ _guestNetworkVSwitchName);
return true;
}

View File

@ -0,0 +1,118 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.network;
import com.cloud.exception.InvalidParameterValueException;
import com.cloud.hypervisor.vmware.mo.VirtualSwitchType;
import com.cloud.network.Networks.TrafficType;
/**
 * Parses and carries a VMware network traffic label of the form
 * {@code "<vswitch name>[,<vlan id>[,<vswitch type>]]"} (at most
 * {@link #MAX_FIELDS_VMWARE_LABEL} comma-separated fields) together with
 * the {@link TrafficType} it applies to. When no label is supplied, the
 * virtual-switch name and type fall back to per-switch-type defaults.
 */
public class VmwareTrafficLabel implements TrafficLabel {
    public static final String DEFAULT_VSWITCH_NAME = "vSwitch0";
    public static final String DEFAULT_DVSWITCH_NAME = "dvSwitch0";
    public static final String DEFAULT_NDVSWITCH_NAME = "epp0";
    public static final int MAX_FIELDS_VMWARE_LABEL = 3;
    public static final int VMWARE_LABEL_FIELD_INDEX_NAME = 0;
    public static final int VMWARE_LABEL_FIELD_INDEX_VLANID = 1;
    public static final int VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE = 2;

    TrafficType _trafficType = TrafficType.None;
    VirtualSwitchType _vSwitchType = VirtualSwitchType.StandardVirtualSwitch;
    String _vSwitchName = DEFAULT_VSWITCH_NAME;
    String _vlanId = null;
    // Raw label exactly as supplied by the caller; stays null when the label
    // was defaulted. Kept so getNetworkLabel() can return it (previously the
    // getter always returned null).
    String _networkLabel = null;

    /**
     * @param networkLabel  raw label, may be null/empty to use defaults
     * @param trafficType   traffic type this label applies to
     * @param defVswitchType switch type used to pick defaults for empty labels
     * @throws InvalidParameterValueException on an unknown switch type or
     *         more than {@link #MAX_FIELDS_VMWARE_LABEL} fields
     */
    public VmwareTrafficLabel(String networkLabel, TrafficType trafficType, VirtualSwitchType defVswitchType) {
        _trafficType = trafficType;
        _parseLabel(networkLabel, defVswitchType);
    }

    public VmwareTrafficLabel(String networkLabel, TrafficType trafficType) {
        _trafficType = trafficType;
        _parseLabel(networkLabel, VirtualSwitchType.StandardVirtualSwitch);
    }

    public VmwareTrafficLabel(TrafficType trafficType, VirtualSwitchType defVswitchType) {
        _trafficType = trafficType; // Define traffic label with specific traffic type
        _parseLabel(null, defVswitchType);
    }

    public VmwareTrafficLabel(TrafficType trafficType) {
        _trafficType = trafficType; // Define traffic label with specific traffic type
        _parseLabel(null, VirtualSwitchType.StandardVirtualSwitch);
    }

    public VmwareTrafficLabel() {
    }

    /**
     * Splits the label into name / vlan / switch-type fields and populates
     * the corresponding members; applies switch-type-specific defaults when
     * the label is absent.
     */
    private void _parseLabel(String networkLabel, VirtualSwitchType defVswitchType) {
        if (networkLabel == null || networkLabel.isEmpty()) {
            // No label supplied: pick defaults matching the distributed
            // vSwitch flavor in use (standard vSwitch defaults already set).
            if (defVswitchType.equals(VirtualSwitchType.VMwareDistributedVirtualSwitch)) {
                _vSwitchName = DEFAULT_DVSWITCH_NAME;
                _vSwitchType = VirtualSwitchType.VMwareDistributedVirtualSwitch;
            } else if (defVswitchType.equals(VirtualSwitchType.NexusDistributedVirtualSwitch)) {
                _vSwitchName = DEFAULT_NDVSWITCH_NAME;
                _vSwitchType = VirtualSwitchType.NexusDistributedVirtualSwitch;
            }
            return;
        }
        _networkLabel = networkLabel;
        String[] tokens = networkLabel.split(",");
        if (tokens.length > VMWARE_LABEL_FIELD_INDEX_NAME) {
            _vSwitchName = tokens[VMWARE_LABEL_FIELD_INDEX_NAME].trim();
        }
        if (tokens.length > VMWARE_LABEL_FIELD_INDEX_VLANID) {
            _vlanId = tokens[VMWARE_LABEL_FIELD_INDEX_VLANID].trim();
        }
        if (tokens.length > VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE) {
            _vSwitchType = VirtualSwitchType.getType(tokens[VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE].trim());
            if (VirtualSwitchType.None == _vSwitchType) {
                throw new InvalidParameterValueException("Invalid virtual switch type : " + tokens[VMWARE_LABEL_FIELD_INDEX_VSWITCH_TYPE].trim());
            }
        }
        if (tokens.length > MAX_FIELDS_VMWARE_LABEL) {
            throw new InvalidParameterValueException("Found extraneous fields in vmware traffic label : " + networkLabel);
        }
    }

    @Override
    public TrafficType getTrafficType() {
        return _trafficType;
    }

    /**
     * Returns the raw label string this object was built from, or null when
     * defaults were applied. (Fix: previously this always returned null.)
     */
    @Override
    public String getNetworkLabel() {
        return _networkLabel;
    }

    public VirtualSwitchType getVirtualSwitchType() {
        return _vSwitchType;
    }

    public String getVirtualSwitchName() {
        return _vSwitchName;
    }

    public String getVlanId() {
        return _vlanId;
    }

    public void setVirtualSwitchName(String vSwitchName) {
        _vSwitchName = vSwitchName;
    }

    public void setVirtualSwitchType(VirtualSwitchType vSwitchType) {
        _vSwitchType = vSwitchType;
    }
}

View File

@ -16,7 +16,8 @@
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-storage-allocator-random</artifactId>
<name>Apache CloudStack Plugin - Storage Allocator Random</name>
@ -26,4 +27,11 @@
<version>4.2.0-SNAPSHOT</version>
<relativePath>../../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-engine-storage</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
</project>

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package com.cloud.storage.allocator;
package org.apache.cloudstack.storage.allocator;
import java.util.ArrayList;
import java.util.Collections;
@ -21,44 +21,32 @@ import java.util.List;
import javax.ejb.Local;
import org.apache.cloudstack.engine.subsystem.api.storage.ScopeType;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.server.StatsCollector;
import com.cloud.storage.StoragePool;
import com.cloud.storage.VMTemplateVO;
import com.cloud.vm.DiskProfile;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineProfile;
@Component
@Local(value=StoragePoolAllocator.class)
public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
private static final Logger s_logger = Logger.getLogger(RandomStoragePoolAllocator.class);
@Override
public boolean allocatorIsCorrectType(DiskProfile dskCh) {
return true;
}
@Override
public List<StoragePool> allocateToPool(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
public List<StoragePool> select(DiskProfile dskCh, VirtualMachineProfile<? extends VirtualMachine> vmProfile, DeploymentPlan plan, ExcludeList avoid, int returnUpTo) {
List<StoragePool> suitablePools = new ArrayList<StoragePool>();
VMTemplateVO template = (VMTemplateVO)vmProfile.getTemplate();
// Check that the allocator type is correct
if (!allocatorIsCorrectType(dskCh)) {
return suitablePools;
}
long dcId = plan.getDataCenterId();
Long podId = plan.getPodId();
Long clusterId = plan.getClusterId();
s_logger.debug("Looking for pools in dc: " + dcId + " pod:" + podId + " cluster:" + clusterId);
List<StoragePoolVO> pools = _storagePoolDao.listBy(dcId, podId, clusterId);
List<StoragePoolVO> pools = _storagePoolDao.listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
if (pools.size() == 0) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("No storage pools available for allocation, returning");
@ -66,8 +54,6 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
return suitablePools;
}
StatsCollector sc = StatsCollector.getInstance();
Collections.shuffle(pools);
if (s_logger.isDebugEnabled()) {
s_logger.debug("RandomStoragePoolAllocator has " + pools.size() + " pools to check for allocation");
@ -76,8 +62,9 @@ public class RandomStoragePoolAllocator extends AbstractStoragePoolAllocator {
if(suitablePools.size() == returnUpTo){
break;
}
if (checkPool(avoid, pool, dskCh, template, null, sc, plan)) {
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
StoragePool pol = (StoragePool)this.dataStoreMgr.getPrimaryDataStore(pool.getId());
if (filter(avoid, pol, dskCh, plan)) {
suitablePools.add(pol);
}
}

View File

@ -90,6 +90,7 @@
<cs.commons-io.version>1.4</cs.commons-io.version>
<cs.reflections.version>0.9.8</cs.reflections.version>
<cs.java-ipv6.version>0.10</cs.java-ipv6.version>
<cs.replace.properties>build/replace.properties</cs.replace.properties>
</properties>
<distributionManagement>
@ -161,9 +162,9 @@
<module>usage</module>
<module>utils</module>
<module>deps/XenServerJava</module>
<module>engine</module>
<module>plugins</module>
<module>patches</module>
<module>engine</module>
<module>framework</module>
<module>services</module>
<module>test</module>

View File

@ -130,6 +130,43 @@
</excludes>
</configuration>
</plugin>
<plugin>
<artifactId>maven-antrun-plugin</artifactId>
<version>1.7</version>
<executions>
<execution>
<id>generate-resource</id>
<phase>generate-resources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<copy overwrite="true"
todir="${basedir}/target/conf">
<fileset dir="${basedir}/conf">
<include name="*.in" />
</fileset>
<globmapper from="*.in" to="*" />
<filterchain>
<filterreader
classname="org.apache.tools.ant.filters.ReplaceTokens">
<param type="propertiesfile"
value="${cs.replace.properties}" />
</filterreader>
</filterchain>
</copy>
<copy
todir="${basedir}/target/conf">
<fileset dir="${basedir}/conf">
<exclude name="*.in" />
</fileset>
</copy>
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

View File

@ -2190,12 +2190,15 @@ public class QueryManagerImpl extends ManagerBase implements QueryService {
Long domainId = cmd.getDomainId();
Long id = cmd.getId();
String keyword = cmd.getKeyword();
String name = cmd.getName();
Filter searchFilter = new Filter(DataCenterJoinVO.class, null, false, cmd.getStartIndex(), cmd.getPageSizeVal());
SearchCriteria<DataCenterJoinVO> sc = _dcJoinDao.createSearchCriteria();
if (id != null) {
sc.addAnd("id", SearchCriteria.Op.EQ, id);
} else if (name != null) {
sc.addAnd("name", SearchCriteria.Op.EQ, name);
} else {
if (keyword != null) {
SearchCriteria<DataCenterJoinVO> ssc = _dcJoinDao.createSearchCriteria();

View File

@ -19,6 +19,7 @@ package com.cloud.cluster.agentlb.dao;
import java.util.Date;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.ejb.Local;
import org.apache.log4j.Logger;
@ -37,30 +38,35 @@ import com.cloud.utils.db.SearchCriteria;
public class HostTransferMapDaoImpl extends GenericDaoBase<HostTransferMapVO, Long> implements HostTransferMapDao {
private static final Logger s_logger = Logger.getLogger(HostTransferMapDaoImpl.class);
protected final SearchBuilder<HostTransferMapVO> AllFieldsSearch;
protected final SearchBuilder<HostTransferMapVO> IntermediateStateSearch;
protected final SearchBuilder<HostTransferMapVO> ActiveSearch;
protected SearchBuilder<HostTransferMapVO> AllFieldsSearch;
protected SearchBuilder<HostTransferMapVO> IntermediateStateSearch;
protected SearchBuilder<HostTransferMapVO> ActiveSearch;
public HostTransferMapDaoImpl() {
AllFieldsSearch = createSearchBuilder();
AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
AllFieldsSearch.done();
IntermediateStateSearch = createSearchBuilder();
IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN);
IntermediateStateSearch.done();
ActiveSearch = createSearchBuilder();
ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT);
ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ);
ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ);
ActiveSearch.done();
super();
}
@PostConstruct
public void init() {
AllFieldsSearch = createSearchBuilder();
AllFieldsSearch.and("id", AllFieldsSearch.entity().getId(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("initialOwner", AllFieldsSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("futureOwner", AllFieldsSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ);
AllFieldsSearch.done();
IntermediateStateSearch = createSearchBuilder();
IntermediateStateSearch.and("futureOwner", IntermediateStateSearch.entity().getFutureOwner(), SearchCriteria.Op.EQ);
IntermediateStateSearch.and("initialOwner", IntermediateStateSearch.entity().getInitialOwner(), SearchCriteria.Op.EQ);
IntermediateStateSearch.and("state", IntermediateStateSearch.entity().getState(), SearchCriteria.Op.IN);
IntermediateStateSearch.done();
ActiveSearch = createSearchBuilder();
ActiveSearch.and("created", ActiveSearch.entity().getCreated(), SearchCriteria.Op.GT);
ActiveSearch.and("id", ActiveSearch.entity().getId(), SearchCriteria.Op.EQ);
ActiveSearch.and("state", ActiveSearch.entity().getState(), SearchCriteria.Op.EQ);
ActiveSearch.done();
}
@Override

View File

@ -20,6 +20,8 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import com.cloud.agent.AgentManager;
import com.cloud.consoleproxy.ConsoleProxyManager;
import com.cloud.ha.HighAvailabilityManager;
@ -28,7 +30,6 @@ import com.cloud.network.NetworkManager;
import com.cloud.network.router.VpcVirtualNetworkApplianceManager;
import com.cloud.server.ManagementServer;
import com.cloud.storage.StorageManager;
import com.cloud.storage.allocator.StoragePoolAllocator;
import com.cloud.storage.secondary.SecondaryStorageVmManager;
import com.cloud.storage.snapshot.SnapshotManager;
import com.cloud.template.TemplateManager;
@ -251,11 +252,10 @@ public enum Config {
XenGuestNetwork("Hidden", ManagementServer.class, String.class, "xen.guest.network.device", null, "Specify for guest network name label", null),
XenMaxNics("Advanced", AgentManager.class, Integer.class, "xen.nics.max", "7", "Maximum allowed nics for Vms created on Xen", null),
// VMware
VmwarePrivateNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.private.vswitch", null, "Specify the vSwitch on host for private network", null),
VmwarePublicNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.public.vswitch", null, "Specify the vSwitch on host for public network", null),
VmwareGuestNetworkVSwitch("Hidden", ManagementServer.class, String.class, "vmware.guest.vswitch", null, "Specify the vSwitch on host for guest network", null),
VmwareUseNexusVSwitch("Network", ManagementServer.class, Boolean.class, "vmware.use.nexus.vswitch", "false", "Enable/Disable Cisco Nexus 1000v vSwitch in VMware environment", null),
VmwareCreateFullClone("Advanced", ManagementServer.class, Boolean.class, "vmware.create.full.clone", "false", "If set to true, creates guest VMs as full clones on ESX", null),
VmwareUseDVSwitch("Network", ManagementServer.class, Boolean.class, "vmware.use.dvswitch", "false", "Enable/Disable Nexus/Vmware dvSwitch in VMware environment", null),
VmwarePortsPerDVPortGroup("Network", ManagementServer.class, Integer.class, "vmware.ports.per.dvportgroup", "256", "Default number of ports per Vmware dvPortGroup in VMware environment", null),
VmwareCreateFullClone("Advanced", ManagementServer.class, Boolean.class, "vmware.create.full.clone", "false", "If set to true, creates guest VMs as full clones on ESX", null),
VmwareServiceConsole("Advanced", ManagementServer.class, String.class, "vmware.service.console", "Service Console", "Specify the service console network name(for ESX hosts)", null),
VmwareManagementPortGroup("Advanced", ManagementServer.class, String.class, "vmware.management.portgroup", "Management Network", "Specify the management network name(for ESXi hosts)", null),
VmwareAdditionalVncPortRangeStart("Advanced", ManagementServer.class, Integer.class, "vmware.additional.vnc.portrange.start", "50000", "Start port number of additional VNC port range", null),

View File

@ -28,17 +28,23 @@ import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import com.cloud.dc.*;
import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator;
import org.apache.log4j.Logger;
import com.cloud.agent.manager.allocator.HostAllocator;
import com.cloud.api.ApiDBUtils;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityManager;
import com.cloud.capacity.CapacityVO;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.configuration.Config;
import com.cloud.configuration.dao.ConfigurationDao;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
import com.cloud.dc.Pod;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
@ -58,7 +64,6 @@ import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.Volume;
import com.cloud.storage.VolumeVO;
import com.cloud.storage.allocator.StoragePoolAllocator;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;

View File

@ -26,22 +26,23 @@ import com.cloud.user.UserContext;
import com.cloud.user.dao.AccountDao;
import com.cloud.user.dao.UserDao;
import com.cloud.utils.component.AnnotationInterceptor;
import com.cloud.utils.component.ComponentContext;
import net.sf.cglib.proxy.Callback;
import net.sf.cglib.proxy.MethodInterceptor;
import net.sf.cglib.proxy.MethodProxy;
import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import java.lang.reflect.AnnotatedElement;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
@Component
public class ActionEventUtils {
private static final Logger s_logger = Logger.getLogger(ActionEventUtils.class);
@ -49,14 +50,12 @@ public class ActionEventUtils {
private static EventDao _eventDao;
private static AccountDao _accountDao;
protected static UserDao _userDao;
// get the event bus provider if configured
protected static EventBus _eventBus;
protected static EventBus _eventBus = null;
@Inject EventDao eventDao;
@Inject AccountDao accountDao;
@Inject UserDao userDao;
public ActionEventUtils() {
}
@ -65,8 +64,6 @@ public class ActionEventUtils {
_eventDao = eventDao;
_accountDao = accountDao;
_userDao = userDao;
// TODO we will do injection of event bus later
}
public static Long onActionEvent(Long userId, Long accountId, Long domainId, String type, String description) {
@ -156,7 +153,9 @@ public class ActionEventUtils {
private static void publishOnEventBus(long userId, long accountId, String eventCategory,
String eventType, Event.State state) {
if (_eventBus == null) {
try {
_eventBus = ComponentContext.getComponent(EventBus.class);
} catch(NoSuchBeanDefinitionException nbe) {
return; // no provider is configured to provide events bus, so just return
}

View File

@ -22,16 +22,17 @@ import com.cloud.dc.HostPodVO;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.server.ManagementServer;
import org.apache.cloudstack.framework.events.*;
import com.cloud.utils.component.ComponentContext;
import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.stereotype.Component;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import java.util.HashMap;
import java.util.Map;
@Component
public class AlertGenerator {
@ -39,13 +40,11 @@ public class AlertGenerator {
private static final Logger s_logger = Logger.getLogger(AlertGenerator.class);
private static DataCenterDao _dcDao;
private static HostPodDao _podDao;
// get the event bus provider if configured
protected static EventBus _eventBus = null;
@Inject DataCenterDao dcDao;
@Inject HostPodDao podDao;
public AlertGenerator() {
}
@ -56,8 +55,10 @@ public class AlertGenerator {
}
public static void publishAlertOnEventBus(String alertType, long dataCenterId, Long podId, String subject, String body) {
if (_eventBus == null) {
return; // no provider is configured to provide an events bus, so just return
try {
_eventBus = ComponentContext.getComponent(EventBus.class);
} catch(NoSuchBeanDefinitionException nbe) {
return; // no provider is configured to provide events bus, so just return
}
org.apache.cloudstack.framework.events.Event event =

View File

@ -23,17 +23,18 @@ import com.cloud.event.dao.UsageEventDao;
import com.cloud.server.ManagementServer;
import com.cloud.user.Account;
import com.cloud.user.dao.AccountDao;
import org.apache.cloudstack.framework.events.EventBus;
import com.cloud.utils.component.ComponentContext;
import org.apache.cloudstack.framework.events.Event;
import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.stereotype.Component;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.PostConstruct;
import javax.inject.Inject;
import java.util.HashMap;
import java.util.Map;
@Component
public class UsageEventUtils {
@ -42,14 +43,12 @@ public class UsageEventUtils {
private static AccountDao _accountDao;
private static DataCenterDao _dcDao;
private static final Logger s_logger = Logger.getLogger(UsageEventUtils.class);
// get the event bus provider if configured
protected static EventBus _eventBus;
protected static EventBus _eventBus = null;
@Inject UsageEventDao usageEventDao;
@Inject AccountDao accountDao;
@Inject DataCenterDao dcDao;
public UsageEventUtils() {
}
@ -116,8 +115,10 @@ public class UsageEventUtils {
private static void publishUsageEvent(String usageEventType, Long accountId, Long zoneId, String resourceType, String resourceUUID) {
if (_eventBus == null) {
return; // no provider is configured to provide an events bus, so just return
try {
_eventBus = ComponentContext.getComponent(EventBus.class);
} catch(NoSuchBeanDefinitionException nbe) {
return; // no provider is configured to provide events bus, so just return
}
Account account = _accountDao.findById(accountId);

View File

@ -128,6 +128,7 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
@Inject protected ClusterDao _clusterDao;
public HostDaoImpl() {
super();
}
@PostConstruct
@ -261,7 +262,11 @@ public class HostDaoImpl extends GenericDaoBase<HostVO, Long> implements HostDao
* UnmanagedDirectConnectSearch.and("lastPinged", UnmanagedDirectConnectSearch.entity().getLastPinged(),
* SearchCriteria.Op.LTEQ); UnmanagedDirectConnectSearch.cp(); UnmanagedDirectConnectSearch.cp();
*/
try {
HostTransferSearch = _hostTransferDao.createSearchBuilder();
} catch (Throwable e) {
s_logger.debug("error", e);
}
HostTransferSearch.and("id", HostTransferSearch.entity().getId(), SearchCriteria.Op.NULL);
UnmanagedDirectConnectSearch.join("hostTransferSearch", HostTransferSearch, HostTransferSearch.entity().getId(), UnmanagedDirectConnectSearch.entity().getId(), JoinType.LEFTOUTER);
ClusterManagedSearch = _clusterDao.createSearchBuilder();

View File

@ -1930,9 +1930,9 @@ public class NetworkModelImpl extends ManagerBase implements NetworkModel {
}
int cidrSize = NetUtils.getIp6CidrSize(ip6Cidr);
// Ipv6 cidr limit should be at least /64
if (cidrSize < 64) {
throw new InvalidParameterValueException("The cidr size of IPv6 network must be no less than 64 bits!");
// we only support cidr == 64
if (cidrSize != 64) {
throw new InvalidParameterValueException("The cidr size of IPv6 network must be 64 bits!");
}
}

View File

@ -23,24 +23,23 @@ import com.cloud.network.Network.Event;
import com.cloud.network.Network.State;
import com.cloud.network.dao.NetworkDao;
import com.cloud.server.ManagementServer;
import com.cloud.utils.component.ComponentContext;
import com.cloud.utils.fsm.StateListener;
import org.apache.cloudstack.framework.events.EventBus;
import org.apache.cloudstack.framework.events.EventBusException;
import org.apache.log4j.Logger;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Map;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import javax.inject.Inject;
import java.util.HashMap;
import java.util.Map;
public class NetworkStateListener implements StateListener<State, Event, Network> {
@Inject protected UsageEventDao _usageEventDao;
@Inject protected NetworkDao _networkDao;
// get the event bus provider if configured
@Inject protected EventBus _eventBus;
protected static EventBus _eventBus = null;
private static final Logger s_logger = Logger.getLogger(NetworkStateListener.class);
@ -63,7 +62,9 @@ public class NetworkStateListener implements StateListener<State, Event, Network
private void pubishOnEventBus(String event, String status, Network vo, State oldState, State newState) {
if (_eventBus == null) {
try {
_eventBus = ComponentContext.getComponent(EventBus.class);
} catch(NoSuchBeanDefinitionException nbe) {
return; // no provider is configured to provide events bus, so just return
}

View File

@ -100,6 +100,7 @@ public interface ResourceManager extends ResourceService{
public List<HostVO> listHostsInClusterByStatus(long clusterId, Status status);
public List<HostVO> listAllUpAndEnabledHostsInOneZoneByType(Host.Type type, long dcId);
public List<HostVO> listAllUpAndEnabledHostsInOneZoneByHypervisor(HypervisorType type, long dcId);
public List<HostVO> listAllHostsInOneZoneByType(Host.Type type, long dcId);

Some files were not shown because too many files have changed in this diff Show More