Merge branch 'master' into CLOUDSTACK-9604

This commit is contained in:
Mowgli 2017-04-26 11:48:56 +05:30 committed by GitHub
commit c2c1f01d2e
219 changed files with 6361 additions and 5117 deletions

1
.gitignore vendored
View File

@ -51,7 +51,6 @@ tools/cli/build/
*.jar
*.war
*.mar
*.zip
*.iso
*.tar.gz
*.tgz

View File

@ -48,6 +48,7 @@ env:
smoke/test_list_ids_parameter
smoke/test_loadbalance
smoke/test_login
smoke/test_metrics_api
smoke/test_multipleips_per_nic
smoke/test_network
smoke/test_network_acl

View File

@ -154,7 +154,7 @@ All the rpm packages will be created in `dist/rpmbuild/RPMS/x86_64` directory.
## Notes
If you will be using Xen as your hypervisor, please download [vhd-util](http://download.cloud.com.s3.amazonaws.com/tools/vhd-util)
If you will be using Xen as your hypervisor, please download [vhd-util](http://download.cloudstack.org/tools/vhd-util)
If management server is installed on RHEL/CentOS, then copy vhd-util into:

View File

@ -31,6 +31,7 @@ public interface ServiceOffering extends DiskOffering, InfrastructureEntity, Int
public static final String routerDefaultOffUniqueName = "Cloud.Com-SoftwareRouter";
public static final String elbVmDefaultOffUniqueName = "Cloud.Com-ElasticLBVm";
public static final String internalLbVmDefaultOffUniqueName = "Cloud.Com-InternalLBVm";
// leaving cloud.com references as these are identifiers and not real-world addresses (check against DB)
public enum StorageType {
local, shared

View File

@ -53,7 +53,7 @@ public interface ResourceService {
Host reconnectHost(ReconnectHostCmd cmd);
/**
* We will automatically create a cloud.com cluster to attach to the external cluster and return a hyper host to perform
* We will automatically create an Apache CloudStack cluster to attach to the external cluster and return a hyper host to perform
* host related operation within the cluster
*
* @param cmd

View File

@ -401,7 +401,7 @@ public interface ManagementService {
* @return Ternary<List<? extends Host>, List<? extends Host>, Map<Host, Boolean>> List of all Hosts to which a VM
* can be migrated, list of Hosts with enough capacity and hosts requiring storage motion for migration.
*/
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(Long vmId, Long startIndex, Long pageSize);
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> listHostsForMigrationOfVM(Long vmId, Long startIndex, Long pageSize, String keyword);
/**
* List storage pools for live migrating of a volume. The API returns list of all pools in the cluster to which the

View File

@ -143,6 +143,10 @@ public class Storage {
public boolean isShared() {
return shared;
}
// Returns true for pool types whose capacity may be over-provisioned
// (thin provisioning): NFS, VMFS and PreSetup. All other types return false.
public boolean supportsOverProvisioning() {
return this == StoragePoolType.NetworkFilesystem || this == StoragePoolType.VMFS || this == StoragePoolType.PreSetup;
}
}
public static List<StoragePoolType> getNonSharedStoragePoolTypes() {

View File

@ -125,18 +125,22 @@ public class ListClustersCmd extends BaseListCmd {
return s_name;
}
@Override
public void execute() {
protected List<ClusterResponse> getClusterResponses() {
Pair<List<? extends Cluster>, Integer> result = _mgr.searchForClusters(this);
ListResponse<ClusterResponse> response = new ListResponse<ClusterResponse>();
List<ClusterResponse> clusterResponses = new ArrayList<ClusterResponse>();
for (Cluster cluster : result.first()) {
ClusterResponse clusterResponse = _responseGenerator.createClusterResponse(cluster, showCapacities);
clusterResponse.setObjectName("cluster");
clusterResponses.add(clusterResponse);
}
return clusterResponses;
}
response.setResponses(clusterResponses, result.second());
@Override
public void execute() {
// Delegates the lookup to getClusterResponses() so subclasses can reuse it.
List<ClusterResponse> clusterResponses = getClusterResponses();
ListResponse<ClusterResponse> response = new ListResponse<ClusterResponse>();
// NOTE(review): the total count passed here is the size of the returned
// page, not an overall match count — confirm pagination totals are still
// correct for API clients that rely on the "count" field.
response.setResponses(clusterResponses, clusterResponses.size());
response.setResponseName(getCommandName());
this.setResponseObject(response);
}

View File

@ -76,7 +76,7 @@ public class FindHostsForMigrationCmd extends BaseListCmd {
Map<Host, Boolean> hostsRequiringStorageMotion;
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> hostsForMigration =
_mgr.listHostsForMigrationOfVM(getVirtualMachineId(), this.getStartIndex(), this.getPageSizeVal());
_mgr.listHostsForMigrationOfVM(getVirtualMachineId(), this.getStartIndex(), this.getPageSizeVal(), this.getKeyword());
result = hostsForMigration.first();
List<? extends Host> hostsWithCapacity = hostsForMigration.second();
hostsRequiringStorageMotion = hostsForMigration.third();

View File

@ -132,6 +132,10 @@ public class ListHostsCmd extends BaseListCmd {
return state;
}
// Host type filter for the listing; the setter allows other commands to
// configure this cmd programmatically before execution.
public void setType(String type) {
this.type = type;
}
public String getType() {
return type;
}
@ -198,19 +202,16 @@ public class ListHostsCmd extends BaseListCmd {
return ApiCommandJobType.Host;
}
@Override
public void execute() {
ListResponse<HostResponse> response = null;
protected ListResponse<HostResponse> getHostResponses() {
ListResponse<HostResponse> response = new ListResponse<>();
if (getVirtualMachineId() == null) {
response = _queryService.searchForServers(this);
} else {
Pair<List<? extends Host>, Integer> result;
Ternary<Pair<List<? extends Host>, Integer>, List<? extends Host>, Map<Host, Boolean>> hostsForMigration =
_mgr.listHostsForMigrationOfVM(getVirtualMachineId(), this.getStartIndex(), this.getPageSizeVal());
_mgr.listHostsForMigrationOfVM(getVirtualMachineId(), this.getStartIndex(), this.getPageSizeVal(), null);
result = hostsForMigration.first();
List<? extends Host> hostsWithCapacity = hostsForMigration.second();
response = new ListResponse<HostResponse>();
List<HostResponse> hostResponses = new ArrayList<HostResponse>();
for (Host host : result.first()) {
HostResponse hostResponse = _responseGenerator.createHostResponse(host, getDetails());
@ -222,9 +223,14 @@ public class ListHostsCmd extends BaseListCmd {
hostResponse.setObjectName("host");
hostResponses.add(hostResponse);
}
response.setResponses(hostResponses, result.second());
}
return response;
}
@Override
public void execute() {
// getHostResponses() performs the search (or the migration-target lookup)
// and returns a fully populated list response.
ListResponse<HostResponse> response = getHostResponses();
response.setResponseName(getCommandName());
this.setResponseObject(response);
}

View File

@ -50,17 +50,13 @@ public class DedicateGuestVlanRangeCmd extends BaseCmd {
@Parameter(name = ApiConstants.VLAN_RANGE, type = CommandType.STRING, required = true, description = "guest vlan range to be dedicated")
private String vlan;
@Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, required = true, description = "account who will own the VLAN")
@Parameter(name = ApiConstants.ACCOUNT, type = CommandType.STRING, description = "account who will own the VLAN")
private String accountName;
@Parameter(name = ApiConstants.PROJECT_ID, type = CommandType.UUID, entityType = ProjectResponse.class, description = "project who will own the VLAN")
private Long projectId;
@Parameter(name = ApiConstants.DOMAIN_ID,
type = CommandType.UUID,
entityType = DomainResponse.class,
required = true,
description = "domain ID of the account owning a VLAN")
@Parameter(name = ApiConstants.DOMAIN_ID, type = CommandType.UUID, entityType = DomainResponse.class, description = "domain ID of the account owning a VLAN")
private Long domainId;
@Parameter(name = ApiConstants.PHYSICAL_NETWORK_ID,

View File

@ -75,6 +75,7 @@ public class AutoScaleVmProfileResponse extends BaseResponse implements Controll
@Parameter(name = ApiConstants.CS_URL,
type = CommandType.STRING,
description = "the API URL including port of the CloudStack Management Server example: http://server.cloud.com:8080/client/api?")
// leaving cloud.com reference above as it serves only as an example
private String csUrl;
@SerializedName(ApiConstants.ACCOUNT)

View File

@ -458,4 +458,163 @@ public class HostResponse extends BaseResponse {
}
// Read-only accessors for the serialized host response fields. Values are
// populated elsewhere (by the response generator); these getters only expose
// the stored state.
public String getName() {
return name;
}
public Status getState() {
return state;
}
public Date getDisconnectedOn() {
return disconnectedOn;
}
public Host.Type getHostType() {
return hostType;
}
public String getOsCategoryId() {
return osCategoryId;
}
public String getOsCategoryName() {
return osCategoryName;
}
public String getIpAddress() {
return ipAddress;
}
// Zone / pod / cluster placement identifiers.
public String getZoneId() {
return zoneId;
}
public String getZoneName() {
return zoneName;
}
public String getPodId() {
return podId;
}
public String getPodName() {
return podName;
}
public String getVersion() {
return version;
}
public HypervisorType getHypervisor() {
return hypervisor;
}
// CPU / memory / network statistics as reported for this host.
public Integer getCpuSockets() {
return cpuSockets;
}
public Integer getCpuNumber() {
return cpuNumber;
}
public Long getCpuSpeed() {
return cpuSpeed;
}
public String getCpuUsed() {
return cpuUsed;
}
public Long getAverageLoad() {
return averageLoad;
}
public Long getNetworkKbsRead() {
return networkKbsRead;
}
public Long getNetworkKbsWrite() {
return networkKbsWrite;
}
public Long getMemoryTotal() {
return memoryTotal;
}
public Long getMemoryAllocated() {
return memoryAllocated;
}
public Long getMemoryUsed() {
return memoryUsed;
}
public List<GpuResponse> getGpuGroup() {
return gpuGroup;
}
public Long getDiskSizeTotal() {
return diskSizeTotal;
}
public Long getDiskSizeAllocated() {
return diskSizeAllocated;
}
public String getCapabilities() {
return capabilities;
}
public Date getLastPinged() {
return lastPinged;
}
public Long getManagementServerId() {
return managementServerId;
}
public String getClusterId() {
return clusterId;
}
public String getClusterName() {
return clusterName;
}
public String getClusterType() {
return clusterType;
}
public Boolean getLocalStorageActive() {
return localStorageActive;
}
public Date getCreated() {
return created;
}
public Date getRemoved() {
return removed;
}
public String getEvents() {
return events;
}
// Migration-related flags, set when this response describes a migration target.
public Boolean getHasEnoughCapacity() {
return hasEnoughCapacity;
}
public Boolean getSuitableForMigration() {
return suitableForMigration;
}
public String getHypervisorVersion() {
return hypervisorVersion;
}
public Boolean getHaHost() {
return haHost;
}
}

View File

@ -16,15 +16,14 @@
// under the License.
package org.apache.cloudstack.api.response;
import java.util.List;
import com.cloud.serializer.Param;
import com.cloud.vm.Nic;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponse;
import org.apache.cloudstack.api.EntityReference;
import com.cloud.serializer.Param;
import com.cloud.vm.Nic;
import com.google.gson.annotations.SerializedName;
import java.util.List;
@SuppressWarnings("unused")
@EntityReference(value = Nic.class)
@ -221,4 +220,79 @@ public class NicResponse extends BaseResponse {
this.nsxLogicalSwitchPort = nsxLogicalSwitchPort;
}
// Read-only accessors for the serialized NIC response fields.
public String getNetworkId() {
return networkId;
}
public String getNetworkName() {
return networkName;
}
public String getNetmask() {
return netmask;
}
public String getGateway() {
return gateway;
}
public String getIsolationUri() {
return isolationUri;
}
public String getBroadcastUri() {
return broadcastUri;
}
public String getTrafficType() {
return trafficType;
}
public String getType() {
return type;
}
public Boolean getDefault() {
return isDefault;
}
public String getMacAddress() {
return macAddress;
}
// IPv4 address of the NIC.
public String getIpaddress() {
return ipaddress;
}
// IPv6 configuration.
public String getIp6Gateway() {
return ip6Gateway;
}
public String getIp6Cidr() {
return ip6Cidr;
}
public String getIp6Address() {
return ip6Address;
}
public List<NicSecondaryIpResponse> getSecondaryIps() {
return secondaryIps;
}
public String getDeviceId() {
return deviceId;
}
public String getVmId() {
return vmId;
}
// NSX integration identifiers.
public String getNsxLogicalSwitch() {
return nsxLogicalSwitch;
}
public String getNsxLogicalSwitchPort() {
return nsxLogicalSwitchPort;
}
}

View File

@ -309,4 +309,12 @@ public class StoragePoolResponse extends BaseResponse {
public void setOverProvisionFactor(String overProvisionFactor) {
this.overProvisionFactor = overProvisionFactor;
}
// Read-only accessors for the serialized storage pool response fields.
public String getOverProvisionFactor() {
return overProvisionFactor;
}
public Boolean getSuitableForMigration() {
return suitableForMigration;
}
}

View File

@ -813,4 +813,28 @@ public class UserVmResponse extends BaseResponseWithTagInformation implements Co
public void setOsTypeId(Long osTypeId) {
this.osTypeId = osTypeId;
}
// Accessors for the serialized user VM response fields.
public Set<Long> getTagIds() {
return tagIds;
}
public void setTagIds(Set<Long> tagIds) {
this.tagIds = tagIds;
}
// NOTE(review): raw Map type — a parameterized Map would be preferable, but
// the signature is left unchanged to keep the interface stable for callers.
public Map getDetails() {
return details;
}
public Boolean getDynamicallyScalable() {
return isDynamicallyScalable;
}
public void setDynamicallyScalable(Boolean dynamicallyScalable) {
isDynamicallyScalable = dynamicallyScalable;
}
public Long getOsTypeId() {
return osTypeId;
}
}

View File

@ -16,18 +16,17 @@
// under the License.
package org.apache.cloudstack.api.response;
import java.util.Date;
import java.util.LinkedHashSet;
import java.util.Set;
import com.cloud.serializer.Param;
import com.cloud.storage.Volume;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.BaseResponseWithTagInformation;
import org.apache.cloudstack.api.EntityReference;
import com.cloud.serializer.Param;
import com.cloud.storage.Volume;
import com.google.gson.annotations.SerializedName;
import java.util.Date;
import java.util.LinkedHashSet;
import java.util.Set;
@EntityReference(value = Volume.class)
@SuppressWarnings("unused")
@ -514,4 +513,140 @@ public class VolumeResponse extends BaseResponseWithTagInformation implements Co
public void setTags(Set<ResourceTagResponse> tags) {
this.tags = tags;
}
// Read-only accessors for the serialized volume response fields.
public String getName() {
return name;
}
public String getZoneId() {
return zoneId;
}
public String getZoneName() {
return zoneName;
}
public String getVolumeType() {
return volumeType;
}
public Long getDeviceId() {
return deviceId;
}
// Attachment details for the VM this volume belongs to (if any).
public String getVirtualMachineId() {
return virtualMachineId;
}
public String getVirtualMachineName() {
return virtualMachineName;
}
public String getVirtualMachineDisplayName() {
return virtualMachineDisplayName;
}
public String getVirtualMachineState() {
return virtualMachineState;
}
public String getProvisioningType() {
return provisioningType;
}
public Long getSize() {
return size;
}
public Long getMinIops() {
return minIops;
}
public Long getMaxIops() {
return maxIops;
}
public Date getCreated() {
return created;
}
public String getState() {
return state;
}
// Ownership: account / project / domain.
public String getAccountName() {
return accountName;
}
public String getProjectId() {
return projectId;
}
public String getProjectName() {
return projectName;
}
public String getDomainId() {
return domainId;
}
public String getDomainName() {
return domainName;
}
public String getStorageType() {
return storageType;
}
public String getHypervisor() {
return hypervisor;
}
// Offering information associated with the volume.
public String getDiskOfferingId() {
return diskOfferingId;
}
public String getDiskOfferingName() {
return diskOfferingName;
}
public String getDiskOfferingDisplayText() {
return diskOfferingDisplayText;
}
public String getStoragePoolName() {
return storagePoolName;
}
public String getSnapshotId() {
return snapshotId;
}
public Date getAttached() {
return attached;
}
public String getServiceOfferingId() {
return serviceOfferingId;
}
public String getServiceOfferingName() {
return serviceOfferingName;
}
public String getServiceOfferingDisplayText() {
return serviceOfferingDisplayText;
}
public Boolean getExtractable() {
return extractable;
}
public String getStatus() {
return status;
}
public Boolean getDisplayVolume() {
return displayVolume;
}
}

View File

@ -239,4 +239,92 @@ public class ZoneResponse extends BaseResponse {
}
this.resourceDetails = new HashMap<>(details);
}
// Read-only accessors for the serialized zone response fields.
public String getId() {
return id;
}
public String getName() {
return name;
}
public String getDescription() {
return description;
}
// Public and internal DNS servers configured for the zone.
public String getDns1() {
return dns1;
}
public String getDns2() {
return dns2;
}
public String getInternalDns1() {
return internalDns1;
}
public String getInternalDns2() {
return internalDns2;
}
public String getGuestCidrAddress() {
return guestCidrAddress;
}
public String getStatus() {
return status;
}
public String getDisplayText() {
return displayText;
}
public String getDomain() {
return domain;
}
public String getDomainId() {
return domainId;
}
public String getDomainName() {
return domainName;
}
public String getNetworkType() {
return networkType;
}
public boolean isSecurityGroupsEnabled() {
return securityGroupsEnabled;
}
public String getAllocationState() {
return allocationState;
}
public String getZoneToken() {
return zoneToken;
}
public String getDhcpProvider() {
return dhcpProvider;
}
// NOTE(review): "capacitites" is misspelled ("capacities"), but the name is
// kept as-is since it mirrors the field and may be relied on by callers.
public List<CapacityResponse> getCapacitites() {
return capacitites;
}
public boolean isLocalStorageEnabled() {
return localStorageEnabled;
}
public Set<ResourceTagResponse> getTags() {
return tags;
}
public Map<String, String> getResourceDetails() {
return resourceDetails;
}
}

View File

@ -108,6 +108,11 @@
<artifactId>cloud-plugin-user-authenticator-sha256salted</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-metrics</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-nvp</artifactId>
@ -143,11 +148,6 @@
<artifactId>cloud-plugin-network-bigswitch</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-midonet</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-ssp</artifactId>
@ -1002,6 +1002,21 @@
</dependency>
</dependencies>
</profile>
<profile>
<id>midonet</id>
<activation>
<property>
<name>midonet</name>
</property>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-plugin-network-midonet</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
</profile>
<profile>
<id>srx</id>
<activation>

View File

@ -1,3 +1,4 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
@ -14,17 +15,29 @@
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.query.dao;
//
import java.util.List;
package com.cloud.agent.api;
import org.apache.cloudstack.api.response.StorageTagResponse;
import java.util.Map;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.utils.db.GenericDao;
public class SetHostParamsCommand extends Command {
public interface StorageTagDao extends GenericDao<StorageTagVO, Long> {
StorageTagResponse newStorageTagResponse(StorageTagVO storageTag);
Map<String, String> params;
List<StorageTagVO> searchByIds(Long... storageTagIds);
public SetHostParamsCommand(Map<String, String> params) {
this.params = params;
}
public Map<String, String> getParams() {
return params;
}
protected SetHostParamsCommand() {
}
@Override
public boolean executeInSequence() {
return true;
}
}

View File

@ -75,6 +75,7 @@ public class VirtualRoutingResource {
private int _retry;
private int _port;
private Duration _eachTimeout;
private Map<String, Object> _params;
private String _cfgVersion = "1.0";
@ -259,8 +260,18 @@ public class VirtualRoutingResource {
return new GetDomRVersionAnswer(cmd, result.getDetails(), lines[0], lines[1]);
}
// Applies host-level parameters pushed by the management server. Only the
// per-command aggregation timeout is consumed here, and only when it was not
// already provided through configure()'s own params.
// NOTE(review): _params is assigned in configure(); if this method can run
// before configure(), _params.get(...) would throw NPE — confirm the agent
// framework guarantees configure() runs first.
public boolean configureHostParams(final Map<String, String> params) {
if (_params.get("router.aggregation.command.each.timeout") == null) {
String value = (String)params.get("router.aggregation.command.each.timeout");
// Falls back to 10 seconds when the pushed value is absent or unparsable.
_eachTimeout = Duration.standardSeconds(NumbersUtil.parseInt(value, 10));
}
return true;
}
public boolean configure(final String name, final Map<String, Object> params) throws ConfigurationException {
_name = name;
_params = params;
String value = (String)params.get("ssh.sleep");
_sleep = NumbersUtil.parseInt(value, 10) * 1000;

View File

@ -22,6 +22,7 @@ package com.cloud.network.resource;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLEncoder;
@ -53,6 +54,7 @@ import com.cloud.agent.api.StartupTrafficMonitorCommand;
import com.cloud.host.Host;
import com.cloud.resource.ServerResource;
import com.cloud.utils.exception.ExecutionException;
import java.net.HttpURLConnection;
public class TrafficSentinelResource implements ServerResource {
@ -205,14 +207,25 @@ public class TrafficSentinelResource implements ServerResource {
_exclZones = cmd.getExcludeZones();
}
BufferedReader in = null;
OutputStream os = null;
try {
//Query traffic Sentinel
trafficSentinel =
new URL(_url + "/inmsf/Query?script=" + URLEncoder.encode(getScript(cmd.getPublicIps(), cmd.getStart(), cmd.getEnd()), "UTF-8") +
"&authenticate=basic&resultFormat=txt");
//Query traffic Sentinel using POST method. 3 parts to the connection call and subsequent writing.
BufferedReader in = new BufferedReader(new InputStreamReader(trafficSentinel.openStream()));
//Part 1 - Connect to the URL of the traffic sentinel's instance.
trafficSentinel = new URL(_url + "/inmsf/Query");
String postData = "script="+URLEncoder.encode(getScript(cmd.getPublicIps(), cmd.getStart(), cmd.getEnd()), "UTF-8")+"&authenticate=basic&resultFormat=txt";
HttpURLConnection con = (HttpURLConnection) trafficSentinel.openConnection();
con.setRequestMethod("POST");
con.setRequestProperty("Content-Length", String.valueOf(postData.length()));
con.setDoOutput(true);
//Part 2 - Write Data
os = con.getOutputStream();
os.write(postData.getBytes("UTF-8"));
//Part 3 - Read response of the request
in = new BufferedReader(new InputStreamReader(con.getInputStream()));
String inputLine;
while ((inputLine = in.readLine()) != null) {
@ -228,13 +241,19 @@ public class TrafficSentinelResource implements ServerResource {
answer.put(publicIp, bytesSentAndReceived);
}
}
in.close();
} catch (MalformedURLException e1) {
s_logger.info("Invalid Traffic Sentinel URL", e1);
throw new ExecutionException(e1.getMessage());
} catch (IOException e) {
s_logger.debug("Error in direct network usage accounting", e);
throw new ExecutionException(e.getMessage());
} finally {
if (os != null) {
os.close();
}
if (in != null) {
in.close();
}
}
} catch (Exception e) {
s_logger.debug(e);

View File

@ -21,7 +21,8 @@
/etc/cloudstack/management/ehcache.xml
/etc/cloudstack/management/server-ssl.xml
/etc/cloudstack/management/server-nonssl.xml
/etc/cloudstack/management/server.xml
/etc/cloudstack/management/server7-ssl.xml
/etc/cloudstack/management/server7-nonssl.xml
/etc/cloudstack/management/classpath.conf
/etc/cloudstack/management/db.properties
/etc/cloudstack/management/tomcat6-ssl.conf
@ -33,6 +34,7 @@
/etc/cloudstack/management/log4j-cloud.xml
/etc/cloudstack/management/tomcat-users.xml
/etc/cloudstack/management/context.xml
/etc/default/cloudstack-management
/etc/init.d/cloudstack-management
/etc/security/limits.d/cloudstack-limits.conf
/etc/sudoers.d/cloudstack

View File

@ -60,7 +60,26 @@ if [ "$1" = configure ]; then
chmod 0640 ${CONFDIR}/${DBPROPS}
chgrp cloud ${CONFDIR}/${DBPROPS}
invoke-rc.d tomcat6 stop || true
if [ -d "/usr/share/tomcat7" ]; then
rm -f /usr/share/cloudstack-management/bin
rm -f /usr/share/cloudstack-management/lib
ln -s /usr/share/tomcat7/bin /usr/share/cloudstack-management/bin
ln -s /usr/share/tomcat7/lib /usr/share/cloudstack-management/lib
invoke-rc.d tomcat7 stop || true
if [ ! -f "/etc/cloudstack/management/server.xml" ]; then
ln -s /etc/cloudstack/management/server7-nonssl.xml /etc/cloudstack/management/server.xml
fi
elif [ -d "/usr/share/tomcat6" ]; then
rm -f /usr/share/cloudstack-management/bin
rm -f /usr/share/cloudstack-management/lib
ln -s /usr/share/tomcat6/bin /usr/share/cloudstack-management/bin
ln -s /usr/share/tomcat6/lib /usr/share/cloudstack-management/lib
invoke-rc.d tomcat6 stop || true
if [ ! -f "/etc/cloudstack/management/server.xml" ]; then
ln -s /etc/cloudstack/management/server-nonssl.xml /etc/cloudstack/management/server.xml
fi
fi
# Add jdbc MySQL driver settings to db.properties if not present
grep -s -q "db.cloud.driver=jdbc:mysql" ${CONFDIR}/${DBPROPS} || sed -i -e "\$adb.cloud.driver=jdbc:mysql" ${CONFDIR}/${DBPROPS}

10
debian/control vendored
View File

@ -3,7 +3,7 @@ Section: libs
Priority: extra
Maintainer: Wido den Hollander <wido@widodh.nl>
Build-Depends: debhelper (>= 9), openjdk-8-jdk | java8-sdk | java8-jdk, genisoimage,
python-mysql.connector, maven (>= 3) | maven3, python (>= 2.7), lsb-release, dh-systemd
python-mysql.connector, maven (>= 3) | maven3, python (>= 2.7), lsb-release, dh-systemd, python-setuptools
Standards-Version: 3.8.1
Homepage: http://www.cloudstack.org/
@ -15,15 +15,14 @@ Description: A common package which contains files which are shared by several C
Package: cloudstack-management
Architecture: all
Depends: ${misc:Depends}, ${python:Depends}, openjdk-8-jre-headless | java8-runtime-headless | java8-runtime, cloudstack-common (= ${source:Version}), tomcat6 | tomcat7, sudo, jsvc, python-mysql.connector, libmysql-java, augeas-tools, mysql-client, adduser, bzip2, ipmitool, lsb-release
Depends: ${python:Depends}, openjdk-8-jre-headless | java8-runtime-headless | java8-runtime | openjdk-7-jre-headless, cloudstack-common (= ${source:Version}), tomcat6 | tomcat7, sudo, jsvc, python-mysql.connector, libmysql-java, augeas-tools, mysql-client, adduser, bzip2, ipmitool, lsb-release, init-system-helpers (>= 1.14~)
Conflicts: cloud-server, cloud-client, cloud-client-ui
Description: CloudStack server library
The CloudStack management server
Package: cloudstack-agent
Architecture: all
Depends: ${python:Depends}, openjdk-8-jre-headless | java8-runtime-headless | java8-runtime, cloudstack-common (= ${source:Version}), lsb-base (>= 4.0), libcommons-daemon-java, openssh-client, qemu-kvm (>= 1.0), libvirt-bin (>= 0.9.8), uuid-runtime, iproute, ebtables, vlan, jsvc, ipset, python-libvirt, ethtool, iptables, lsb-release
Recommends: init-system-helpers
Depends: ${python:Depends}, openjdk-8-jre-headless | java8-runtime-headless | java8-runtime, cloudstack-common (= ${source:Version}), lsb-base (>= 4.0), libcommons-daemon-java, openssh-client, qemu-kvm (>= 1.0), libvirt-bin (>= 0.9.8), uuid-runtime, iproute, ebtables, vlan, jsvc, ipset, python-libvirt, ethtool, iptables, lsb-release, init-system-helpers (>= 1.14~)
Conflicts: cloud-agent, cloud-agent-libs, cloud-agent-deps, cloud-agent-scripts
Description: CloudStack agent
The CloudStack agent is in charge of managing shared computing resources in
@ -32,8 +31,7 @@ Description: CloudStack agent
Package: cloudstack-usage
Architecture: all
Depends: openjdk-8-jre-headless | java8-runtime-headless | java8-runtime, cloudstack-common (= ${source:Version}), jsvc, libmysql-java
Recommends: init-system-helpers
Depends: openjdk-8-jre-headless | java8-runtime-headless | java8-runtime, cloudstack-common (= ${source:Version}), jsvc, libmysql-java, init-system-helpers (>= 1.14~)
Description: CloudStack usage monitor
The CloudStack usage monitor provides usage accounting across the entire cloud for
cloud operators to charge based on usage parameters.

9
debian/rules vendored
View File

@ -85,19 +85,20 @@ override_dh_auto_install:
chmod 0440 $(DESTDIR)/$(SYSCONFDIR)/sudoers.d/$(PACKAGE)
ln -s tomcat6-nonssl.conf $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/tomcat6.conf
ln -s server-nonssl.xml $(DESTDIR)/$(SYSCONFDIR)/$(PACKAGE)/management/server.xml
install -D packaging/debian/init/cloud-management $(DESTDIR)/$(SYSCONFDIR)/init.d/$(PACKAGE)-management
install -D client/target/utilities/bin/cloud-update-xenserver-licenses $(DESTDIR)/usr/bin/cloudstack-update-xenserver-licenses
ln -s /usr/share/tomcat6/bin $(DESTDIR)/usr/share/$(PACKAGE)-management/bin
# Remove configuration in /usr/share/cloudstack-management/webapps/client/WEB-INF
# This should all be in /etc/cloudstack/management
rm $(DESTDIR)/usr/share/$(PACKAGE)-management/webapps/client/WEB-INF/classes/*.*
ln -s ../../..$(SYSCONFDIR)/$(PACKAGE)/management $(DESTDIR)/usr/share/$(PACKAGE)-management/conf
ln -s ../../../usr/share/tomcat6/lib $(DESTDIR)/usr/share/$(PACKAGE)-management/lib
ln -s ../../../var/log/$(PACKAGE)/management $(DESTDIR)/usr/share/$(PACKAGE)-management/logs
ln -s ../../../var/cache/$(PACKAGE)/management/temp $(DESTDIR)/usr/share/$(PACKAGE)-management/temp
ln -s ../../../var/cache/$(PACKAGE)/management/work $(DESTDIR)/usr/share/$(PACKAGE)-management/work
install -d -m0755 debian/$(PACKAGE)-management/lib/systemd/system
install -m0644 packaging/systemd/$(PACKAGE)-management.service.ubuntu debian/$(PACKAGE)-management/lib/systemd/system/$(PACKAGE)-management.service
install -m0644 packaging/systemd/$(PACKAGE)-management.default.ubuntu $(DESTDIR)/$(SYSCONFDIR)/default/$(PACKAGE)-management
# cloudstack-common
mkdir -p $(DESTDIR)/usr/share/$(PACKAGE)-common
mkdir $(DESTDIR)/usr/share/$(PACKAGE)-common/scripts
@ -149,7 +150,7 @@ override_dh_installinit:
dh_installinit -pcloudstack-management -pcloudstack-agent -pcloudstack-usage --onlyscripts --no-start
override_dh_systemd_enable:
dh_systemd_enable -pcloudstack-agent -pcloudstack-usage
dh_systemd_enable -pcloudstack-management -pcloudstack-agent -pcloudstack-usage
override_dh_installdocs:
dh_installdocs -A tools/whisker/LICENSE tools/whisker/NOTICE INSTALL.md

View File

@ -43,6 +43,9 @@ public interface IpAddressManager {
"If true, when account has dedicated public ip range(s), once the ips dedicated to the account have been consumed ips will be acquired from the system pool",
true, ConfigKey.Scope.Account);
static final ConfigKey<Boolean> RulesContinueOnError = new ConfigKey<Boolean>("Advanced", Boolean.class, "network.rule.delete.ignoreerror", "true",
"When true, ip address delete (ipassoc) failures are ignored", true);
/**
* Assigns a new public ip address.
*

View File

@ -61,6 +61,7 @@ import com.cloud.agent.api.PingCommand;
import com.cloud.agent.api.PingRoutingCommand;
import com.cloud.agent.api.ReadyAnswer;
import com.cloud.agent.api.ReadyCommand;
import com.cloud.agent.api.SetHostParamsCommand;
import com.cloud.agent.api.ShutdownCommand;
import com.cloud.agent.api.StartupAnswer;
import com.cloud.agent.api.StartupCommand;
@ -214,6 +215,8 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
registerForHostEvents(new BehindOnPingListener(), true, true, false);
registerForHostEvents(new SetHostParamsListener(), true, true, false);
_executor = new ThreadPoolExecutor(threads, threads, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("AgentTaskPool"));
_connectExecutor = new ThreadPoolExecutor(100, 500, 60l, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), new NamedThreadFactory("AgentConnectTaskPool"));
@ -1710,4 +1713,73 @@ public class AgentManagerImpl extends ManagerBase implements AgentManager, Handl
DirectAgentThreadCap };
}
// Agent event listener that pushes host-level parameters (currently the
// router aggregation command timeout) to KVM/LXC hosts when they connect.
// All other Listener callbacks are no-ops.
protected class SetHostParamsListener implements Listener {
@Override
public boolean isRecurring() {
return false;
}
@Override
public boolean processAnswers(final long agentId, final long seq, final Answer[] answers) {
return false;
}
@Override
public boolean processCommands(final long agentId, final long seq, final Command[] commands) {
return false;
}
@Override
public AgentControlAnswer processControlCommand(final long agentId, final AgentControlCommand cmd) {
return null;
}
@Override
public void processHostAdded(long hostId) {
}
// On connect of a routing agent, send the configured aggregation timeout to
// KVM and LXC hosts via a SetHostParamsCommand.
@Override
public void processConnect(final Host host, final StartupCommand cmd, final boolean forRebalance) {
if (cmd instanceof StartupRoutingCommand) {
if (((StartupRoutingCommand)cmd).getHypervisorType() == HypervisorType.KVM || ((StartupRoutingCommand)cmd).getHypervisorType() == HypervisorType.LXC) {
Map<String, String> params = new HashMap<String, String>();
params.put("router.aggregation.command.each.timeout", _configDao.getValue("router.aggregation.command.each.timeout"));
try {
SetHostParamsCommand cmds = new SetHostParamsCommand(params);
Commands c = new Commands(cmds);
// Asynchronous send; answers come back through processAnswers (ignored).
send(host.getId(), c, this);
} catch (AgentUnavailableException e) {
// NOTE(review): the exception itself is dropped — consider passing 'e'
// to the logger so the failure cause is recorded.
s_logger.debug("Failed to send host params on host: " + host.getId());
}
}
}
}
@Override
public boolean processDisconnect(final long agentId, final Status state) {
return true;
}
@Override
public void processHostAboutToBeRemoved(long hostId) {
}
@Override
public void processHostRemoved(long hostId, long clusterId) {
}
@Override
public boolean processTimeout(final long agentId, final long seq) {
return false;
}
@Override
public int getTimeout() {
// -1: no timeout applies to this listener.
return -1;
}
}
}

View File

@ -747,14 +747,17 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
protected <T extends VMInstanceVO> boolean changeState(final T vm, final Event event, final Long hostId, final ItWorkVO work, final Step step) throws NoTransitionException {
// FIXME: We should do this better.
final Step previousStep = work.getStep();
_workDao.updateStep(work, step);
Step previousStep = null;
if (work != null) {
previousStep = work.getStep();
_workDao.updateStep(work, step);
}
boolean result = false;
try {
result = stateTransitTo(vm, event, hostId);
return result;
} finally {
if (!result) {
if (!result && work != null) {
_workDao.updateStep(work, previousStep);
}
}
@ -1517,12 +1520,13 @@ public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMac
if (doCleanup) {
if (cleanup(vmGuru, new VirtualMachineProfileImpl(vm), work, Event.StopRequested, cleanUpEvenIfUnableToStop)) {
try {
if (s_logger.isDebugEnabled()) {
if (s_logger.isDebugEnabled() && work != null) {
s_logger.debug("Updating work item to Done, id:" + work.getId());
}
if (!changeState(vm, Event.AgentReportStopped, null, work, Step.Done)) {
throw new CloudRuntimeException("Unable to stop " + vm);
}
} catch (final NoTransitionException e) {
s_logger.warn("Unable to cleanup " + vm);
throw new CloudRuntimeException("Unable to stop " + vm, e);

View File

@ -267,7 +267,6 @@
<bean id="storagePoolDetailsDaoImpl" class="com.cloud.storage.dao.StoragePoolDetailsDaoImpl" />
<bean id="storagePoolJoinDaoImpl" class="com.cloud.api.query.dao.StoragePoolJoinDaoImpl" />
<bean id="storagePoolTagsDaoImpl" class="com.cloud.storage.dao.StoragePoolTagsDaoImpl" />
<bean id="storageTagDaoImpl" class="com.cloud.api.query.dao.StorageTagDaoImpl" />
<bean id="hostTagDaoImpl" class="com.cloud.api.query.dao.HostTagDaoImpl" />
<bean id="storagePoolWorkDaoImpl" class="com.cloud.storage.dao.StoragePoolWorkDaoImpl" />
<bean id="templatePrimaryDataStoreDaoImpl" class="org.apache.cloudstack.storage.volume.db.TemplatePrimaryDataStoreDaoImpl" />

View File

@ -23,7 +23,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import javax.inject.Inject;
@ -356,11 +355,11 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
switch (level) {
case 1: // List all the capacities grouped by zone, capacity Type
finalQuery.append(LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2);
finalQuery.append(LIST_CAPACITY_GROUP_BY_ZONE_TYPE_PART2);
break;
case 2: // List all the capacities grouped by pod, capacity Type
finalQuery.append(LIST_CAPACITY_GROUP_BY_CLUSTER_TYPE_PART2);
finalQuery.append(LIST_CAPACITY_GROUP_BY_POD_TYPE_PART2);
break;
case 3: // List all the capacities grouped by cluster, capacity Type
@ -392,22 +391,7 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
results.add(summedCapacity);
}
HashMap<Integer, SummedCapacity> capacityMap = new HashMap<Integer, SummedCapacity>();
for (SummedCapacity result: results) {
if (capacityMap.containsKey(result.getCapacityType().intValue())) {
SummedCapacity tempCapacity = capacityMap.get(result.getCapacityType().intValue());
tempCapacity.setUsedCapacity(tempCapacity.getUsedCapacity()+result.getUsedCapacity());
tempCapacity.setReservedCapacity(tempCapacity.getReservedCapacity()+result.getReservedCapacity());
tempCapacity.setSumTotal(tempCapacity.getTotalCapacity()+result.getTotalCapacity());
}else {
capacityMap.put(result.getCapacityType().intValue(),result);
}
}
List<SummedCapacity> summedCapacityList = new ArrayList<SummedCapacity>();
for (Entry<Integer, SummedCapacity> entry : capacityMap.entrySet()) {
summedCapacityList.add(entry.getValue());
}
return summedCapacityList;
return results;
} catch (SQLException e) {
throw new CloudRuntimeException("DB Exception on: " + finalQuery, e);
} catch (Throwable e) {

View File

@ -16,13 +16,13 @@
// under the License.
package com.cloud.dc.dao;
import java.util.List;
import java.util.Map;
import com.cloud.dc.ClusterVO;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.utils.db.GenericDao;
import java.util.List;
import java.util.Map;
public interface ClusterDao extends GenericDao<ClusterVO, Long> {
List<ClusterVO> listByPodId(long podId);
@ -44,7 +44,7 @@ public interface ClusterDao extends GenericDao<ClusterVO, Long> {
List<ClusterVO> listClustersByDcId(long zoneId);
List<Long> listAllCusters(long zoneId);
List<Long> listAllClusters(Long zoneId);
boolean getSupportsResigning(long clusterId);
}

View File

@ -16,18 +16,6 @@
// under the License.
package com.cloud.dc.dao;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import org.springframework.stereotype.Component;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.ClusterVO;
@ -43,6 +31,16 @@ import com.cloud.utils.db.SearchCriteria.Func;
import com.cloud.utils.db.SearchCriteria.Op;
import com.cloud.utils.db.TransactionLegacy;
import com.cloud.utils.exception.CloudRuntimeException;
import org.springframework.stereotype.Component;
import javax.inject.Inject;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@Component
public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements ClusterDao {
@ -259,9 +257,11 @@ public class ClusterDaoImpl extends GenericDaoBase<ClusterVO, Long> implements C
}
@Override
public List<Long> listAllCusters(long zoneId) {
public List<Long> listAllClusters(Long zoneId) {
SearchCriteria<Long> sc = ClusterIdSearch.create();
sc.setParameters("dataCenterId", zoneId);
if (zoneId != null) {
sc.setParameters("dataCenterId", zoneId);
}
return customSearch(sc, null);
}

View File

@ -31,5 +31,5 @@ public interface HostPodDao extends GenericDao<HostPodVO, Long> {
public List<Long> listDisabledPods(long zoneId);
public List<Long> listAllPods(long zoneId);
public List<Long> listAllPods(Long zoneId);
}

View File

@ -130,9 +130,11 @@ public class HostPodDaoImpl extends GenericDaoBase<HostPodVO, Long> implements H
}
@Override
public List<Long> listAllPods(long zoneId) {
public List<Long> listAllPods(Long zoneId) {
SearchCriteria<Long> sc = PodIdSearch.create();
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId);
if (zoneId != null) {
sc.addAnd("dataCenterId", SearchCriteria.Op.EQ, zoneId);
}
return customSearch(sc, null);
}
}

View File

@ -18,6 +18,8 @@ package com.cloud.storage.dao;
import java.util.List;
import org.apache.cloudstack.api.response.StorageTagResponse;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.utils.db.GenericDao;
@ -26,5 +28,7 @@ public interface StoragePoolTagsDao extends GenericDao<StoragePoolTagVO, Long> {
void persist(long poolId, List<String> storagePoolTags);
List<String> getStoragePoolTags(long poolId);
void deleteTags(long poolId);
List<StoragePoolTagVO> searchByIds(Long... stIds);
StorageTagResponse newStorageTagResponse(StoragePoolTagVO tag);
}

View File

@ -18,6 +18,12 @@ package com.cloud.storage.dao;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
@ -26,12 +32,21 @@ import com.cloud.utils.db.TransactionLegacy;
public class StoragePoolTagsDaoImpl extends GenericDaoBase<StoragePoolTagVO, Long> implements StoragePoolTagsDao {
@Inject
private ConfigurationDao _configDao;
protected final SearchBuilder<StoragePoolTagVO> StoragePoolSearch;
private final SearchBuilder<StoragePoolTagVO> StoragePoolIdsSearch;
private static final int DEFAULT_BATCH_QUERY_SIZE = 2000;
/**
 * Prepares the reusable search builders: StoragePoolSearch filters tag rows by
 * pool id, StoragePoolIdsSearch by a set of tag row ids.
 */
public StoragePoolTagsDaoImpl() {
StoragePoolSearch = createSearchBuilder();
// Match all tag rows belonging to a single storage pool.
StoragePoolSearch.and("poolId", StoragePoolSearch.entity().getPoolId(), SearchCriteria.Op.EQ);
StoragePoolSearch.done();
StoragePoolIdsSearch = createSearchBuilder();
// Match tag rows whose id is in a given set (used by searchByIds batching).
StoragePoolIdsSearch.and("idIN", StoragePoolIdsSearch.entity().getId(), SearchCriteria.Op.IN);
StoragePoolIdsSearch.done();
}
@Override
@ -77,4 +92,69 @@ public class StoragePoolTagsDaoImpl extends GenericDaoBase<StoragePoolTagVO, Lon
txn.commit();
}
/**
 * Loads the {@link StoragePoolTagVO} rows for the given ids, querying in
 * batches so a single statement never receives more than the configured
 * {@code detail.batch.query.size} ids.
 * @param stIds storage pool tag row ids to load
 * @return all rows found for the given ids
 */
@Override
public List<StoragePoolTagVO> searchByIds(Long... stIds) {
    final int batchSize = getDetailsBatchSize();
    final List<StoragePoolTagVO> result = new ArrayList<StoragePoolTagVO>();
    // Walk the id array one batch at a time; the last batch may be smaller.
    for (int start = 0; start < stIds.length; start += batchSize) {
        final int currentBatch = Math.min(batchSize, stIds.length - start);
        searchForStoragePoolIdsInternal(start, currentBatch, stIds, result);
    }
    return result;
}
/**
 * Loads one batch of {@link StoragePoolTagVO} rows whose ids are taken from a
 * slice of {@code stIds} and appends them to the given result list. Batch size
 * is bounded by {@link StoragePoolTagsDaoImpl#getDetailsBatchSize()} (see searchByIds).
 * @param currIndex index of the first id of this batch within stIds
 * @param batchSize number of ids in this batch
 * @param stIds array of storage pool tag row ids being looked up
 * @param pools list the loaded tag rows are appended to
 */
protected void searchForStoragePoolIdsInternal(int currIndex, int batchSize, Long[] stIds, List<StoragePoolTagVO> pools) {
    // Copy the current slice in one call instead of element-by-element.
    Long[] ids = new Long[batchSize];
    System.arraycopy(stIds, currIndex, ids, 0, batchSize);
    SearchCriteria<StoragePoolTagVO> sc = StoragePoolIdsSearch.create();
    sc.setParameters("idIN", (Object[])ids);
    // NOTE(review): searchIncludingRemoved also returns soft-deleted rows —
    // presumably intentional for id-based lookups; verify against callers.
    List<StoragePoolTagVO> tags = searchIncludingRemoved(sc, null, null, false);
    if (tags != null) {
        pools.addAll(tags);
    }
}
/**
 * Retrieves the {@code detail.batch.query.size} configuration value, falling
 * back to {@link StoragePoolTagsDaoImpl#DEFAULT_BATCH_QUERY_SIZE} when the
 * setting is absent or not a valid integer.
 * @return configured batch size, or the default
 */
protected int getDetailsBatchSize() {
    String batchCfg = _configDao.getValue("detail.batch.query.size");
    if (batchCfg == null) {
        return DEFAULT_BATCH_QUERY_SIZE;
    }
    try {
        return Integer.parseInt(batchCfg);
    } catch (NumberFormatException e) {
        // A malformed global setting must not break every tag lookup; use the default.
        return DEFAULT_BATCH_QUERY_SIZE;
    }
}
/**
 * Builds the API response object for a single storage pool tag.
 * @param tag storage pool tag entity
 * @return populated {@link StorageTagResponse} with object name "storagetag"
 */
@Override
public StorageTagResponse newStorageTagResponse(StoragePoolTagVO tag) {
    final StorageTagResponse response = new StorageTagResponse();
    response.setObjectName("storagetag");
    response.setPoolId(tag.getPoolId());
    response.setName(tag.getTag());
    return response;
}
}

View File

@ -84,6 +84,12 @@ public class Upgrade4920to41000 implements DbUpgrade {
case Hyperv:
hypervisorsListInUse.add(Hypervisor.HypervisorType.Hyperv);
break;
case LXC:
hypervisorsListInUse.add(Hypervisor.HypervisorType.LXC);
break;
case Ovm3:
hypervisorsListInUse.add(Hypervisor.HypervisorType.Ovm3);
break;
default: // no action on cases Any, BareMetal, None, Ovm,
// Parallels, Simulator and VirtualBox:
break;
@ -100,6 +106,8 @@ public class Upgrade4920to41000 implements DbUpgrade {
put(Hypervisor.HypervisorType.VMware, "systemvm-vmware-4.10");
put(Hypervisor.HypervisorType.KVM, "systemvm-kvm-4.10");
put(Hypervisor.HypervisorType.Hyperv, "systemvm-hyperv-4.10");
put(Hypervisor.HypervisorType.LXC, "systemvm-lxc-4.10");
put(Hypervisor.HypervisorType.Ovm3, "systemvm-ovm3-4.10");
}
};
@ -109,6 +117,8 @@ public class Upgrade4920to41000 implements DbUpgrade {
put(Hypervisor.HypervisorType.VMware, "router.template.vmware");
put(Hypervisor.HypervisorType.KVM, "router.template.kvm");
put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv");
put(Hypervisor.HypervisorType.LXC, "router.template.lxc");
put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3");
}
};
@ -118,6 +128,8 @@ public class Upgrade4920to41000 implements DbUpgrade {
put(Hypervisor.HypervisorType.VMware, "https://download.cloudstack.org/systemvm/4.10/systemvm64template-master-4.10.0-vmware.ova");
put(Hypervisor.HypervisorType.KVM, "https://download.cloudstack.org/systemvm/4.10/systemvm64template-master-4.10.0-kvm.qcow2.bz2");
put(Hypervisor.HypervisorType.Hyperv, "https://download.cloudstack.org/systemvm/4.10/systemvm64template-master-4.10.0-hyperv.vhd.zip");
put(Hypervisor.HypervisorType.LXC, "https://download.cloudstack.org/systemvm/4.10/systemvm64template-master-4.10.0-kvm.qcow2.bz2");
put(Hypervisor.HypervisorType.Ovm3, "https://download.cloudstack.org/systemvm/4.10/systemvm64template-master-4.10.0-ovm.raw.bz2");
}
};
@ -127,6 +139,8 @@ public class Upgrade4920to41000 implements DbUpgrade {
put(Hypervisor.HypervisorType.VMware, "970bfb070a80bd74820881d8149643c1");
put(Hypervisor.HypervisorType.KVM, "bc2eac46f16a2ece6c19d4b89db41de3");
put(Hypervisor.HypervisorType.Hyperv, "0adb35bd9f92e80d3fc63fcdd9bb55e5");
put(Hypervisor.HypervisorType.LXC, "bc2eac46f16a2ece6c19d4b89db41de3");
put(Hypervisor.HypervisorType.Ovm3, "94a41f0a5361933813bb34a51df56f56");
}
};
@ -183,7 +197,7 @@ public class Upgrade4920to41000 implements DbUpgrade {
// Change value of global configuration parameter
// minreq.sysvmtemplate.version for the ACS version
try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = ?");) {
update_pstmt.setString(1, getUpgradedVersion());
update_pstmt.setString(1, "4.10.0");
update_pstmt.setString(2, "minreq.sysvmtemplate.version");
update_pstmt.executeUpdate();
} catch (final SQLException e) {

View File

@ -53,7 +53,7 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
*/
void updateCapacityIops(long id, long capacityIops);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details);
StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags);
/**
* Find pool by name.
@ -100,7 +100,7 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
Map<String, String> getDetails(long poolId);
List<String> searchForStoragePoolDetails(long poolId, String value);
List<String> searchForStoragePoolTags(long poolId);
List<StoragePoolVO> findIfDuplicatePoolsExistByUUID(String uuid);
@ -121,4 +121,6 @@ public interface PrimaryDataStoreDao extends GenericDao<StoragePoolVO, Long> {
List<StoragePoolVO> findLocalStoragePoolsByHostAndTags(long hostId, String[] tags);
List<StoragePoolVO> listLocalStoragePoolByPath(long datacenterId, String path);
void deletePoolTags(long poolId);
}

View File

@ -20,19 +20,22 @@ import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.inject.Inject;
import javax.naming.ConfigurationException;
import org.apache.commons.collections.CollectionUtils;
import com.cloud.host.Status;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.storage.ScopeType;
import com.cloud.storage.StoragePoolHostVO;
import com.cloud.storage.StoragePoolStatus;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.utils.db.DB;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.GenericSearchBuilder;
@ -58,15 +61,28 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
protected StoragePoolDetailsDao _detailsDao;
@Inject
protected StoragePoolHostDao _hostDao;
@Inject
protected StoragePoolTagsDao _tagsDao;
private final String DetailsSqlPrefix =
protected final String DetailsSqlPrefix =
"SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
private final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
private final String ZoneWideDetailsSqlPrefix =
"SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_details ON storage_pool.id = storage_pool_details.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
private final String ZoneWideDetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
protected final String DetailsSqlSuffix = ") GROUP BY storage_pool_details.pool_id HAVING COUNT(storage_pool_details.name) >= ?";
protected final String ZoneWideTagsSqlPrefix =
"SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and storage_pool.scope = ? and (";
protected final String ZoneWideTagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?";
private final String FindPoolTagDetails = "SELECT storage_pool_details.name FROM storage_pool_details WHERE pool_id = ? and value = ?";
// Storage tags are now separate from storage_pool_details, leaving only details on that table
protected final String TagsSqlPrefix = "SELECT storage_pool.* from storage_pool LEFT JOIN storage_pool_tags ON storage_pool.id = storage_pool_tags.pool_id WHERE storage_pool.removed is null and storage_pool.status = 'Up' and storage_pool.data_center_id = ? and (storage_pool.pod_id = ? or storage_pool.pod_id is null) and storage_pool.scope = ? and (";
protected final String TagsSqlSuffix = ") GROUP BY storage_pool_tags.pool_id HAVING COUNT(storage_pool_tags.tag) >= ?";
protected final String FindPoolTags = "SELECT storage_pool_tags.tag FROM storage_pool_tags WHERE pool_id = ?";
/**
* Used in method findPoolsByDetailsOrTagsInternal
*/
protected enum ValueType {
DETAILS, TAGS;
}
public PrimaryDataStoreDaoImpl() {
AllFieldSearch = createSearchBuilder();
@ -256,7 +272,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
@Override
@DB
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details) {
public StoragePoolVO persist(StoragePoolVO pool, Map<String, String> details, List<String> tags) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
txn.start();
pool = super.persist(pool);
@ -266,57 +282,132 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
_detailsDao.persist(vo);
}
}
if (CollectionUtils.isNotEmpty(tags)) {
_tagsDao.persist(pool.getId(), tags);
}
txn.commit();
return pool;
}
/**
 * Internal helper to retrieve storage pools matching either storage pool details or storage tags.
 * @param dcId data center id
 * @param podId pod id
 * @param clusterId cluster id
 * @param scope storage pool scope
 * @param sqlValues SQL fragment containing the detail or tag conditions to query
 * @param valuesType indicates whether sqlValues refers to details or storage tags
 * @param valuesLength number of detail/tag values represented in sqlValues
 * @return list of storage pools matching the conditions
 */
protected List<StoragePoolVO> findPoolsByDetailsOrTagsInternal(long dcId, long podId, Long clusterId, ScopeType scope, String sqlValues, ValueType valuesType, int valuesLength) {
    // Compare enums with == (null-safe and idiomatic) and evaluate the type only once.
    final boolean byDetails = valuesType == ValueType.DETAILS;
    final String sqlPrefix = byDetails ? DetailsSqlPrefix : TagsSqlPrefix;
    final String sqlSuffix = byDetails ? DetailsSqlSuffix : TagsSqlSuffix;
    String sql = getSqlPreparedStatement(sqlPrefix, sqlSuffix, sqlValues, clusterId);
    return searchStoragePoolsPreparedStatement(sql, dcId, podId, clusterId, scope, valuesLength);
}
/**
* Search storage pools in a transaction
* @param sql prepared statement sql
* @param dcId data center id
* @param podId pod id
* @param clusterId cluster id
* @param scope scope
* @param valuesLength values length
* @return storage pools matching criteria
*/
@DB
@Override
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
StringBuilder sql = new StringBuilder(DetailsSqlPrefix);
protected List<StoragePoolVO> searchStoragePoolsPreparedStatement(String sql, long dcId, Long podId, Long clusterId, ScopeType scope, int valuesLength) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
try (PreparedStatement pstmt = txn.prepareStatement(sql);){
if (pstmt != null) {
// Bind parameters in the exact order the prefix/suffix SQL declares them:
// dcId, [podId], scope, [clusterId], then the HAVING COUNT threshold.
int i = 1;
pstmt.setLong(i++, dcId);
if (podId != null) {
pstmt.setLong(i++, podId);
}
pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
// Number of detail/tag values that must all match (HAVING COUNT(...) >= ?).
pstmt.setInt(i++, valuesLength);
try(ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
// toEntityBean(rs, false) — second flag presumably controls caching; verify in GenericDaoBase.
pools.add(toEntityBean(rs, false));
}
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
return pools;
}
/**
 * Assembles the final SQL statement: prefix, optional cluster filter, the
 * detail/tag conditions, then the suffix.
 * @param sqlPrefix statement prefix
 * @param sqlSuffix statement suffix
 * @param sqlValues SQL fragment with the detail or tag conditions
 * @param clusterId cluster id; when non-null an extra cluster_id clause is inserted
 * @return the assembled SQL statement
 */
protected String getSqlPreparedStatement(String sqlPrefix, String sqlSuffix, String sqlValues, Long clusterId) {
    final String clusterClause = clusterId == null
            ? ""
            : "storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (";
    return sqlPrefix + clusterClause + sqlValues + sqlSuffix;
}
/**
* Return SQL string from details, to be placed between SQL prefix and SQL suffix when creating the storage pool details PreparedStatement.
* @param details storage pool details
* @return SQL string containing detail name/value conditions to be placed between prefix and suffix when creating the PreparedStatement.
* @throws NullPointerException if details is null
* @throws IndexOutOfBoundsException if details is not null, but empty
*/
protected String getSqlValuesFromDetails(Map<String, String> details) {
StringBuilder sqlValues = new StringBuilder();
for (Map.Entry<String, String> detail : details.entrySet()) {
sql.append("((storage_pool_details.name='")
sqlValues.append("((storage_pool_details.name='")
.append(detail.getKey())
.append("') AND (storage_pool_details.value='")
.append(detail.getValue())
.append("')) OR ");
}
sql.delete(sql.length() - 4, sql.length());
sql.append(DetailsSqlSuffix);
TransactionLegacy txn = TransactionLegacy.currentTxn();
try (PreparedStatement pstmt = txn.prepareStatement(sql.toString());){
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setLong(i++, podId);
pstmt.setString(i++, scope.toString());
if (clusterId != null) {
pstmt.setLong(i++, clusterId);
}
pstmt.setInt(i++, details.size());
try(ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
return pools;
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to execute :" + e.getMessage(), e);
}
sqlValues.delete(sqlValues.length() - 4, sqlValues.length());
return sqlValues.toString();
}
protected Map<String, String> tagsToDetails(String[] tags) {
Map<String, String> details = new HashMap<String, String>(tags.length);
/**
* Return SQL string from storage tags, to be placed between SQL Prefix and SQL Suffix when creating storage tags PreparedStatement.
* @param tags storage tags array
* @return SQL string containing storage tag values to be placed between Prefix and Suffix when creating PreparedStatement.
* @throws NullPointerException if tags is null
* @throws IndexOutOfBoundsException if tags is not null, but empty
*/
protected String getSqlValuesFromStorageTags(String[] tags) throws NullPointerException, IndexOutOfBoundsException {
StringBuilder sqlValues = new StringBuilder();
for (String tag : tags) {
details.put(tag, "true");
sqlValues.append("(storage_pool_tags.tag='")
.append(tag)
.append("') OR ");
}
return details;
sqlValues.delete(sqlValues.length() - 4, sqlValues.length());
return sqlValues.toString();
}
/**
 * Lists storage pools in the given zone/pod (optionally restricted to a
 * cluster) whose details match all of the given name/value pairs.
 */
@DB
@Override
public List<StoragePoolVO> findPoolsByDetails(long dcId, long podId, Long clusterId, Map<String, String> details, ScopeType scope) {
    // Build the detail-matching WHERE fragment, then delegate to the shared lookup.
    return findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, scope,
            getSqlValuesFromDetails(details), ValueType.DETAILS, details.size());
}
@Override
@ -325,8 +416,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId, ScopeType.CLUSTER);
} else {
Map<String, String> details = tagsToDetails(tags);
storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.CLUSTER);
String sqlValues = getSqlValuesFromStorageTags(tags);
storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, ScopeType.CLUSTER, sqlValues, ValueType.TAGS, tags.length);
}
return storagePools;
@ -358,8 +449,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
if (tags == null || tags.length == 0) {
storagePools = listBy(dcId, podId, clusterId, ScopeType.HOST);
} else {
Map<String, String> details = tagsToDetails(tags);
storagePools = findPoolsByDetails(dcId, podId, clusterId, details, ScopeType.HOST);
String sqlValues = getSqlValuesFromStorageTags(tags);
storagePools = findPoolsByDetailsOrTagsInternal(dcId, podId, clusterId, ScopeType.HOST, sqlValues, ValueType.TAGS, tags.length);
}
return storagePools;
@ -369,7 +460,7 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
public List<StoragePoolVO> findLocalStoragePoolsByHostAndTags(long hostId, String[] tags) {
SearchBuilder<StoragePoolVO> hostSearch = createSearchBuilder();
SearchBuilder<StoragePoolHostVO> hostPoolSearch = _hostDao.createSearchBuilder();
SearchBuilder<StoragePoolDetailVO> tagPoolSearch = _detailsDao.createSearchBuilder();;
SearchBuilder<StoragePoolTagVO> tagPoolSearch = _tagsDao.createSearchBuilder();;
// Search for pools on the host
hostPoolSearch.and("hostId", hostPoolSearch.entity().getHostId(), Op.EQ);
@ -380,9 +471,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
hostSearch.join("hostJoin", hostPoolSearch, hostSearch.entity().getId(), hostPoolSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER);
if (!(tags == null || tags.length == 0 )) {
tagPoolSearch.and("name", tagPoolSearch.entity().getName(), Op.EQ);
tagPoolSearch.and("value", tagPoolSearch.entity().getValue(), Op.EQ);
hostSearch.join("tagJoin", tagPoolSearch, hostSearch.entity().getId(), tagPoolSearch.entity().getResourceId(), JoinBuilder.JoinType.INNER);
tagPoolSearch.and("tag", tagPoolSearch.entity().getTag(), Op.EQ);
hostSearch.join("tagJoin", tagPoolSearch, hostSearch.entity().getId(), tagPoolSearch.entity().getPoolId(), JoinBuilder.JoinType.INNER);
}
SearchCriteria<StoragePoolVO> sc = hostSearch.create();
@ -391,10 +481,8 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.setParameters("status", Status.Up.toString());
if (!(tags == null || tags.length == 0 )) {
Map<String, String> details = tagsToDetails(tags);
for (Map.Entry<String, String> detail : details.entrySet()) {
sc.setJoinParameters("tagJoin","name", detail.getKey());
sc.setJoinParameters("tagJoin", "value", detail.getValue());
for (String tag : tags) {
sc.setJoinParameters("tagJoin", "tag", tag);
}
}
return listBy(sc);
@ -409,68 +497,15 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.and(sc.entity().getScope(), Op.EQ, ScopeType.ZONE);
return sc.list();
} else {
Map<String, String> details = tagsToDetails(tags);
StringBuilder sql = new StringBuilder(ZoneWideDetailsSqlPrefix);
for (int i=0;i<details.size();i++){
sql.append("((storage_pool_details.name=?) AND (storage_pool_details.value=?)) OR ");
}
sql.delete(sql.length() - 4, sql.length());
sql.append(ZoneWideDetailsSqlSuffix);
TransactionLegacy txn = TransactionLegacy.currentTxn();
try (PreparedStatement pstmt = txn.prepareStatement(sql.toString());){
List<StoragePoolVO> pools = new ArrayList<StoragePoolVO>();
if (pstmt != null) {
int i = 1;
pstmt.setLong(i++, dcId);
pstmt.setString(i++, ScopeType.ZONE.toString());
for (Map.Entry<String, String> detail : details.entrySet()) {
pstmt.setString(i++, detail.getKey());
pstmt.setString(i++, detail.getValue());
}
pstmt.setInt(i++, details.size());
try(ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
pools.add(toEntityBean(rs, false));
}
}catch (SQLException e) {
throw new CloudRuntimeException("findZoneWideStoragePoolsByTags:Exception:" + e.getMessage(), e);
}
}
return pools;
} catch (SQLException e) {
throw new CloudRuntimeException("findZoneWideStoragePoolsByTags:Exception:" + e.getMessage(), e);
}
String sqlValues = getSqlValuesFromStorageTags(tags);
String sql = getSqlPreparedStatement(ZoneWideTagsSqlPrefix, ZoneWideTagsSqlSuffix, sqlValues, null);
return searchStoragePoolsPreparedStatement(sql, dcId, null, null, ScopeType.ZONE, tags.length);
}
}
@Override
@DB
public List<String> searchForStoragePoolDetails(long poolId, String value) {
StringBuilder sql = new StringBuilder(FindPoolTagDetails);
TransactionLegacy txn = TransactionLegacy.currentTxn();
List<String> tags = new ArrayList<String>();
try(PreparedStatement pstmt = txn.prepareStatement(sql.toString());) {
if (pstmt != null) {
pstmt.setLong(1, poolId);
pstmt.setString(2, value);
try(ResultSet rs = pstmt.executeQuery();) {
while (rs.next()) {
tags.add(rs.getString("name"));
}
}catch (SQLException e) {
throw new CloudRuntimeException("searchForStoragePoolDetails:Exception:" + e.getMessage(), e);
}
}
return tags;
} catch (SQLException e) {
throw new CloudRuntimeException("searchForStoragePoolDetails:Exception:" + e.getMessage(), e);
}
public List<String> searchForStoragePoolTags(long poolId) {
// Storage tags now live in their own table; delegate to the dedicated tags DAO.
return _tagsDao.getStoragePoolTags(poolId);
}
@Override
@ -530,4 +565,10 @@ public class PrimaryDataStoreDaoImpl extends GenericDaoBase<StoragePoolVO, Long>
sc.and(sc.entity().getHypervisor(), Op.EQ, hypervisorType);
return sc.list();
}
@Override
public void deletePoolTags(long poolId) {
// Removes every tag row for the given pool via the dedicated tags DAO.
_tagsDao.deleteTags(poolId);
}
}

View File

@ -0,0 +1,128 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.storage.dao;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.Spy;
import org.powermock.modules.junit4.PowerMockRunner;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.utils.db.Filter;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
import static org.mockito.Mockito.when;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.doNothing;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import junit.framework.TestCase;
/**
 * Unit tests for {@code StoragePoolTagsDaoImpl}, covering:
 * <ul>
 *   <li>resolution of the detail batch size from configuration (with default fallback),</li>
 *   <li>the internal batched search of storage pool tags by pool ids,</li>
 *   <li>the splitting of id arrays into batches in {@code searchByIds}.</li>
 * </ul>
 */
@RunWith(PowerMockRunner.class)
public class StoragePoolTagsDaoImplTest extends TestCase {

    @Mock
    ConfigurationDao _configDao;
    @Mock
    SearchBuilder<StoragePoolTagVO> StoragePoolIdsSearch;

    @Spy
    @InjectMocks
    private StoragePoolTagsDaoImpl _storagePoolTagsDaoImpl = new StoragePoolTagsDaoImpl();

    @Mock
    StoragePoolTagVO storagePoolTag1;
    @Mock
    StoragePoolTagVO storagePoolTag2;

    // Configuration key controlling how many ids are queried per DB round trip.
    private final String batchSizeConfigurationKey = "detail.batch.query.size";
    private final String batchSizeValue = "2800";
    private final String batchSizeDefaultValue = "2000";
    // A batch size smaller than storageTagsIds.length, to force multiple batches.
    private final String batchSizeLow = "2";

    private final Long[] storageTagsIds = {1L, 2L, 3L, 4L, 5L};
    // Built in setup() rather than at field initialization: @Mock fields are
    // still null while instance initializers run (the runner injects mocks
    // after construction), so a field-level Arrays.asList(storagePoolTag1,
    // storagePoolTag2) would capture two nulls instead of the mocks.
    private List<StoragePoolTagVO> storagePoolTagList;

    @Before
    public void setup() {
        storagePoolTagList = Arrays.asList(storagePoolTag1, storagePoolTag2);
        when(_configDao.getValue(batchSizeConfigurationKey)).thenReturn(batchSizeValue);
        doReturn(storagePoolTagList).when(_storagePoolTagsDaoImpl).searchIncludingRemoved(
                Matchers.any(SearchCriteria.class), Matchers.isNull(Filter.class), Matchers.isNull(Boolean.class), Matchers.eq(false));
    }

    @Test
    public void testGetDetailsBatchSizeNotNull() {
        // Configured value takes precedence over the default.
        assertEquals(Integer.parseInt(batchSizeValue), _storagePoolTagsDaoImpl.getDetailsBatchSize());
    }

    @Test
    public void testGetDetailsBatchSizeNull() {
        // Missing configuration falls back to the built-in default.
        when(_configDao.getValue(batchSizeConfigurationKey)).thenReturn(null);
        assertEquals(Integer.parseInt(batchSizeDefaultValue), _storagePoolTagsDaoImpl.getDetailsBatchSize());
    }

    @Test
    public void testSearchForStoragePoolIdsInternalStorageTagsNotNullSearch() {
        // A non-null search result is appended to the caller-supplied list.
        List<StoragePoolTagVO> storagePoolTags = new ArrayList<StoragePoolTagVO>();
        _storagePoolTagsDaoImpl.searchForStoragePoolIdsInternal(0, storageTagsIds.length, storageTagsIds, storagePoolTags);
        verify(_storagePoolTagsDaoImpl).searchIncludingRemoved(Matchers.any(SearchCriteria.class), Matchers.isNull(Filter.class), Matchers.isNull(Boolean.class), Matchers.eq(false));
        assertEquals(2, storagePoolTags.size());
    }

    @Test
    public void testSearchForStoragePoolIdsInternalStorageTagsNullSearch() {
        // A null search result must leave the caller-supplied list untouched.
        List<StoragePoolTagVO> storagePoolTags = new ArrayList<StoragePoolTagVO>();
        doReturn(null).when(_storagePoolTagsDaoImpl).searchIncludingRemoved(
                Matchers.any(SearchCriteria.class), Matchers.isNull(Filter.class), Matchers.isNull(Boolean.class), Matchers.eq(false));
        _storagePoolTagsDaoImpl.searchForStoragePoolIdsInternal(0, storageTagsIds.length, storageTagsIds, storagePoolTags);
        verify(_storagePoolTagsDaoImpl).searchIncludingRemoved(Matchers.any(SearchCriteria.class), Matchers.isNull(Filter.class), Matchers.isNull(Boolean.class), Matchers.eq(false));
        assertEquals(0, storagePoolTags.size());
    }

    @Test
    public void testSearchByIdsStorageTagsIdsGreaterOrEqualThanBatchSize() {
        // 5 ids with batch size 2 -> two full batches plus one remainder batch.
        when(_configDao.getValue(batchSizeConfigurationKey)).thenReturn(batchSizeLow);
        doNothing().when(_storagePoolTagsDaoImpl).searchForStoragePoolIdsInternal(Matchers.anyInt(), Matchers.anyInt(), Matchers.any(Long[].class), Matchers.anyList());
        _storagePoolTagsDaoImpl.searchByIds(storageTagsIds);
        int batchSize = Integer.parseInt(batchSizeLow);
        int difference = storageTagsIds.length - 2 * batchSize;
        verify(_storagePoolTagsDaoImpl, Mockito.times(2)).searchForStoragePoolIdsInternal(Matchers.anyInt(), Matchers.eq(batchSize), Matchers.any(Long[].class), Matchers.anyList());
        verify(_storagePoolTagsDaoImpl).searchForStoragePoolIdsInternal(Matchers.eq(2 * batchSize), Matchers.eq(difference), Matchers.any(Long[].class), Matchers.anyList());
    }

    @Test
    public void testSearchByIdsStorageTagsIdsLowerThanBatchSize() {
        // With the default (large) batch size, all ids fit in a single batch.
        doNothing().when(_storagePoolTagsDaoImpl).searchForStoragePoolIdsInternal(Matchers.anyInt(), Matchers.anyInt(), Matchers.any(Long[].class), Matchers.anyList());
        _storagePoolTagsDaoImpl.searchByIds(storageTagsIds);
        verify(_storagePoolTagsDaoImpl).searchForStoragePoolIdsInternal(Matchers.eq(0), Matchers.eq(storageTagsIds.length), Matchers.any(Long[].class), Matchers.anyList());
    }
}

View File

@ -0,0 +1,151 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.storage.datastore.db;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.verify;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDaoImpl.ValueType;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Matchers;
import org.mockito.Mock;
import org.mockito.Spy;
import org.powermock.modules.junit4.PowerMockRunner;
import com.cloud.storage.ScopeType;
import com.cloud.storage.dao.StoragePoolHostDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import junit.framework.TestCase;
/**
 * Unit tests for {@code PrimaryDataStoreDaoImpl}, covering:
 * <ul>
 *   <li>SQL value-fragment generation from storage tags and pool details
 *       (including null/empty argument behavior),</li>
 *   <li>prepared-statement assembly with and without a cluster id,</li>
 *   <li>the internal pool lookup by tags or details.</li>
 * </ul>
 */
@RunWith(PowerMockRunner.class)
public class PrimaryDataStoreDaoImplTest extends TestCase {

    @Mock
    StoragePoolDetailsDao _detailsDao;
    @Mock
    StoragePoolHostDao _hostDao;
    @Mock
    StoragePoolTagsDao _tagsDao;

    @Spy
    @InjectMocks
    private static PrimaryDataStoreDaoImpl primaryDataStoreDao = new PrimaryDataStoreDaoImpl();

    @Mock
    StoragePoolVO storagePoolVO;

    private static final String STORAGE_TAG_1 = "NFS-A";
    private static final String STORAGE_TAG_2 = "NFS-B";
    private static final String[] STORAGE_TAGS_ARRAY = {STORAGE_TAG_1, STORAGE_TAG_2};

    private static final String DETAIL_KEY = "storage.overprovisioning.factor";
    private static final String DETAIL_VALUE = "2.0";
    // Plain static initializer instead of double-brace initialization, which
    // would create a needless anonymous HashMap subclass.
    private static final Map<String, String> STORAGE_POOL_DETAILS = new HashMap<String, String>();
    static {
        STORAGE_POOL_DETAILS.put(DETAIL_KEY, DETAIL_VALUE);
    }

    private static final String EXPECTED_RESULT_SQL_STORAGE_TAGS = "(storage_pool_tags.tag='" + STORAGE_TAG_1 + "') OR (storage_pool_tags.tag='" + STORAGE_TAG_2 + "')";
    private static final String EXPECTED_RESULT_SQL_DETAILS = "((storage_pool_details.name='" + DETAIL_KEY + "') AND (storage_pool_details.value='" + DETAIL_VALUE +"'))";

    // Opaque placeholders: the assembly tests only check concatenation order.
    private static final String SQL_PREFIX = "XXXXXXXXXXXXXXXX";
    private static final String SQL_SUFFIX = "ZZZZZZZZZZZZZZZZ";
    private static final String SQL_VALUES = "YYYYYYYYYYYYYYYY";

    private static final Long DATACENTER_ID = 1L;
    private static final Long POD_ID = 1L;
    private static final Long CLUSTER_ID = null;
    private static final ScopeType SCOPE = ScopeType.ZONE;

    @Before
    public void setup() {
        // Stub the DB-backed search so lookup tests exercise only SQL assembly.
        doReturn(Arrays.asList(storagePoolVO)).when(primaryDataStoreDao).
            searchStoragePoolsPreparedStatement(Matchers.anyString(), Matchers.anyLong(), Matchers.anyLong(), Matchers.anyLong(),
                    Matchers.any(ScopeType.class), Matchers.anyInt());
    }

    @Test
    public void testGetSqlValuesFromStorageTagsNotNullStorageTags() {
        assertEquals(EXPECTED_RESULT_SQL_STORAGE_TAGS, primaryDataStoreDao.getSqlValuesFromStorageTags(STORAGE_TAGS_ARRAY));
    }

    @Test(expected=NullPointerException.class)
    public void testGetSqlValuesFromStorageTagsNullStorageTags() {
        primaryDataStoreDao.getSqlValuesFromStorageTags(null);
    }

    @Test(expected=IndexOutOfBoundsException.class)
    public void testGetSqlValuesFromStorageTagsEmptyStorageTags() {
        String[] emptyStorageTags = {};
        primaryDataStoreDao.getSqlValuesFromStorageTags(emptyStorageTags);
    }

    @Test
    public void testGetSqlValuesFromDetailsNotNullDetails() {
        assertEquals(EXPECTED_RESULT_SQL_DETAILS, primaryDataStoreDao.getSqlValuesFromDetails(STORAGE_POOL_DETAILS));
    }

    @Test(expected=NullPointerException.class)
    public void testGetSqlValuesFromDetailsNullDetails() {
        primaryDataStoreDao.getSqlValuesFromDetails(null);
    }

    @Test(expected=IndexOutOfBoundsException.class)
    public void testGetSqlValuesFromDetailsEmptyDetails() {
        Map<String,String> emptyDetails = new HashMap<String, String>();
        primaryDataStoreDao.getSqlValuesFromDetails(emptyDetails);
    }

    @Test
    public void testGetSqlPreparedStatementNullClusterId() {
        // No cluster id -> plain prefix + values + suffix.
        String sqlPreparedStatement = primaryDataStoreDao.getSqlPreparedStatement(SQL_PREFIX, SQL_SUFFIX, SQL_VALUES, null);
        assertEquals(SQL_PREFIX + SQL_VALUES + SQL_SUFFIX, sqlPreparedStatement);
    }

    @Test
    public void testGetSqlPreparedStatementNotNullClusterId() {
        // A cluster id injects an extra cluster filter clause after the prefix.
        String clusterSql = "storage_pool.cluster_id = ? OR storage_pool.cluster_id IS NULL) AND (";
        String sqlPreparedStatement = primaryDataStoreDao.getSqlPreparedStatement(SQL_PREFIX, SQL_SUFFIX, SQL_VALUES, 1L);
        assertEquals(SQL_PREFIX + clusterSql + SQL_VALUES + SQL_SUFFIX, sqlPreparedStatement);
    }

    @Test
    public void testFindPoolsByDetailsOrTagsInternalStorageTagsType() {
        // TAGS value type must route through the tags SQL prefix/suffix.
        List<StoragePoolVO> storagePools = primaryDataStoreDao.findPoolsByDetailsOrTagsInternal(DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, SQL_VALUES, ValueType.TAGS, STORAGE_TAGS_ARRAY.length);
        assertEquals(Arrays.asList(storagePoolVO), storagePools);
        verify(primaryDataStoreDao).getSqlPreparedStatement(
                primaryDataStoreDao.TagsSqlPrefix, primaryDataStoreDao.TagsSqlSuffix, SQL_VALUES, CLUSTER_ID);
        String expectedSql = primaryDataStoreDao.TagsSqlPrefix + SQL_VALUES + primaryDataStoreDao.TagsSqlSuffix;
        verify(primaryDataStoreDao).searchStoragePoolsPreparedStatement(expectedSql, DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, STORAGE_TAGS_ARRAY.length);
    }

    @Test
    public void testFindPoolsByDetailsOrTagsInternalDetailsType() {
        // DETAILS value type must route through the details SQL prefix/suffix.
        List<StoragePoolVO> storagePools = primaryDataStoreDao.findPoolsByDetailsOrTagsInternal(DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, SQL_VALUES, ValueType.DETAILS, STORAGE_POOL_DETAILS.size());
        assertEquals(Arrays.asList(storagePoolVO), storagePools);
        verify(primaryDataStoreDao).getSqlPreparedStatement(
                primaryDataStoreDao.DetailsSqlPrefix, primaryDataStoreDao.DetailsSqlSuffix, SQL_VALUES, CLUSTER_ID);
        String expectedSql = primaryDataStoreDao.DetailsSqlPrefix + SQL_VALUES + primaryDataStoreDao.DetailsSqlSuffix;
        verify(primaryDataStoreDao).searchStoragePoolsPreparedStatement(expectedSql, DATACENTER_ID, POD_ID, CLUSTER_ID, SCOPE, STORAGE_POOL_DETAILS.size());
    }
}

View File

@ -23,7 +23,7 @@
<parameter name="devcloud-host-ip" value="192.168.56.2"/>
<parameter name="devcloud-host-gateway" value="192.168.56.1"/>
<parameter name="devcloud-host-cidr" value="192.168.56.0/24"/>
<parameter name="template-url" value="http://download.cloud.com/templates/acton/acton-systemvm-02062012.vhd.bz2"/>
<parameter name="template-url" value="http://download.cloudstack.org/templates/acton/acton-systemvm-02062012.vhd.bz2"/>
<parameter name="primary-storage-want-to-add" value="nfs://192.168.56.2/opt/storage/primarynfs"/>
<parameter name="devcloud-local-storage-uuid" value="cd10cac1-4772-92e5-5da6-c2bc16b1ce1b"/>
<parameter name="devcloud-host-uuid" value="759ee4c9-a15a-297b-67c6-ac267d8aa429"/>

View File

@ -20,6 +20,7 @@ package org.apache.cloudstack.storage.volume.datastore;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -118,6 +119,8 @@ public class PrimaryDataStoreHelper {
dataStoreVO.setPath(updatedPath);
}
String tags = params.getTags();
List<String> storageTags = new ArrayList<String>();
if (tags != null) {
String[] tokens = tags.split(",");
@ -126,10 +129,10 @@ public class PrimaryDataStoreHelper {
if (tag.length() == 0) {
continue;
}
details.put(tag, "true");
storageTags.add(tag);
}
}
dataStoreVO = dataStoreDao.persist(dataStoreVO, details);
dataStoreVO = dataStoreDao.persist(dataStoreVO, details, storageTags);
return dataStoreMgr.getDataStore(dataStoreVO.getId(), DataStoreRole.Primary);
}
@ -231,6 +234,7 @@ public class PrimaryDataStoreHelper {
poolVO.setUuid(null);
this.dataStoreDao.update(poolVO.getId(), poolVO);
dataStoreDao.remove(poolVO.getId());
dataStoreDao.deletePoolTags(poolVO.getId());
deletePoolStats(poolVO.getId());
// Delete op_host_capacity entries
this._capacityDao.removeBy(Capacity.CAPACITY_TYPE_STORAGE_ALLOCATED, null, null, null, poolVO.getId());

View File

@ -56,7 +56,7 @@ public class PrimaryDataStoreProviderManagerImpl implements PrimaryDataStoreProv
@Override
public PrimaryDataStore getPrimaryDataStore(long dataStoreId) {
StoragePoolVO dataStoreVO = dataStoreDao.findById(dataStoreId);
StoragePoolVO dataStoreVO = dataStoreDao.findByIdIncludingRemoved(dataStoreId);
if (dataStoreVO == null) {
throw new CloudRuntimeException("Unable to locate datastore with id " + dataStoreId);
}

View File

@ -16,7 +16,7 @@
# specific language governing permissions and limitations
# under the License.
set -e
#set -e
#
# This script builds Debian packages for CloudStack and does
@ -43,11 +43,21 @@ set -e
cd `dirname $0`
cd ..
dpkg-checkbuilddeps
DCH=$(which dch)
if [ -z "$DCH" ] ; then
echo -e "dch not found, please install devscripts at first. \nDEB Build Failed"
exit
fi
VERSION=$(grep '^ <version>' pom.xml| cut -d'>' -f2 |cut -d'<' -f1)
VERSION=$(head -n1 debian/changelog |awk -F [\(\)] '{print $2}')
DISTCODE=$(lsb_release -sc)
dch -b -v "${VERSION}~${DISTCODE}" -u low -m "Apache CloudStack Release ${VERSION}"
/bin/cp debian/changelog /tmp/changelog.orig
dpkg-buildpackage -j2 -b -uc -us
dch -b -v "${VERSION}~${DISTCODE}" -u low -m "Apache CloudStack Release ${VERSION}"
sed -i '0,/ UNRELEASED;/s// unstable;/g' debian/changelog
dpkg-checkbuilddeps
dpkg-buildpackage -uc -us
/bin/mv /tmp/changelog.orig debian/changelog

View File

@ -2,6 +2,7 @@
# chkconfig: 35 99 10
# description: Cloud Agent
# pidfile: /var/run/cloudstack-agent.pid
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file

View File

@ -441,11 +441,11 @@ if [ "$1" == "1" ] ; then
fi
grep -s -q "db.cloud.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.cloud.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties"
grep -s -q "db.usage.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.usage.driver=jdbc:mysql" db.properties
grep -s -q "db.usage.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.usage.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties"
grep -s -q "db.simulator.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.simulator.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties"
if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in
echo Please download vhd-util from http://download.cloudstack.org/tools/vhd-util and put it in
echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/
fi

View File

@ -2,6 +2,7 @@
# chkconfig: 35 99 10
# description: Cloud Agent
# pidfile: /var/run/cloudstack-agent.pid
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file

View File

@ -398,11 +398,11 @@ if [ "$1" == "1" ] ; then
fi
grep -s -q "db.cloud.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.cloud.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties"
grep -s -q "db.usage.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.usage.driver=jdbc:mysql" db.properties
grep -s -q "db.usage.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.usage.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties"
grep -s -q "db.simulator.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties" || sed -i -e "\$adb.simulator.driver=jdbc:mysql" "%{_sysconfdir}/%{name}/management/db.properties"
if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in
echo Please download vhd-util from http://download.cloudstack.org/tools/vhd-util and put it in
echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/
fi

View File

@ -2,6 +2,7 @@
# chkconfig: 35 99 10
# description: Cloud Agent
# pidfile: /var/run/cloudstack-agent.pid
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file

View File

@ -408,7 +408,7 @@ if [ "$1" == "1" ] ; then
fi
if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in
echo Please download vhd-util from http://download.cloudstack.org/tools/vhd-util and put it in
echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/
fi

View File

@ -2,6 +2,7 @@
# chkconfig: 35 99 10
# description: Cloud Agent
# pidfile: /var/run/cloudstack-agent.pid
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file

View File

@ -408,7 +408,7 @@ if [ "$1" == "1" ] ; then
fi
if [ ! -f %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/vhd-util ] ; then
echo Please download vhd-util from http://download.cloud.com.s3.amazonaws.com/tools/vhd-util and put it in
echo Please download vhd-util from http://download.cloudstack.org/tools/vhd-util and put it in
echo %{_datadir}/cloudstack-common/scripts/vm/hypervisor/xenserver/
fi

View File

@ -0,0 +1,50 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Required for a tomcat cloned service
SERVICE_NAME=cloudstack-management
# Where your java installation lives
#JAVA_HOME="/usr/lib/jvm/java"
# Where your cloudstack-management installation lives
CATALINA_BASE="/usr/share/cloudstack-management"
CATALINA_HOME="/usr/share/cloudstack-management"
JASPER_HOME="/usr/share/cloudstack-management"
CATALINA_TMPDIR="/usr/share/cloudstack-management/temp"
if [ -r "/etc/cloudstack/management/cloudmanagementserver.keystore" ] ; then
JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/cloudstack/management/ -XX:PermSize=512M -XX:MaxPermSize=800m -Djavax.net.ssl.trustStore=/etc/cloudstack/management/cloudmanagementserver.keystore -Djavax.net.ssl.trustStorePassword=vmops.com "
else
JAVA_OPTS="-Djava.awt.headless=true -Dcom.sun.management.jmxremote=false -Xmx2g -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/var/log/cloudstack/management/ -XX:PermSize=512M -XX:MaxPermSize=800m"
fi
TOMCAT_USER="cloud"
SECURITY_MANAGER="false"
SHUTDOWN_WAIT="30"
SHUTDOWN_VERBOSE="false"
# Set the TOMCAT_PID location
CATALINA_PID="/var/run/cloudstack-management.pid"
CLASSPATH=/usr/share/java/commons-daemon.jar:/usr/share/cloudstack-management/bin/bootstrap.jar:/usr/share/tomcat7/bin/tomcat-juli.jar:/etc/cloudstack/management:/usr/share/cloudstack-common:/usr/share/cloudstack-management/setup
BOOTSTRAP_CLASS=org.apache.catalina.startup.Bootstrap

View File

@ -0,0 +1,40 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Systemd unit file for CloudStack Management server
#
# clone tomcat service, see /usr/lib/systemd/system/tomcat.service
[Unit]
Description=CloudStack Management Server
After=syslog.target network.target
[Service]
UMask=0022
Type=forking
Environment="NAME=cloudstack-management"
EnvironmentFile=-/etc/default/cloudstack-management
ExecStartPre=/bin/bash -c "/bin/systemctl set-environment JAVA_HOME=$( readlink -f $( which java ) | sed s:bin/.*$:: )"
ExecStart=/usr/bin/jsvc -user "${TOMCAT_USER}" -cp "$CLASSPATH" \
-outfile SYSLOG -errfile SYSLOG \
-pidfile "${CATALINA_PID}" ${JAVA_OPTS} \
-Dcatalina.base="${CATALINA_BASE}" -Dcatalina.home="${CATALINA_HOME}" -Djava.io.tmpdir="${CATALINA_TMPDIR}" "${BOOTSTRAP_CLASS}"
ExecStop=/usr/bin/jsvc -cp "$CLASSPATH" -pidfile "$CATALINA_PID" \
-stop "$BOOTSTRAP_CLASS"
[Install]
WantedBy=multi-user.target

View File

@ -116,6 +116,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.CpuTuneDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DevicesDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DeviceType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiscardType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiskProtocol;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.FeaturesDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.FilesystemDef;
@ -125,6 +126,7 @@ import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.GuestResourceDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InputDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.InterfaceDef.GuestNetType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SCSIDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.SerialDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.TermPolicy;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.VideoDef;
@ -162,6 +164,7 @@ import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachine.PowerState;
import com.cloud.vm.VmDetailConstants;
/**
* LibvirtComputingResource execute requests on the computing/routing host using
@ -268,6 +271,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
protected int _rngRateBytes = 2048;
private File _qemuSocketsPath;
private final String _qemuGuestAgentSocketName = "org.qemu.guest_agent.0";
private long _totalMemory;
private final Map <String, String> _pifs = new HashMap<String, String>();
private final Map<String, VmStats> _vmStats = new ConcurrentHashMap<String, VmStats>();
@ -1515,7 +1519,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
private String getBroadcastUriFromBridge(final String brName) {
final String pif = matchPifFileInDirectory(brName);
final Pattern pattern = Pattern.compile("(\\D+)(\\d+)(\\D*)(\\d*)");
final Pattern pattern = Pattern.compile("(\\D+)(\\d+)(\\D*)(\\d*)(\\D*)(\\d*)");
final Matcher matcher = pattern.matcher(pif);
s_logger.debug("getting broadcast uri for pif " + pif + " and bridge " + brName);
if(matcher.find()) {
@ -1523,7 +1527,9 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
return BroadcastDomainType.Vxlan.toUri(matcher.group(2)).toString();
}
else{
if (!matcher.group(4).isEmpty()) {
if (!matcher.group(6).isEmpty()) {
return BroadcastDomainType.Vlan.toUri(matcher.group(6)).toString();
} else if (!matcher.group(4).isEmpty()) {
return BroadcastDomainType.Vlan.toUri(matcher.group(4)).toString();
} else {
//untagged or not matching (eth|bond|team)#.#
@ -2059,6 +2065,19 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
final InputDef input = new InputDef("tablet", "usb");
devices.addDevice(input);
DiskDef.DiskBus busT = getDiskModelFromVMDetail(vmTO);
if (busT == null) {
busT = getGuestDiskModel(vmTO.getPlatformEmulator());
}
// If we're using virtio scsi, then we need to add a virtual scsi controller
if (busT == DiskDef.DiskBus.SCSI) {
final SCSIDef sd = new SCSIDef((short)0, 0, 0, 9, 0);
devices.addDevice(sd);
}
vm.addComp(devices);
return vm;
@ -2142,23 +2161,16 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
// if params contains a rootDiskController key, use its value (this is what other HVs are doing)
DiskDef.DiskBus diskBusType = null;
final Map <String, String> params = vmSpec.getDetails();
if (params != null && params.get("rootDiskController") != null && !params.get("rootDiskController").isEmpty()) {
final String rootDiskController = params.get("rootDiskController");
s_logger.debug("Passed custom disk bus " + rootDiskController);
for (final DiskDef.DiskBus bus : DiskDef.DiskBus.values()) {
if (bus.toString().equalsIgnoreCase(rootDiskController)) {
s_logger.debug("Found matching enum for disk bus " + rootDiskController);
diskBusType = bus;
break;
}
}
}
DiskDef.DiskBus diskBusType = getDiskModelFromVMDetail(vmSpec);
if (diskBusType == null) {
diskBusType = getGuestDiskModel(vmSpec.getPlatformEmulator());
}
// I'm not sure why previously certain DATADISKs were hard-coded VIRTIO and others not, however this
// maintains existing functionality with the exception that SCSI will override VIRTIO.
DiskDef.DiskBus diskBusTypeData = (diskBusType == DiskDef.DiskBus.SCSI) ? diskBusType : DiskDef.DiskBus.VIRTIO;
final DiskDef disk = new DiskDef();
if (volume.getType() == Volume.Type.ISO) {
if (volPath == null) {
@ -2170,6 +2182,11 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
} else {
final int devId = volume.getDiskSeq().intValue();
if (diskBusType == DiskDef.DiskBus.SCSI ) {
disk.setQemuDriver(true);
disk.setDiscard(DiscardType.UNMAP);
}
if (pool.getType() == StoragePoolType.RBD) {
/*
For RBD pools we use the secret mechanism in libvirt.
@ -2188,7 +2205,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
disk.defBlockBasedDisk(physicalDisk.getPath(), devId, diskBusType);
} else {
if (volume.getType() == Volume.Type.DATADISK) {
disk.defFileBasedDisk(physicalDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO, DiskDef.DiskFmtType.QCOW2);
disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusTypeData, DiskDef.DiskFmtType.QCOW2);
} else {
disk.defFileBasedDisk(physicalDisk.getPath(), devId, diskBusType, DiskDef.DiskFmtType.QCOW2);
}
@ -2216,6 +2233,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
disk.setCacheMode(DiskDef.DiskCacheMode.valueOf(volumeObjectTO.getCacheMode().toString().toUpperCase()));
}
}
vm.getDevices().addDevice(disk);
}
@ -2334,13 +2352,13 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
DiskDef diskdef = null;
final KVMStoragePool attachingPool = attachingDisk.getPool();
try {
if (!attach) {
dm = conn.domainLookupByName(vmName);
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
final String xml = dm.getXMLDesc(0);
parser.parseDomainXML(xml);
disks = parser.getDisks();
dm = conn.domainLookupByName(vmName);
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
final String domXml = dm.getXMLDesc(0);
parser.parseDomainXML(domXml);
disks = parser.getDisks();
if (!attach) {
for (final DiskDef disk : disks) {
final String file = disk.getDiskPath();
if (file != null && file.equalsIgnoreCase(attachingDisk.getPath())) {
@ -2352,17 +2370,31 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
throw new InternalErrorException("disk: " + attachingDisk.getPath() + " is not attached before");
}
} else {
DiskDef.DiskBus busT = DiskDef.DiskBus.VIRTIO;
for (final DiskDef disk : disks) {
if (disk.getDeviceType() == DeviceType.DISK) {
if (disk.getBusType() == DiskDef.DiskBus.SCSI) {
busT = DiskDef.DiskBus.SCSI;
}
break;
}
}
diskdef = new DiskDef();
if (busT == DiskDef.DiskBus.SCSI) {
diskdef.setQemuDriver(true);
diskdef.setDiscard(DiscardType.UNMAP);
}
if (attachingPool.getType() == StoragePoolType.RBD) {
diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), attachingPool.getAuthUserName(),
attachingPool.getUuid(), devId, DiskDef.DiskBus.VIRTIO, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
attachingPool.getUuid(), devId, busT, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
} else if (attachingPool.getType() == StoragePoolType.Gluster) {
diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), null,
null, devId, DiskDef.DiskBus.VIRTIO, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
null, devId, busT, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO, DiskDef.DiskFmtType.QCOW2);
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, busT, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.RAW) {
diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO);
diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, busT);
}
if (bytesReadRate != null && bytesReadRate > 0) {
diskdef.setBytesReadRate(bytesReadRate);
@ -2453,6 +2485,7 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
public StartupCommand[] initialize() {
final List<Object> info = getHostInfo();
_totalMemory = (Long)info.get(2);
final StartupRoutingCommand cmd =
new StartupRoutingCommand((Integer)info.get(0), (Long)info.get(1), (Long)info.get(2), (Long)info.get(4), (String)info.get(3), _hypervisorType,
@ -2961,19 +2994,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
boolean isGuestPVEnabled(final String guestOSName) {
if (guestOSName == null) {
return false;
}
if (guestOSName.startsWith("Ubuntu") || guestOSName.startsWith("Fedora 13") || guestOSName.startsWith("Fedora 12") || guestOSName.startsWith("Fedora 11") ||
guestOSName.startsWith("Fedora 10") || guestOSName.startsWith("Fedora 9") || guestOSName.startsWith("CentOS 5.3") || guestOSName.startsWith("CentOS 5.4") ||
guestOSName.startsWith("CentOS 5.5") || guestOSName.startsWith("CentOS") || guestOSName.startsWith("Fedora") ||
guestOSName.startsWith("Red Hat Enterprise Linux 5.3") || guestOSName.startsWith("Red Hat Enterprise Linux 5.4") ||
guestOSName.startsWith("Red Hat Enterprise Linux 5.5") || guestOSName.startsWith("Red Hat Enterprise Linux 6") || guestOSName.startsWith("Debian GNU/Linux") ||
guestOSName.startsWith("FreeBSD 10") || guestOSName.startsWith("Oracle") || guestOSName.startsWith("Other PV")) {
return true;
} else {
return false;
}
DiskDef.DiskBus db = getGuestDiskModel(guestOSName);
return db != DiskDef.DiskBus.IDE;
}
public boolean isCentosHost() {
@ -2984,14 +3006,42 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
public DiskDef.DiskBus getDiskModelFromVMDetail(final VirtualMachineTO vmTO) {
Map<String, String> details = vmTO.getDetails();
if (details == null) {
return null;
}
final String rootDiskController = details.get(VmDetailConstants.ROOT_DISK_CONTROLLER);
if (StringUtils.isNotBlank(rootDiskController)) {
s_logger.debug("Passed custom disk bus " + rootDiskController);
for (final DiskDef.DiskBus bus : DiskDef.DiskBus.values()) {
if (bus.toString().equalsIgnoreCase(rootDiskController)) {
s_logger.debug("Found matching enum for disk bus " + rootDiskController);
return bus;
}
}
}
return null;
}
private DiskDef.DiskBus getGuestDiskModel(final String platformEmulator) {
if (isGuestPVEnabled(platformEmulator)) {
if (platformEmulator == null) {
return DiskDef.DiskBus.IDE;
} else if (platformEmulator.startsWith("Other PV Virtio-SCSI")) {
return DiskDef.DiskBus.SCSI;
} else if (platformEmulator.startsWith("Ubuntu") || platformEmulator.startsWith("Fedora 13") || platformEmulator.startsWith("Fedora 12") || platformEmulator.startsWith("Fedora 11") ||
platformEmulator.startsWith("Fedora 10") || platformEmulator.startsWith("Fedora 9") || platformEmulator.startsWith("CentOS 5.3") || platformEmulator.startsWith("CentOS 5.4") ||
platformEmulator.startsWith("CentOS 5.5") || platformEmulator.startsWith("CentOS") || platformEmulator.startsWith("Fedora") ||
platformEmulator.startsWith("Red Hat Enterprise Linux 5.3") || platformEmulator.startsWith("Red Hat Enterprise Linux 5.4") ||
platformEmulator.startsWith("Red Hat Enterprise Linux 5.5") || platformEmulator.startsWith("Red Hat Enterprise Linux 6") || platformEmulator.startsWith("Debian GNU/Linux") ||
platformEmulator.startsWith("FreeBSD 10") || platformEmulator.startsWith("Oracle") || platformEmulator.startsWith("Other PV")) {
return DiskDef.DiskBus.VIRTIO;
} else {
return DiskDef.DiskBus.IDE;
}
}
}
private void cleanupVMNetworks(final Connect conn, final List<InterfaceDef> nics) {
if (nics != null) {
for (final InterfaceDef nic : nics) {
@ -3586,4 +3636,8 @@ public class LibvirtComputingResource extends ServerResourceBase implements Serv
}
}
}
public long getTotalMemory() {
return _totalMemory;
}
}

View File

@ -545,6 +545,23 @@ public class LibvirtVMDef {
}
}
public enum DiscardType {
IGNORE("ignore"), UNMAP("unmap");
String _discardType;
DiscardType(String discardType) {
_discardType = discardType;
}
@Override
public String toString() {
if (_discardType == null) {
return "ignore";
}
return _discardType;
}
}
private DeviceType _deviceType; /* floppy, disk, cdrom */
private DiskType _diskType;
private DiskProtocol _diskProtocol;
@ -566,6 +583,15 @@ public class LibvirtVMDef {
private DiskCacheMode _diskCacheMode;
private String _serial;
private boolean qemuDriver = true;
private DiscardType _discard = DiscardType.IGNORE;
public DiscardType getDiscard() {
return _discard;
}
public void setDiscard(DiscardType discard) {
this._discard = discard;
}
public void setDeviceType(DeviceType deviceType) {
_deviceType = deviceType;
@ -584,18 +610,36 @@ public class LibvirtVMDef {
/* skip iso label */
private String getDevLabel(int devId, DiskBus bus) {
if (devId < 0) {
return "";
}
if (devId == 2) {
devId++;
}
char suffix = (char)('a' + devId);
if (bus == DiskBus.SCSI) {
return "sd" + suffix;
return "sd" + getDevLabelSuffix(devId);
} else if (bus == DiskBus.VIRTIO) {
return "vd" + suffix;
return "vd" + getDevLabelSuffix(devId);
}
return "hd" + suffix;
return "hd" + getDevLabelSuffix(devId);
}
private String getDevLabelSuffix(int deviceIndex) {
if (deviceIndex < 0) {
return "";
}
int base = 'z' - 'a' + 1;
String labelSuffix = "";
do {
char suffix = (char)('a' + (deviceIndex % base));
labelSuffix = suffix + labelSuffix;
deviceIndex = (deviceIndex / base) - 1;
} while (deviceIndex >= 0);
return labelSuffix;
}
public void defFileBasedDisk(String filePath, int devId, DiskBus bus, DiskFmtType diskFmtType) {
@ -716,11 +760,6 @@ public class LibvirtVMDef {
return _diskFmtType;
}
public int getDiskSeq() {
char suffix = _diskLabel.charAt(_diskLabel.length() - 1);
return suffix - 'a';
}
public void setBytesReadRate(Long bytesReadRate) {
_bytesReadRate = bytesReadRate;
}
@ -764,7 +803,11 @@ public class LibvirtVMDef {
diskBuilder.append(">\n");
if(qemuDriver) {
diskBuilder.append("<driver name='qemu'" + " type='" + _diskFmtType
+ "' cache='" + _diskCacheMode + "' " + "/>\n");
+ "' cache='" + _diskCacheMode + "' ");
if(_discard != null && _discard != DiscardType.IGNORE) {
diskBuilder.append("discard='" + _discard.toString() + "' ");
}
diskBuilder.append("/>\n");
}
if (_diskType == DiskType.FILE) {
@ -1345,6 +1388,37 @@ public class LibvirtVMDef {
}
}
public static class SCSIDef {
private short index = 0;
private int domain = 0;
private int bus = 0;
private int slot = 9;
private int function = 0;
public SCSIDef(short index, int domain, int bus, int slot, int function) {
this.index = index;
this.domain = domain;
this.bus = bus;
this.slot = slot;
this.function = function;
}
public SCSIDef() {
}
@Override
public String toString() {
StringBuilder scsiBuilder = new StringBuilder();
scsiBuilder.append(String.format("<controller type='scsi' index='%d' mode='virtio-scsi'>\n", this.index ));
scsiBuilder.append(String.format("<address type='pci' domain='0x%04X' bus='0x%02X' slot='0x%02X' function='0x%01X'/>\n",
this.domain, this.bus, this.slot, this.function ) );
scsiBuilder.append("</controller>");
return scsiBuilder.toString();
}
}
public static class InputDef {
private final String _type; /* tablet, mouse */
private final String _bus; /* ps2, usb, xen */

View File

@ -0,0 +1,45 @@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
package com.cloud.hypervisor.kvm.resource.wrapper;
import java.util.Map;
import com.cloud.agent.api.Answer;
import com.cloud.agent.api.SetHostParamsCommand;
import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.resource.CommandWrapper;
import com.cloud.resource.ResourceWrapper;
@ResourceWrapper(handles = SetHostParamsCommand.class)
public final class LibvirtSetHostParamsCommandWrapper extends CommandWrapper<SetHostParamsCommand, Answer, LibvirtComputingResource> {
@Override
public Answer execute(final SetHostParamsCommand command, final LibvirtComputingResource libvirtComputingResource) {
final Map<String, String> params = command.getParams();
boolean success = libvirtComputingResource.getVirtRouterResource().configureHostParams(params);
if (!success) {
return new Answer(command, false, "Failed to set host parameters");
} else {
return new Answer(command, true, null);
}
}
}

View File

@ -24,6 +24,7 @@ import java.util.List;
import org.apache.log4j.Logger;
import org.libvirt.Connect;
import org.libvirt.Domain;
import org.libvirt.DomainInfo.DomainState;
import org.libvirt.LibvirtException;
@ -60,6 +61,17 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper<StartComman
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = libvirtComputingResource.getLibvirtUtilitiesHelper();
Connect conn = null;
try {
vm = libvirtComputingResource.createVMFromSpec(vmSpec);
conn = libvirtUtilitiesHelper.getConnectionByType(vm.getHvsType());
Long remainingMem = getFreeMemory(conn, libvirtComputingResource);
if (remainingMem == null){
return new StartAnswer(command, "failed to get free memory");
} else if (remainingMem < vmSpec.getMinRam()) {
return new StartAnswer(command, "Not enough memory on the host, remaining: " + remainingMem + ", asking: " + vmSpec.getMinRam());
}
final NicTO[] nics = vmSpec.getNics();
for (final NicTO nic : nics) {
@ -68,8 +80,6 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper<StartComman
}
}
vm = libvirtComputingResource.createVMFromSpec(vmSpec);
conn = libvirtUtilitiesHelper.getConnectionByType(vm.getHvsType());
libvirtComputingResource.createVbd(conn, vmSpec, vmName, vm);
if (!storagePoolMgr.connectPhysicalDisksViaVmSpec(vmSpec)) {
@ -150,4 +160,22 @@ public final class LibvirtStartCommandWrapper extends CommandWrapper<StartComman
}
}
}
private Long getFreeMemory(final Connect conn, final LibvirtComputingResource libvirtComputingResource){
try {
long allocatedMem = 0;
int[] ids = conn.listDomains();
for(int id :ids) {
Domain dm = conn.domainLookupByID(id);
allocatedMem += dm.getMaxMemory() * 1024L;
s_logger.debug("vm: " + dm.getName() + " mem: " + dm.getMaxMemory() * 1024L);
}
Long remainingMem = libvirtComputingResource.getTotalMemory() - allocatedMem;
s_logger.debug("remaining mem" + remainingMem);
return remainingMem;
} catch (Exception e) {
s_logger.debug("failed to get free memory", e);
return null;
}
}
}

View File

@ -88,6 +88,8 @@ import com.cloud.hypervisor.kvm.resource.LibvirtComputingResource;
import com.cloud.hypervisor.kvm.resource.LibvirtConnection;
import com.cloud.hypervisor.kvm.resource.LibvirtDomainXMLParser;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DeviceType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiscardType;
import com.cloud.hypervisor.kvm.resource.LibvirtVMDef.DiskDef.DiskProtocol;
import com.cloud.storage.JavaStorageLayer;
import com.cloud.storage.Storage.ImageFormat;
@ -972,13 +974,12 @@ public class KVMStorageProcessor implements StorageProcessor {
DiskDef diskdef = null;
final KVMStoragePool attachingPool = attachingDisk.getPool();
try {
dm = conn.domainLookupByName(vmName);
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
final String domXml = dm.getXMLDesc(0);
parser.parseDomainXML(domXml);
disks = parser.getDisks();
if (!attach) {
dm = conn.domainLookupByName(vmName);
final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
final String xml = dm.getXMLDesc(0);
parser.parseDomainXML(xml);
disks = parser.getDisks();
if (attachingPool.getType() == StoragePoolType.RBD) {
if (resource.getHypervisorType() == Hypervisor.HypervisorType.LXC) {
final String device = resource.mapRbdDevice(attachingDisk);
@ -1000,7 +1001,20 @@ public class KVMStorageProcessor implements StorageProcessor {
throw new InternalErrorException("disk: " + attachingDisk.getPath() + " is not attached before");
}
} else {
DiskDef.DiskBus busT = DiskDef.DiskBus.VIRTIO;
for (final DiskDef disk : disks) {
if (disk.getDeviceType() == DeviceType.DISK) {
if (disk.getBusType() == DiskDef.DiskBus.SCSI) {
busT = DiskDef.DiskBus.SCSI;
}
break;
}
}
diskdef = new DiskDef();
if (busT == DiskDef.DiskBus.SCSI) {
diskdef.setQemuDriver(true);
diskdef.setDiscard(DiscardType.UNMAP);
}
diskdef.setSerial(serial);
if (attachingPool.getType() == StoragePoolType.RBD) {
if(resource.getHypervisorType() == Hypervisor.HypervisorType.LXC){
@ -1008,24 +1022,24 @@ public class KVMStorageProcessor implements StorageProcessor {
final String device = resource.mapRbdDevice(attachingDisk);
if (device != null) {
s_logger.debug("RBD device on host is: "+device);
diskdef.defBlockBasedDisk(device, devId, DiskDef.DiskBus.VIRTIO);
diskdef.defBlockBasedDisk(device, devId, busT);
} else {
throw new InternalErrorException("Error while mapping disk "+attachingDisk.getPath()+" on host");
}
} else {
diskdef.defNetworkBasedDisk(attachingDisk.getPath(), attachingPool.getSourceHost(), attachingPool.getSourcePort(), attachingPool.getAuthUserName(),
attachingPool.getUuid(), devId, DiskDef.DiskBus.VIRTIO, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
attachingPool.getUuid(), devId, busT, DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
}
} else if (attachingPool.getType() == StoragePoolType.Gluster) {
final String mountpoint = attachingPool.getLocalPath();
final String path = attachingDisk.getPath();
final String glusterVolume = attachingPool.getSourceDir().replace("/", "");
diskdef.defNetworkBasedDisk(glusterVolume + path.replace(mountpoint, ""), attachingPool.getSourceHost(), attachingPool.getSourcePort(), null,
null, devId, DiskDef.DiskBus.VIRTIO, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
null, devId, busT, DiskProtocol.GLUSTER, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.QCOW2) {
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO, DiskDef.DiskFmtType.QCOW2);
diskdef.defFileBasedDisk(attachingDisk.getPath(), devId, busT, DiskDef.DiskFmtType.QCOW2);
} else if (attachingDisk.getFormat() == PhysicalDiskFormat.RAW) {
diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, DiskDef.DiskBus.VIRTIO);
diskdef.defBlockBasedDisk(attachingDisk.getPath(), devId, busT);
}
if ((bytesReadRate != null) && (bytesReadRate > 0)) {

View File

@ -4855,6 +4855,7 @@ public class LibvirtComputingResourceTest {
final NicTO nic = Mockito.mock(NicTO.class);
final NicTO[] nics = new NicTO[]{nic};
final int[] vms = new int[0];
final String vmName = "Test";
final String controlIp = "127.0.0.1";
@ -4868,6 +4869,7 @@ public class LibvirtComputingResourceTest {
when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
try {
when(libvirtUtilitiesHelper.getConnectionByType(vmDef.getHvsType())).thenReturn(conn);
when(conn.listDomains()).thenReturn(vms);
doNothing().when(libvirtComputingResource).createVbd(conn, vmSpec, vmName, vmDef);
} catch (final LibvirtException e) {
fail(e.getMessage());
@ -4927,6 +4929,7 @@ public class LibvirtComputingResourceTest {
final NicTO nic = Mockito.mock(NicTO.class);
final NicTO[] nics = new NicTO[]{nic};
final int[] vms = new int[0];
final String vmName = "Test";
final String controlIp = "127.0.0.1";
@ -4940,6 +4943,7 @@ public class LibvirtComputingResourceTest {
when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
try {
when(libvirtUtilitiesHelper.getConnectionByType(vmDef.getHvsType())).thenReturn(conn);
when(conn.listDomains()).thenReturn(vms);
doNothing().when(libvirtComputingResource).createVbd(conn, vmSpec, vmName, vmDef);
} catch (final LibvirtException e) {
fail(e.getMessage());
@ -4989,6 +4993,61 @@ public class LibvirtComputingResourceTest {
}
}
@Test
public void testStartCommandHostMemory() {
final VirtualMachineTO vmSpec = Mockito.mock(VirtualMachineTO.class);
final com.cloud.host.Host host = Mockito.mock(com.cloud.host.Host.class);
final boolean executeInSequence = false;
final StartCommand command = new StartCommand(vmSpec, host, executeInSequence);
final KVMStoragePoolManager storagePoolMgr = Mockito.mock(KVMStoragePoolManager.class);
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = Mockito.mock(LibvirtUtilitiesHelper.class);
final Connect conn = Mockito.mock(Connect.class);
final LibvirtVMDef vmDef = Mockito.mock(LibvirtVMDef.class);
final NicTO nic = Mockito.mock(NicTO.class);
final NicTO[] nics = new NicTO[]{nic};
int vmId = 1;
final int[] vms = new int[]{vmId};
final Domain dm = Mockito.mock(Domain.class);
final String vmName = "Test";
when(libvirtComputingResource.getStoragePoolMgr()).thenReturn(storagePoolMgr);
when(vmSpec.getNics()).thenReturn(nics);
when(vmSpec.getType()).thenReturn(VirtualMachine.Type.User);
when(vmSpec.getName()).thenReturn(vmName);
when(vmSpec.getMaxRam()).thenReturn(512L);
when(libvirtComputingResource.createVMFromSpec(vmSpec)).thenReturn(vmDef);
when(libvirtComputingResource.getLibvirtUtilitiesHelper()).thenReturn(libvirtUtilitiesHelper);
try {
when(libvirtUtilitiesHelper.getConnectionByType(vmDef.getHvsType())).thenReturn(conn);
when(conn.listDomains()).thenReturn(vms);
when(conn.domainLookupByID(vmId)).thenReturn(dm);
when(dm.getMaxMemory()).thenReturn(1024L);
when(dm.getName()).thenReturn(vmName);
when(libvirtComputingResource.getTotalMemory()).thenReturn(2048*1024L);
doNothing().when(libvirtComputingResource).createVbd(conn, vmSpec, vmName, vmDef);
} catch (final LibvirtException e) {
fail(e.getMessage());
} catch (final InternalErrorException e) {
fail(e.getMessage());
} catch (final URISyntaxException e) {
fail(e.getMessage());
}
when(storagePoolMgr.connectPhysicalDisksViaVmSpec(vmSpec)).thenReturn(true);
final LibvirtRequestWrapper wrapper = LibvirtRequestWrapper.getInstance();
assertNotNull(wrapper);
final Answer answer = wrapper.execute(command, libvirtComputingResource);
assertTrue(answer.getResult());
}
@Test
public void testUpdateHostPasswordCommand() {
final LibvirtUtilitiesHelper libvirtUtilitiesHelper = Mockito.mock(LibvirtUtilitiesHelper.class);

View File

@ -29,7 +29,6 @@ import javax.naming.ConfigurationException;
import org.apache.log4j.Logger;
import com.vmware.vim25.ClusterDasConfigInfo;
import com.vmware.vim25.ManagedObjectReference;
import org.apache.cloudstack.api.ApiConstants;
@ -344,8 +343,7 @@ public class VmwareServerDiscoverer extends DiscovererBase implements Discoverer
return null;
} else {
ClusterMO clusterMo = new ClusterMO(context, morCluster);
ClusterDasConfigInfo dasConfig = clusterMo.getDasConfig();
if (dasConfig != null && dasConfig.isEnabled() != null && dasConfig.isEnabled().booleanValue()) {
if (clusterMo.isHAEnabled()) {
clusterDetails.put("NativeHA", "true");
_clusterDetailsDao.persist(clusterId, clusterDetails);
}

View File

@ -22,6 +22,8 @@ import java.util.Map;
import com.vmware.vim25.ManagedObjectReference;
import org.apache.cloudstack.framework.config.ConfigKey;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.hypervisor.vmware.mo.HostMO;
import com.cloud.hypervisor.vmware.util.VmwareContext;
@ -30,6 +32,9 @@ import com.cloud.utils.Pair;
public interface VmwareManager {
public final String CONTEXT_STOCK_NAME = "vmwareMgr";
public static final ConfigKey<Long> s_vmwareNicHotplugWaitTimeout = new ConfigKey<Long>("Advanced", Long.class, "vmware.nic.hotplug.wait.timeout", "15000",
"Wait timeout (milli seconds) for hot plugged NIC of VM to be detected by guest OS.", false, ConfigKey.Scope.Global);
String composeWorkerName();
String getSystemVMIsoFileNameOnDatastore();

View File

@ -45,6 +45,8 @@ import org.apache.cloudstack.api.command.admin.zone.ListVmwareDcsCmd;
import org.apache.cloudstack.api.command.admin.zone.RemoveVmwareDcCmd;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager;
import org.apache.cloudstack.framework.config.ConfigKey;
import org.apache.cloudstack.framework.config.Configurable;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.cloudstack.utils.identity.ManagementServerNode;
@ -123,12 +125,11 @@ import com.cloud.utils.script.Script;
import com.cloud.utils.ssh.SshHelper;
import com.cloud.vm.DomainRouterVO;
public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener, VmwareDatacenterService {
public class VmwareManagerImpl extends ManagerBase implements VmwareManager, VmwareStorageMount, Listener, VmwareDatacenterService, Configurable {
private static final Logger s_logger = Logger.getLogger(VmwareManagerImpl.class);
private static final int STARTUP_DELAY = 60000; // 60 seconds
private static final long DEFAULT_HOST_SCAN_INTERVAL = 600000; // every 10 minutes
private long _hostScanInterval = DEFAULT_HOST_SCAN_INTERVAL;
private int _timeout;
@ -189,7 +190,7 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
private String _rootDiskController = DiskControllerType.ide.toString();
private String _dataDiskController = DiskControllerType.osdefault.toString();
private final String _dataDiskController = DiskControllerType.osdefault.toString();
private final Map<String, String> _storageMounts = new HashMap<String, String>();
@ -204,6 +205,16 @@ public class VmwareManagerImpl extends ManagerBase implements VmwareManager, Vmw
_storageMgr = new VmwareStorageManagerImpl(this);
}
@Override
public String getConfigComponentName() {
return VmwareManagerImpl.class.getSimpleName();
}
@Override
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {s_vmwareNicHotplugWaitTimeout};
}
@Override
public boolean configure(String name, Map<String, Object> params) throws ConfigurationException {
s_logger.info("Configure VmwareManagerImpl, manager name: " + name);

View File

@ -49,9 +49,9 @@ import org.apache.log4j.NDC;
import com.google.gson.Gson;
import com.vmware.vim25.AboutInfo;
import com.vmware.vim25.BoolPolicy;
import com.vmware.vim25.ClusterDasConfigInfo;
import com.vmware.vim25.ComputeResourceSummary;
import com.vmware.vim25.CustomFieldStringValue;
import com.vmware.vim25.DasVmPriority;
import com.vmware.vim25.DVPortConfigInfo;
import com.vmware.vim25.DVPortConfigSpec;
import com.vmware.vim25.DatastoreSummary;
@ -877,7 +877,8 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
// when we dynamically plug in a new NIC into virtual router, it may take time to show up in guest OS
// we use a waiting loop here as a workaround to synchronize activities in systems
long startTick = System.currentTimeMillis();
while (System.currentTimeMillis() - startTick < 15000) {
long waitTimeoutMillis = VmwareManager.s_vmwareNicHotplugWaitTimeout.value();
while (System.currentTimeMillis() - startTick < waitTimeoutMillis) {
// TODO : this is a temporary very inefficient solution, will refactor it later
Pair<Boolean, String> result = SshHelper.sshExecute(routerIp, DefaultDomRSshPort, "root", mgr.getSystemVMKeyFile(), null, "ls /proc/sys/net/ipv4/conf");
@ -1672,6 +1673,11 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
if (vmFolderExists && vmxFileFullPath != null) { // VM can be registered only if .vmx is present.
registerVm(vmNameOnVcenter, dsRootVolumeIsOn);
vmMo = hyperHost.findVmOnHyperHost(vmInternalCSName);
if (vmMo != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found registered vm " + vmInternalCSName + " at host " + hyperHost.getHyperHostName());
}
}
tearDownVm(vmMo);
}else if (!hyperHost.createBlankVm(vmNameOnVcenter, vmInternalCSName, vmSpec.getCpus(), vmSpec.getMaxSpeed().intValue(),
getReservedCpuMHZ(vmSpec), vmSpec.getLimitCpuUse(), (int)(vmSpec.getMaxRam() / (1024 * 1024)), getReservedMemoryMb(vmSpec),
@ -2024,10 +2030,15 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
throw new Exception("Failed to configure VM before start. vmName: " + vmInternalCSName);
}
if (vmSpec.getType() == VirtualMachine.Type.DomainRouter) {
hyperHost.setRestartPriorityForVM(vmMo, DasVmPriority.HIGH.value());
}
//For resizing root disk.
if (rootDiskTO != null && !hasSnapshot) {
resizeRootDisk(vmMo, rootDiskTO, hyperHost, context);
}
//
// Post Configuration
//
@ -4882,8 +4893,7 @@ public class VmwareResource implements StoragePoolResource, ServerResource, Vmwa
private void fillHostDetailsInfo(VmwareContext serviceContext, Map<String, String> details) throws Exception {
VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext());
ClusterDasConfigInfo dasConfig = hyperHost.getDasConfig();
if (dasConfig != null && dasConfig.isEnabled() != null && dasConfig.isEnabled().booleanValue()) {
if (hyperHost.isHAEnabled()) {
details.put("NativeHA", "true");
}
}

View File

@ -182,7 +182,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
return null;
}
private VirtualMachineMO copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl,
private Pair<VirtualMachineMO, Long> copyTemplateFromSecondaryToPrimary(VmwareHypervisorHost hyperHost, DatastoreMO datastoreMo, String secondaryStorageUrl,
String templatePathAtSecondaryStorage, String templateName, String templateUuid, boolean createSnapshot, Integer nfsVersion) throws Exception {
s_logger.info("Executing copyTemplateFromSecondaryToPrimary. secondaryStorage: " + secondaryStorageUrl + ", templatePathAtSecondaryStorage: " +
@ -229,6 +229,12 @@ public class VmwareStorageProcessor implements StorageProcessor {
throw new Exception(msg);
}
OVAProcessor processor = new OVAProcessor();
Map<String, Object> params = new HashMap<String, Object>();
params.put(StorageLayer.InstanceConfigKey, _storage);
processor.configure("OVA Processor", params);
long virtualSize = processor.getTemplateVirtualSize(secondaryMountPoint + "/" + templatePathAtSecondaryStorage, templateName);
if (createSnapshot) {
if (vmMo.createSnapshot("cloud.template.base", "Base snapshot", false, false)) {
// the same template may be deployed with multiple copies at per-datastore per-host basis,
@ -246,7 +252,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
}
}
return vmMo;
return new Pair<VirtualMachineMO, Long>(vmMo, new Long(virtualSize));
}
@Override
@ -322,6 +328,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
DatacenterMO dcMo = new DatacenterMO(context, hyperHost.getHyperHostDatacenter());
VirtualMachineMO templateMo = VmwareHelper.pickOneVmOnRunningHost(dcMo.findVmByNameAndLabel(templateUuidName), true);
DatastoreMO dsMo = null;
Pair<VirtualMachineMO, Long> vmInfo = null;
if (templateMo == null) {
if (s_logger.isInfoEnabled()) {
@ -343,9 +350,10 @@ public class VmwareStorageProcessor implements StorageProcessor {
dsMo = new DatastoreMO(context, morDs);
if (managed) {
VirtualMachineMO vmMo = copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(),
vmInfo = copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(),
managedStoragePoolRootVolumeName, false, _nfsVersion);
VirtualMachineMO vmMo = vmInfo.first();
vmMo.unregisterVm();
String[] vmwareLayoutFilePair = VmwareStorageLayoutHelper.getVmdkFilePairDatastorePath(dsMo, managedStoragePoolRootVolumeName,
@ -360,7 +368,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
dsMo.deleteFolder(folderToDelete, dcMo.getMor());
}
else {
copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(),
vmInfo = copyTemplateFromSecondaryToPrimary(hyperHost, dsMo, secondaryStorageUrl, templateInfo.first(), templateInfo.second(),
templateUuidName, true, _nfsVersion);
}
} else {
@ -378,7 +386,7 @@ public class VmwareStorageProcessor implements StorageProcessor {
else {
newTemplate.setPath(templateUuidName);
}
newTemplate.setSize(new Long(0)); // TODO: replace 0 with correct template physical_size.
newTemplate.setSize((vmInfo != null)? vmInfo.second() : new Long(0));
return new CopyCmdAnswer(newTemplate);
} catch (Throwable e) {

55
plugins/metrics/pom.xml Normal file
View File

@ -0,0 +1,55 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<artifactId>cloud-plugin-metrics</artifactId>
<name>Apache CloudStack Plugin - Metrics</name>
<parent>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloudstack-plugins</artifactId>
<version>4.10.0.0-SNAPSHOT</version>
<relativePath>../pom.xml</relativePath>
</parent>
<dependencies>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-api</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.cloudstack</groupId>
<artifactId>cloud-utils</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<argLine>-Xmx1024m</argLine>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -14,4 +14,5 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
name=metrics
parent=api

View File

@ -0,0 +1,27 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<beans xmlns="http://www.springframework.org/schema/beans"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.springframework.org/schema/beans
http://www.springframework.org/schema/beans/spring-beans.xsd"
>
<bean id="metricsService" class="org.apache.cloudstack.metrics.MetricsServiceImpl" >
</bean>
</beans>

View File

@ -0,0 +1,51 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.command.admin.cluster.ListClustersCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.metrics.MetricsService;
import org.apache.cloudstack.response.ClusterMetricsResponse;
import javax.inject.Inject;
import java.util.List;
@APICommand(name = ListClustersMetricsCmd.APINAME, description = "Lists clusters metrics", responseObject = ClusterMetricsResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, responseView = ResponseObject.ResponseView.Full,
since = "4.9.3", authorized = {RoleType.Admin})
/**
 * Admin API command that lists clusters together with their aggregated
 * metrics. Reuses all filtering parameters of {@link ListClustersCmd}.
 */
public class ListClustersMetricsCmd extends ListClustersCmd {
    public static final String APINAME = "listClustersMetrics";

    @Inject
    private MetricsService metricsService;

    /** Response name used to key the serialized answer, e.g. "listclustersmetricsresponse". */
    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
    }

    /**
     * Lists the clusters matching this command's filters and decorates each
     * one with metrics from the metrics service.
     */
    @Override
    public void execute() {
        final ListResponse<ClusterMetricsResponse> listResponse = new ListResponse<>();
        final List<ClusterMetricsResponse> metrics = metricsService.listClusterMetrics(getClusterResponses());
        listResponse.setResponses(metrics, metrics.size());
        listResponse.setResponseName(getCommandName());
        setResponseObject(listResponse);
    }
}

View File

@ -0,0 +1,54 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api;
import com.cloud.host.Host;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.command.admin.host.ListHostsCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.metrics.MetricsService;
import org.apache.cloudstack.response.HostMetricsResponse;
import javax.inject.Inject;
import java.util.List;
@APICommand(name = ListHostsMetricsCmd.APINAME, description = "Lists hosts metrics", responseObject = HostMetricsResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, responseView = ResponseObject.ResponseView.Full,
since = "4.9.3", authorized = {RoleType.Admin})
/**
 * Admin API command that lists routing (hypervisor) hosts together with
 * their metrics. Reuses all filtering parameters of {@link ListHostsCmd}.
 */
public class ListHostsMetricsCmd extends ListHostsCmd {
    public static final String APINAME = "listHostsMetrics";

    @Inject
    private MetricsService metricsService;

    /** Response name used to key the serialized answer. */
    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
    }

    /**
     * Restricts the listing to routing hosts, then decorates every matching
     * host with metrics from the metrics service.
     */
    @Override
    public void execute() {
        // Only routing hosts carry the metrics this API reports.
        setType(Host.Type.Routing.toString());
        final ListResponse<HostMetricsResponse> listResponse = new ListResponse<>();
        final List<HostMetricsResponse> metrics = metricsService.listHostMetrics(getHostResponses().getResponses());
        listResponse.setResponses(metrics, metrics.size());
        listResponse.setResponseName(getCommandName());
        setResponseObject(listResponse);
    }
}

View File

@ -0,0 +1,52 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.context.CallContext;
import org.apache.cloudstack.metrics.MetricsService;
import org.apache.cloudstack.response.InfrastructureResponse;
import javax.inject.Inject;
@APICommand(name = ListInfrastructureCmd.APINAME, description = "Lists infrastructure", responseObject = InfrastructureResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, responseView = ResponseObject.ResponseView.Full,
since = "4.9.3", authorized = {RoleType.Admin})
/**
 * Admin API command returning entity counts (zones, pods, clusters, hosts,
 * storage, system VMs, routers, CPU sockets) for the whole infrastructure.
 */
public class ListInfrastructureCmd extends BaseCmd {
    public static final String APINAME = "listInfrastructure";

    @Inject
    private MetricsService metricsService;

    /** Response name used to key the serialized answer. */
    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
    }

    /** The listing is attributed to the calling account. */
    @Override
    public long getEntityOwnerId() {
        return CallContext.current().getCallingAccountId();
    }

    /** Collects infrastructure-wide counts and returns them as one response. */
    @Override
    public void execute() {
        final InfrastructureResponse infrastructure = metricsService.listInfrastructure();
        infrastructure.setResponseName(getCommandName());
        setResponseObject(infrastructure);
    }
}

View File

@ -0,0 +1,52 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.metrics.MetricsService;
import org.apache.cloudstack.response.StoragePoolMetricsResponse;
import javax.inject.Inject;
import java.util.List;
@APICommand(name = ListStoragePoolsMetricsCmd.APINAME, description = "Lists storage pool metrics", responseObject = StoragePoolMetricsResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, responseView = ResponseObject.ResponseView.Full,
since = "4.9.3", authorized = {RoleType.Admin})
/**
 * Admin API command that lists primary storage pools together with their
 * capacity metrics. Reuses all parameters of {@link ListStoragePoolsCmd}.
 */
public class ListStoragePoolsMetricsCmd extends ListStoragePoolsCmd {
    public static final String APINAME = "listStoragePoolsMetrics";

    @Inject
    private MetricsService metricsService;

    /** Response name used to key the serialized answer. */
    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
    }

    /**
     * Searches for storage pools via the query service and decorates each
     * result with metrics from the metrics service.
     */
    @Override
    public void execute() {
        final ListResponse<StoragePoolMetricsResponse> listResponse = new ListResponse<>();
        final List<StoragePoolMetricsResponse> metrics = metricsService.listStoragePoolMetrics(_queryService.searchForStoragePools(this).getResponses());
        listResponse.setResponses(metrics, metrics.size());
        listResponse.setResponseName(getCommandName());
        setResponseObject(listResponse);
    }
}

View File

@ -0,0 +1,51 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.command.admin.vm.ListVMsCmdByAdmin;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.metrics.MetricsService;
import org.apache.cloudstack.response.VmMetricsResponse;
import javax.inject.Inject;
import java.util.List;
@APICommand(name = ListVMsMetricsCmd.APINAME, description = "Lists VM metrics", responseObject = VmMetricsResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, responseView = ResponseObject.ResponseView.Full,
since = "4.9.3", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
/**
 * API command that lists virtual machines together with their usage metrics.
 * Reuses all filtering parameters of {@link ListVMsCmdByAdmin}.
 */
public class ListVMsMetricsCmd extends ListVMsCmdByAdmin {
    public static final String APINAME = "listVirtualMachinesMetrics";

    @Inject
    private MetricsService metricsService;

    /** Response name used to key the serialized answer. */
    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
    }

    /**
     * Searches for user VMs via the query service and decorates each result
     * with metrics from the metrics service.
     */
    @Override
    public void execute() {
        final ListResponse<VmMetricsResponse> listResponse = new ListResponse<>();
        final List<VmMetricsResponse> metrics = metricsService.listVmMetrics(_queryService.searchForUserVMs(this).getResponses());
        listResponse.setResponses(metrics, metrics.size());
        listResponse.setResponseName(getCommandName());
        setResponseObject(listResponse);
    }
}

View File

@ -0,0 +1,51 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.command.admin.volume.ListVolumesCmdByAdmin;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.metrics.MetricsService;
import org.apache.cloudstack.response.VolumeMetricsResponse;
import javax.inject.Inject;
import java.util.List;
@APICommand(name = ListVolumesMetricsCmd.APINAME, description = "Lists volume metrics", responseObject = VolumeMetricsResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, responseView = ResponseObject.ResponseView.Full,
since = "4.9.3", authorized = {RoleType.Admin, RoleType.ResourceAdmin, RoleType.DomainAdmin, RoleType.User})
/**
 * API command that lists volumes together with their size/usage metrics.
 * Reuses all filtering parameters of {@link ListVolumesCmdByAdmin}.
 */
public class ListVolumesMetricsCmd extends ListVolumesCmdByAdmin {
    public static final String APINAME = "listVolumesMetrics";

    @Inject
    private MetricsService metricsService;

    /** Response name used to key the serialized answer. */
    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
    }

    /**
     * Searches for volumes via the query service and decorates each result
     * with metrics from the metrics service.
     */
    @Override
    public void execute() {
        final ListResponse<VolumeMetricsResponse> listResponse = new ListResponse<>();
        final List<VolumeMetricsResponse> metrics = metricsService.listVolumeMetrics(_queryService.searchForVolumes(this).getResponses());
        listResponse.setResponses(metrics, metrics.size());
        listResponse.setResponseName(getCommandName());
        setResponseObject(listResponse);
    }
}

View File

@ -0,0 +1,52 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.api;
import org.apache.cloudstack.acl.RoleType;
import org.apache.cloudstack.api.command.user.zone.ListZonesCmd;
import org.apache.cloudstack.api.response.ListResponse;
import org.apache.cloudstack.metrics.MetricsService;
import org.apache.cloudstack.response.ZoneMetricsResponse;
import javax.inject.Inject;
import java.util.List;
@APICommand(name = ListZonesMetricsCmd.APINAME, description = "Lists zone metrics", responseObject = ZoneMetricsResponse.class,
requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, responseView = ResponseObject.ResponseView.Full,
since = "4.9.3", authorized = {RoleType.Admin})
/**
 * Admin API command that lists zones together with their aggregated metrics.
 * Reuses all filtering parameters of {@link ListZonesCmd}.
 */
public class ListZonesMetricsCmd extends ListZonesCmd {
    public static final String APINAME = "listZonesMetrics";

    @Inject
    private MetricsService metricsService;

    /** Response name used to key the serialized answer. */
    @Override
    public String getCommandName() {
        return APINAME.toLowerCase() + BaseCmd.RESPONSE_SUFFIX;
    }

    /**
     * Lists data centers via the query service and decorates each zone with
     * metrics from the metrics service.
     */
    @Override
    public void execute() {
        final ListResponse<ZoneMetricsResponse> listResponse = new ListResponse<>();
        final List<ZoneMetricsResponse> metrics = metricsService.listZoneMetrics(_queryService.listDataCenters(this).getResponses());
        listResponse.setResponses(metrics, metrics.size());
        listResponse.setResponseName(getCommandName());
        setResponseObject(listResponse);
    }
}

View File

@ -0,0 +1,46 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.metrics;
import com.cloud.utils.component.PluggableService;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.response.ClusterMetricsResponse;
import org.apache.cloudstack.response.HostMetricsResponse;
import org.apache.cloudstack.response.InfrastructureResponse;
import org.apache.cloudstack.response.StoragePoolMetricsResponse;
import org.apache.cloudstack.response.VmMetricsResponse;
import org.apache.cloudstack.response.VolumeMetricsResponse;
import org.apache.cloudstack.response.ZoneMetricsResponse;
import java.util.List;
/**
 * Service contract for the metrics API plugin: produces metric-enriched
 * response objects from the plain list-API responses.
 *
 * Fix: the last four methods had copy-pasted {@code poolResponses} parameter
 * names; renamed to match their actual element types (source-compatible for
 * all callers, Java has no named arguments).
 */
public interface MetricsService extends PluggableService {

    /**
     * Returns counts of the infrastructure entities (zones, pods, clusters,
     * hosts, storage pools, image stores, system VMs, routers, CPU sockets).
     */
    InfrastructureResponse listInfrastructure();

    /** Enriches the given volume responses with volume metrics. */
    List<VolumeMetricsResponse> listVolumeMetrics(List<VolumeResponse> volumeResponses);

    /** Enriches the given user-VM responses with VM metrics. */
    List<VmMetricsResponse> listVmMetrics(List<UserVmResponse> vmResponses);

    /** Enriches the given storage-pool responses with capacity metrics. */
    List<StoragePoolMetricsResponse> listStoragePoolMetrics(List<StoragePoolResponse> poolResponses);

    /** Enriches the given host responses with usage metrics. */
    List<HostMetricsResponse> listHostMetrics(List<HostResponse> hostResponses);

    /** Enriches the given cluster responses with aggregated host metrics. */
    List<ClusterMetricsResponse> listClusterMetrics(List<ClusterResponse> clusterResponses);

    /** Enriches the given zone responses with aggregated metrics. */
    List<ZoneMetricsResponse> listZoneMetrics(List<ZoneResponse> zoneResponses);
}

View File

@ -0,0 +1,563 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.metrics;
import com.cloud.alert.AlertManager;
import com.cloud.api.ApiDBUtils;
import com.cloud.api.query.dao.HostJoinDao;
import com.cloud.api.query.vo.HostJoinVO;
import com.cloud.capacity.Capacity;
import com.cloud.capacity.CapacityManager;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.capacity.dao.CapacityDaoImpl;
import com.cloud.dc.DataCenter;
import com.cloud.dc.dao.ClusterDao;
import com.cloud.dc.dao.DataCenterDao;
import com.cloud.dc.dao.HostPodDao;
import com.cloud.deploy.DeploymentClusterPlanner;
import com.cloud.host.Host;
import com.cloud.host.HostStats;
import com.cloud.host.Status;
import com.cloud.host.dao.HostDao;
import com.cloud.org.Cluster;
import com.cloud.org.Grouping;
import com.cloud.org.Managed;
import com.cloud.utils.component.ComponentLifecycleBase;
import com.cloud.vm.VMInstanceVO;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.dao.DomainRouterDao;
import com.cloud.vm.dao.VMInstanceDao;
import org.apache.cloudstack.api.ApiErrorCode;
import org.apache.cloudstack.api.ListClustersMetricsCmd;
import org.apache.cloudstack.api.ListHostsMetricsCmd;
import org.apache.cloudstack.api.ListInfrastructureCmd;
import org.apache.cloudstack.api.ListStoragePoolsMetricsCmd;
import org.apache.cloudstack.api.ListVMsMetricsCmd;
import org.apache.cloudstack.api.ListVolumesMetricsCmd;
import org.apache.cloudstack.api.ListZonesMetricsCmd;
import org.apache.cloudstack.api.ServerApiException;
import org.apache.cloudstack.api.response.ClusterResponse;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.api.response.StoragePoolResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import org.apache.cloudstack.api.response.VolumeResponse;
import org.apache.cloudstack.api.response.ZoneResponse;
import org.apache.cloudstack.response.ClusterMetricsResponse;
import org.apache.cloudstack.response.HostMetricsResponse;
import org.apache.cloudstack.response.InfrastructureResponse;
import org.apache.cloudstack.response.StoragePoolMetricsResponse;
import org.apache.cloudstack.response.VmMetricsResponse;
import org.apache.cloudstack.response.VolumeMetricsResponse;
import org.apache.cloudstack.response.ZoneMetricsResponse;
import org.apache.cloudstack.storage.datastore.db.ImageStoreDao;
import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
import org.apache.commons.beanutils.BeanUtils;
import javax.inject.Inject;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.List;
public class MetricsServiceImpl extends ComponentLifecycleBase implements MetricsService {
@Inject
private DataCenterDao dataCenterDao;
@Inject
private HostPodDao podDao;
@Inject
private ClusterDao clusterDao;
@Inject
private HostDao hostDao;
@Inject
private HostJoinDao hostJoinDao;
@Inject
private PrimaryDataStoreDao storagePoolDao;
@Inject
private ImageStoreDao imageStoreDao;
@Inject
private VMInstanceDao vmInstanceDao;
@Inject
private DomainRouterDao domainRouterDao;
@Inject
private CapacityDao capacityDao;
// No-op constructor: all collaborators are field-injected (@Inject) by
// Spring after construction; only the superclass lifecycle init runs here.
protected MetricsServiceImpl() {
    super();
}
/**
 * Parses a cluster-detail ratio string (e.g. an overcommit ratio).
 * Returns 1.0 when the detail is absent (null).
 */
private Double findRatioValue(final String value) {
    return (value == null) ? 1.0 : Double.valueOf(value);
}
// Folds one host's capacity and live statistics into the given Metrics
// accumulator (presumably a per-cluster/zone aggregate — the Metrics class
// is defined elsewhere; TODO confirm accumulator semantics there).
private void updateHostMetrics(final Metrics metrics, final HostJoinVO host) {
    metrics.incrTotalHosts();
    // Allocated capacity = reserved + used, for both CPU and memory.
    metrics.addCpuAllocated(host.getCpuReservedCapacity() + host.getCpuUsedCapacity());
    metrics.addMemoryAllocated(host.getMemReservedCapacity() + host.getMemUsedCapacity());
    // Live stats may be unavailable (e.g. host not reporting); skip then.
    final HostStats hostStats = ApiDBUtils.getHostStatistics(host.getId());
    if (hostStats != null) {
        metrics.addCpuUsedPercentage(hostStats.getCpuUtilization());
        metrics.addMemoryUsed((long) hostStats.getUsedMemory());
        metrics.setMaximumCpuUsage(hostStats.getCpuUtilization());
        metrics.setMaximumMemoryUsage((long) hostStats.getUsedMemory());
    }
}
/**
 * Returns counts of all infrastructure entities known to this management
 * server: zones, pods, clusters, routing hosts, storage pools, image
 * stores, system VMs (console proxy + secondary storage), routers, and
 * the total number of CPU sockets across routing hosts.
 *
 * Improvement: the routing-host list was fetched twice from the DAO (once
 * for the count, once for the socket loop); it is now fetched once.
 */
@Override
public InfrastructureResponse listInfrastructure() {
    final InfrastructureResponse response = new InfrastructureResponse();
    response.setZones(dataCenterDao.listAllZones().size());
    response.setPods(podDao.listAllPods(null).size());
    response.setClusters(clusterDao.listAllClusters(null).size());
    // Fetch routing hosts once; reused for both the count and the socket sum.
    final List<? extends Host> routingHosts = hostDao.listByType(Host.Type.Routing);
    response.setHosts(routingHosts.size());
    response.setStoragePools(storagePoolDao.listAll().size());
    response.setImageStores(imageStoreDao.listImageStores().size());
    response.setSystemvms(vmInstanceDao.listByTypes(VirtualMachine.Type.ConsoleProxy, VirtualMachine.Type.SecondaryStorageVm).size());
    response.setRouters(domainRouterDao.listAll().size());
    int cpuSockets = 0;
    for (final Host host : routingHosts) {
        // Socket count can be null for hosts that never reported it.
        if (host.getCpuSockets() != null) {
            cpuSockets += host.getCpuSockets();
        }
    }
    response.setCpuSockets(cpuSockets);
    return response;
}
/**
 * Builds a VolumeMetricsResponse for every volume response by copying the
 * base properties and adding human-readable size and storage-type fields.
 *
 * @throws ServerApiException when bean property copying fails
 */
@Override
public List<VolumeMetricsResponse> listVolumeMetrics(List<VolumeResponse> volumeResponses) {
    final List<VolumeMetricsResponse> result = new ArrayList<>();
    for (final VolumeResponse volume : volumeResponses) {
        final VolumeMetricsResponse metrics = new VolumeMetricsResponse();
        try {
            // Copy all base list-API fields onto the metrics response.
            BeanUtils.copyProperties(metrics, volume);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate volume metrics response");
        }
        metrics.setDiskSizeGB(volume.getSize());
        metrics.setStorageType(volume.getStorageType(), volume.getVolumeType());
        result.add(metrics);
    }
    return result;
}
/**
 * Builds a VmMetricsResponse for every user-VM response: copies the base
 * properties, then adds derived totals (IPs, CPU, memory, network and disk
 * I/O) computed from the fields already present on the list-API response.
 *
 * @throws ServerApiException when bean property copying fails
 */
@Override
public List<VmMetricsResponse> listVmMetrics(List<UserVmResponse> vmResponses) {
    final List<VmMetricsResponse> metricsResponses = new ArrayList<>();
    for (final UserVmResponse vmResponse: vmResponses) {
        VmMetricsResponse metricsResponse = new VmMetricsResponse();
        try {
            // Copy all base list-API fields onto the metrics response.
            BeanUtils.copyProperties(metricsResponse, vmResponse);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate vm metrics response");
        }
        metricsResponse.setIpAddress(vmResponse.getNics());
        metricsResponse.setCpuTotal(vmResponse.getCpuNumber(), vmResponse.getCpuSpeed());
        metricsResponse.setMemTotal(vmResponse.getMemory());
        metricsResponse.setNetworkRead(vmResponse.getNetworkKbsRead());
        metricsResponse.setNetworkWrite(vmResponse.getNetworkKbsWrite());
        metricsResponse.setDiskRead(vmResponse.getDiskKbsRead());
        metricsResponse.setDiskWrite(vmResponse.getDiskKbsWrite());
        metricsResponse.setDiskIopsTotal(vmResponse.getDiskIORead(), vmResponse.getDiskIOWrite());
        metricsResponses.add(metricsResponse);
    }
    return metricsResponses;
}
/**
 * Builds a StoragePoolMetricsResponse for every pool response: copies the
 * base properties, then adds GB-scaled sizes and threshold flags derived
 * from the cluster-scoped alert and disable capacity settings.
 *
 * Fix: the allocated disable-threshold flag was computed from the *used*
 * disk size; it now uses the *allocated* size, consistent with the
 * allocated alert-threshold on the preceding line.
 *
 * @throws ServerApiException when bean property copying fails
 */
@Override
public List<StoragePoolMetricsResponse> listStoragePoolMetrics(List<StoragePoolResponse> poolResponses) {
    final List<StoragePoolMetricsResponse> metricsResponses = new ArrayList<>();
    for (final StoragePoolResponse poolResponse: poolResponses) {
        StoragePoolMetricsResponse metricsResponse = new StoragePoolMetricsResponse();
        try {
            BeanUtils.copyProperties(metricsResponse, poolResponse);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate storagepool metrics response");
        }
        // Thresholds are cluster-scoped; resolve the pool's cluster id.
        // A null cluster id (e.g. zone-wide pool) falls back to the global value.
        Long poolClusterId = null;
        final Cluster cluster = clusterDao.findByUuid(poolResponse.getClusterId());
        if (cluster != null) {
            poolClusterId = cluster.getId();
        }
        final Double storageThreshold = AlertManager.StorageCapacityThreshold.valueIn(poolClusterId);
        final Double storageDisableThreshold = CapacityManager.StorageCapacityDisableThreshold.valueIn(poolClusterId);
        metricsResponse.setDiskSizeUsedGB(poolResponse.getDiskSizeUsed());
        metricsResponse.setDiskSizeTotalGB(poolResponse.getDiskSizeTotal(), poolResponse.getOverProvisionFactor());
        metricsResponse.setDiskSizeAllocatedGB(poolResponse.getDiskSizeAllocated());
        metricsResponse.setDiskSizeUnallocatedGB(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor());
        // Threshold flags: used size against alert/disable thresholds...
        metricsResponse.setStorageUsedThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageThreshold);
        metricsResponse.setStorageUsedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeUsed(), poolResponse.getOverProvisionFactor(), storageDisableThreshold);
        // ...and allocated size against alert/disable thresholds.
        metricsResponse.setStorageAllocatedThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor(), storageThreshold);
        // BUGFIX: was getDiskSizeUsed() — allocated threshold must use allocated size.
        metricsResponse.setStorageAllocatedDisableThreshold(poolResponse.getDiskSizeTotal(), poolResponse.getDiskSizeAllocated(), poolResponse.getOverProvisionFactor(), storageDisableThreshold);
        metricsResponses.add(metricsResponse);
    }
    return metricsResponses;
}
/**
 * Builds a HostMetricsResponse for every host response: copies the base
 * properties, counts user instances on the host, and adds CPU/memory
 * totals, usage, and threshold flags derived from cluster-scoped alert,
 * disable-threshold and overcommit settings.
 *
 * Fix: guard the out-of-band-management power-state lookup against a null
 * OOBM response (hosts without OOBM configured) to avoid an NPE.
 *
 * @throws ServerApiException when bean property copying fails
 */
@Override
public List<HostMetricsResponse> listHostMetrics(List<HostResponse> hostResponses) {
    final List<HostMetricsResponse> metricsResponses = new ArrayList<>();
    for (final HostResponse hostResponse: hostResponses) {
        HostMetricsResponse metricsResponse = new HostMetricsResponse();
        try {
            BeanUtils.copyProperties(metricsResponse, hostResponse);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate host metrics response");
        }
        final Host host = hostDao.findByUuid(hostResponse.getId());
        if (host == null) {
            // Host disappeared between listing and metrics generation; skip it.
            continue;
        }
        final Long hostId = host.getId();
        final Long clusterId = host.getClusterId();
        // Thresholds (cluster-scoped; null clusterId yields the global value)
        final Double cpuThreshold = AlertManager.CPUCapacityThreshold.valueIn(clusterId);
        final Double memoryThreshold = AlertManager.MemoryCapacityThreshold.valueIn(clusterId);
        final Float cpuDisableThreshold = DeploymentClusterPlanner.ClusterCPUCapacityDisableThreshold.valueIn(clusterId);
        final Float memoryDisableThreshold = DeploymentClusterPlanner.ClusterMemoryCapacityDisableThreshold.valueIn(clusterId);
        // Over commit ratios (cluster details; default 1.0 when unset)
        final Double cpuOvercommitRatio = findRatioValue(ApiDBUtils.findClusterDetails(clusterId, "cpuOvercommitRatio"));
        final Double memoryOvercommitRatio = findRatioValue(ApiDBUtils.findClusterDetails(clusterId, "memoryOvercommitRatio"));
        // Count user VMs on this host; "up" means in the Running state.
        Long upInstances = 0L;
        Long totalInstances = 0L;
        for (final VMInstanceVO instance: vmInstanceDao.listByHostId(hostId)) {
            if (instance == null) {
                continue;
            }
            if (instance.getType() == VirtualMachine.Type.User) {
                totalInstances++;
                if (instance.getState() == VirtualMachine.State.Running) {
                    upInstances++;
                }
            }
        }
        // BUGFIX: hosts without out-of-band management have no OOBM response;
        // dereferencing it unconditionally caused a NullPointerException.
        if (hostResponse.getOutOfBandManagementResponse() != null) {
            metricsResponse.setPowerState(hostResponse.getOutOfBandManagementResponse().getPowerState());
        }
        metricsResponse.setInstances(upInstances, totalInstances);
        metricsResponse.setCpuTotal(hostResponse.getCpuNumber(), hostResponse.getCpuSpeed(), cpuOvercommitRatio);
        metricsResponse.setCpuUsed(hostResponse.getCpuUsed(), hostResponse.getCpuNumber(), hostResponse.getCpuSpeed());
        metricsResponse.setCpuAllocated(hostResponse.getCpuAllocated(), hostResponse.getCpuNumber(), hostResponse.getCpuSpeed());
        metricsResponse.setMemTotal(hostResponse.getMemoryTotal(), memoryOvercommitRatio);
        metricsResponse.setMemAllocated(hostResponse.getMemoryAllocated());
        metricsResponse.setMemUsed(hostResponse.getMemoryUsed());
        metricsResponse.setNetworkRead(hostResponse.getNetworkKbsRead());
        metricsResponse.setNetworkWrite(hostResponse.getNetworkKbsWrite());
        // CPU thresholds
        metricsResponse.setCpuUsageThreshold(hostResponse.getCpuUsed(), cpuThreshold);
        metricsResponse.setCpuUsageDisableThreshold(hostResponse.getCpuUsed(), cpuDisableThreshold);
        metricsResponse.setCpuAllocatedThreshold(hostResponse.getCpuAllocated(), cpuOvercommitRatio, cpuThreshold);
        metricsResponse.setCpuAllocatedDisableThreshold(hostResponse.getCpuAllocated(), cpuOvercommitRatio, cpuDisableThreshold);
        // Memory thresholds
        metricsResponse.setMemoryUsageThreshold(hostResponse.getMemoryUsed(), hostResponse.getMemoryTotal(), memoryThreshold);
        metricsResponse.setMemoryUsageDisableThreshold(hostResponse.getMemoryUsed(), hostResponse.getMemoryTotal(), memoryDisableThreshold);
        metricsResponse.setMemoryAllocatedThreshold(hostResponse.getMemoryAllocated(), hostResponse.getMemoryTotal(), memoryOvercommitRatio, memoryThreshold);
        metricsResponse.setMemoryAllocatedDisableThreshold(hostResponse.getMemoryAllocated(), hostResponse.getMemoryTotal(), memoryOvercommitRatio, memoryDisableThreshold);
        metricsResponses.add(metricsResponse);
    }
    return metricsResponses;
}
/**
 * Looks up the summed capacity of the given type for a zone/cluster scope.
 * Returns the first matching row, or null when the DAO returns nothing.
 */
private CapacityDaoImpl.SummedCapacity getCapacity(final int capacityType, final Long zoneId, final Long clusterId) {
    final List<CapacityDaoImpl.SummedCapacity> rows = capacityDao.findCapacityBy(capacityType, zoneId, null, clusterId);
    if (rows == null || rows.isEmpty()) {
        return null;
    }
    return rows.get(0);
}
/**
 * Builds a ClusterMetricsResponse for every entry in the given list of
 * cluster responses, aggregating per-host CPU/memory metrics and evaluating
 * the configured notification/disable thresholds.
 *
 * Clusters whose uuid cannot be resolved by clusterDao are silently skipped.
 */
@Override
public List<ClusterMetricsResponse> listClusterMetrics(List<ClusterResponse> clusterResponses) {
final List<ClusterMetricsResponse> metricsResponses = new ArrayList<>();
for (final ClusterResponse clusterResponse: clusterResponses) {
ClusterMetricsResponse metricsResponse = new ClusterMetricsResponse();
try {
// Carry over the plain cluster fields; metric fields are filled in below.
BeanUtils.copyProperties(metricsResponse, clusterResponse);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate cluster metrics response");
}
final Cluster cluster = clusterDao.findByUuid(clusterResponse.getId());
if (cluster == null) {
// Response refers to a cluster we cannot resolve; skip it.
continue;
}
final Long clusterId = cluster.getId();
// Thresholds, resolved per cluster via scoped configuration values
final Double cpuThreshold = AlertManager.CPUCapacityThreshold.valueIn(clusterId);
final Double memoryThreshold = AlertManager.MemoryCapacityThreshold.valueIn(clusterId);
final Float cpuDisableThreshold = DeploymentClusterPlanner.ClusterCPUCapacityDisableThreshold.valueIn(clusterId);
final Float memoryDisableThreshold = DeploymentClusterPlanner.ClusterMemoryCapacityDisableThreshold.valueIn(clusterId);
// Overcommit ratios come from the cluster details table
final Double cpuOvercommitRatio = findRatioValue(ApiDBUtils.findClusterDetails(clusterId, "cpuOvercommitRatio"));
final Double memoryOvercommitRatio = findRatioValue(ApiDBUtils.findClusterDetails(clusterId, "memoryOvercommitRatio"));
// CPU and memory capacities summed over this cluster
final CapacityDaoImpl.SummedCapacity cpuCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_CPU, null, clusterId);
final CapacityDaoImpl.SummedCapacity memoryCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_MEMORY, null, clusterId);
final Metrics metrics = new Metrics(cpuCapacity, memoryCapacity);
// Count routing hosts (Up hosts count as "up" resources) and fold each
// host's usage into the accumulator.
for (final HostJoinVO host: hostJoinDao.findByClusterId(clusterId, Host.Type.Routing)) {
if (host.getStatus() == Status.Up) {
metrics.incrUpResources();
}
metrics.incrTotalResources();
updateHostMetrics(metrics, host);
}
metricsResponse.setState(clusterResponse.getAllocationState(), clusterResponse.getManagedState());
metricsResponse.setResources(metrics.getUpResources(), metrics.getTotalResources());
// CPU
metricsResponse.setCpuTotal(metrics.getTotalCpu());
metricsResponse.setCpuAllocated(metrics.getCpuAllocated(), metrics.getTotalCpu());
// Usage-derived values are only meaningful when some usage was reported
if (metrics.getCpuUsedPercentage() > 0L) {
metricsResponse.setCpuUsed(metrics.getCpuUsedPercentage(), metrics.getTotalHosts());
metricsResponse.setCpuMaxDeviation(metrics.getMaximumCpuUsage(), metrics.getCpuUsedPercentage(), metrics.getTotalHosts());
}
// Memory
metricsResponse.setMemTotal(metrics.getTotalMemory());
metricsResponse.setMemAllocated(metrics.getMemoryAllocated(), metrics.getTotalMemory());
if (metrics.getMemoryUsed() > 0L) {
metricsResponse.setMemUsed(metrics.getMemoryUsed(), metrics.getTotalMemory());
metricsResponse.setMemMaxDeviation(metrics.getMaximumMemoryUsage(), metrics.getMemoryUsed(), metrics.getTotalHosts());
}
// CPU thresholds
metricsResponse.setCpuUsageThreshold(metrics.getCpuUsedPercentage(), metrics.getTotalHosts(), cpuThreshold);
metricsResponse.setCpuUsageDisableThreshold(metrics.getCpuUsedPercentage(), metrics.getTotalHosts(), cpuDisableThreshold);
metricsResponse.setCpuAllocatedThreshold(metrics.getCpuAllocated(), metrics.getTotalCpu(), cpuOvercommitRatio, cpuThreshold);
metricsResponse.setCpuAllocatedDisableThreshold(metrics.getCpuAllocated(), metrics.getTotalCpu(), cpuOvercommitRatio, cpuDisableThreshold);
// Memory thresholds
metricsResponse.setMemoryUsageThreshold(metrics.getMemoryUsed(), metrics.getTotalMemory(), memoryThreshold);
metricsResponse.setMemoryUsageDisableThreshold(metrics.getMemoryUsed(), metrics.getTotalMemory(), memoryDisableThreshold);
metricsResponse.setMemoryAllocatedThreshold(metrics.getMemoryAllocated(), metrics.getTotalMemory(), memoryOvercommitRatio, memoryThreshold);
metricsResponse.setMemoryAllocatedDisableThreshold(metrics.getMemoryAllocated(), metrics.getTotalMemory(), memoryOvercommitRatio, memoryDisableThreshold);
metricsResponses.add(metricsResponse);
}
return metricsResponses;
}
/**
 * Builds a ZoneMetricsResponse for every entry in the given list of zone
 * responses, aggregating CPU/memory metrics over all clusters (and their
 * routing hosts) of the zone, and evaluating the configured thresholds.
 *
 * Zones whose uuid cannot be resolved by dataCenterDao are silently skipped.
 */
@Override
public List<ZoneMetricsResponse> listZoneMetrics(List<ZoneResponse> zoneResponses) {
final List<ZoneMetricsResponse> metricsResponses = new ArrayList<>();
for (final ZoneResponse zoneResponse: zoneResponses) {
ZoneMetricsResponse metricsResponse = new ZoneMetricsResponse();
try {
// Carry over the plain zone fields; metric fields are filled in below.
BeanUtils.copyProperties(metricsResponse, zoneResponse);
} catch (IllegalAccessException | InvocationTargetException e) {
throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to generate zone metrics response");
}
final DataCenter zone = dataCenterDao.findByUuid(zoneResponse.getId());
if (zone == null) {
// Response refers to a zone we cannot resolve; skip it.
continue;
}
final Long zoneId = zone.getId();
// Thresholds — global configuration values here (no per-zone scoping)
final Double cpuThreshold = AlertManager.CPUCapacityThreshold.value();
final Double memoryThreshold = AlertManager.MemoryCapacityThreshold.value();
final Float cpuDisableThreshold = DeploymentClusterPlanner.ClusterCPUCapacityDisableThreshold.value();
final Float memoryDisableThreshold = DeploymentClusterPlanner.ClusterMemoryCapacityDisableThreshold.value();
// CPU and memory capacities summed over the zone
final CapacityDaoImpl.SummedCapacity cpuCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_CPU, zoneId, null);
final CapacityDaoImpl.SummedCapacity memoryCapacity = getCapacity((int) Capacity.CAPACITY_TYPE_MEMORY, zoneId, null);
final Metrics metrics = new Metrics(cpuCapacity, memoryCapacity);
// Resources are counted at cluster granularity: a cluster counts as "up"
// when it is both Enabled and Managed; host usage is accumulated per
// routing host of each cluster.
for (final Cluster cluster : clusterDao.listClustersByDcId(zoneId)) {
metrics.incrTotalResources();
if (cluster.getAllocationState() == Grouping.AllocationState.Enabled
&& cluster.getManagedState() == Managed.ManagedState.Managed) {
metrics.incrUpResources();
}
for (final HostJoinVO host: hostJoinDao.findByClusterId(cluster.getId(), Host.Type.Routing)) {
updateHostMetrics(metrics, host);
}
}
metricsResponse.setState(zoneResponse.getAllocationState());
metricsResponse.setResource(metrics.getUpResources(), metrics.getTotalResources());
// CPU
metricsResponse.setCpuTotal(metrics.getTotalCpu());
metricsResponse.setCpuAllocated(metrics.getCpuAllocated(), metrics.getTotalCpu());
// Usage-derived values are only meaningful when some usage was reported
if (metrics.getCpuUsedPercentage() > 0L) {
metricsResponse.setCpuUsed(metrics.getCpuUsedPercentage(), metrics.getTotalHosts());
metricsResponse.setCpuMaxDeviation(metrics.getMaximumCpuUsage(), metrics.getCpuUsedPercentage(), metrics.getTotalHosts());
}
// Memory
metricsResponse.setMemTotal(metrics.getTotalMemory());
metricsResponse.setMemAllocated(metrics.getMemoryAllocated(), metrics.getTotalMemory());
if (metrics.getMemoryUsed() > 0L) {
metricsResponse.setMemUsed(metrics.getMemoryUsed(), metrics.getTotalMemory());
metricsResponse.setMemMaxDeviation(metrics.getMaximumMemoryUsage(), metrics.getMemoryUsed(), metrics.getTotalHosts());
}
// CPU thresholds (note: no overcommit ratio at zone level)
metricsResponse.setCpuUsageThreshold(metrics.getCpuUsedPercentage(), metrics.getTotalHosts(), cpuThreshold);
metricsResponse.setCpuUsageDisableThreshold(metrics.getCpuUsedPercentage(), metrics.getTotalHosts(), cpuDisableThreshold);
metricsResponse.setCpuAllocatedThreshold(metrics.getCpuAllocated(), metrics.getTotalCpu(), cpuThreshold);
metricsResponse.setCpuAllocatedDisableThreshold(metrics.getCpuAllocated(), metrics.getTotalCpu(), cpuDisableThreshold);
// Memory thresholds
metricsResponse.setMemoryUsageThreshold(metrics.getMemoryUsed(), metrics.getTotalMemory(), memoryThreshold);
metricsResponse.setMemoryUsageDisableThreshold(metrics.getMemoryUsed(), metrics.getTotalMemory(), memoryDisableThreshold);
metricsResponse.setMemoryAllocatedThreshold(metrics.getMemoryAllocated(), metrics.getTotalMemory(), memoryThreshold);
metricsResponse.setMemoryAllocatedDisableThreshold(metrics.getMemoryAllocated(), metrics.getTotalMemory(), memoryDisableThreshold);
metricsResponses.add(metricsResponse);
}
return metricsResponses;
}
/**
 * Lists the API command classes exposed by the metrics service plugin.
 */
@Override
public List<Class<?>> getCommands() {
    final List<Class<?>> commands = new ArrayList<>();
    commands.add(ListInfrastructureCmd.class);
    commands.add(ListVolumesMetricsCmd.class);
    commands.add(ListVMsMetricsCmd.class);
    commands.add(ListStoragePoolsMetricsCmd.class);
    commands.add(ListHostsMetricsCmd.class);
    commands.add(ListClustersMetricsCmd.class);
    commands.add(ListZonesMetricsCmd.class);
    return commands;
}
private class Metrics {
// CPU metrics
private Long totalCpu = 0L;
private Long cpuAllocated = 0L;
private Double cpuUsedPercentage = 0.0;
private Double maximumCpuUsage = 0.0;
// Memory metrics
private Long totalMemory = 0L;
private Long memoryUsed = 0L;
private Long memoryAllocated = 0L;
private Long maximumMemoryUsage = 0L;
// Counters
private Long totalHosts = 0L;
private Long totalResources = 0L;
private Long upResources = 0L;
public Metrics(final CapacityDaoImpl.SummedCapacity totalCpu, final CapacityDaoImpl.SummedCapacity totalMemory) {
if (totalCpu != null) {
this.totalCpu = totalCpu.getTotalCapacity();
}
if (totalMemory != null) {
this.totalMemory = totalMemory.getTotalCapacity();
}
}
public void addCpuAllocated(Long cpuAllocated) {
this.cpuAllocated += cpuAllocated;
}
public void addCpuUsedPercentage(Double cpuUsedPercentage) {
this.cpuUsedPercentage += cpuUsedPercentage;
}
public void setMaximumCpuUsage(Double maximumCpuUsage) {
if (this.maximumCpuUsage == null || (maximumCpuUsage != null && maximumCpuUsage > this.maximumCpuUsage)) {
this.maximumCpuUsage = maximumCpuUsage;
}
}
public void addMemoryUsed(Long memoryUsed) {
this.memoryUsed += memoryUsed;
}
public void addMemoryAllocated(Long memoryAllocated) {
this.memoryAllocated += memoryAllocated;
}
public void setMaximumMemoryUsage(Long maximumMemoryUsage) {
if (this.maximumMemoryUsage == null || (maximumMemoryUsage != null && maximumMemoryUsage > this.maximumMemoryUsage)) {
this.maximumMemoryUsage = maximumMemoryUsage;
}
}
public void incrTotalHosts() {
this.totalHosts++;
}
public void incrTotalResources() {
this.totalResources++;
}
public void incrUpResources() {
this.upResources++;
}
public Long getTotalCpu() {
return totalCpu;
}
public Long getCpuAllocated() {
return cpuAllocated;
}
public Double getCpuUsedPercentage() {
return cpuUsedPercentage;
}
public Double getMaximumCpuUsage() {
return maximumCpuUsage;
}
public Long getTotalMemory() {
return totalMemory;
}
public Long getMemoryUsed() {
return memoryUsed;
}
public Long getMemoryAllocated() {
return memoryAllocated;
}
public Long getMaximumMemoryUsage() {
return maximumMemoryUsage;
}
public Long getTotalHosts() {
return totalHosts;
}
public Long getTotalResources() {
return totalResources;
}
public Long getUpResources() {
return upResources;
}
}
}

View File

@ -0,0 +1,211 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.response;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.response.ClusterResponse;
/**
 * Cluster API response extended with aggregated metrics: host counts,
 * CPU/memory totals, usage/allocation percentages, deviation figures, and
 * threshold-exceeded flags used by the metrics views.
 *
 * Fixes copy-pasted {@code @Param} descriptions: the memory fields were
 * described as "cpu", and the usage/allocation fields are percentage
 * strings (the setters format with "%%"), not Ghz/GiB values.
 */
public class ClusterMetricsResponse extends ClusterResponse {
    @SerializedName("state")
    @Param(description = "state of the cluster")
    private String state;

    @SerializedName("hosts")
    @Param(description = "running / total hosts in the cluster")
    private String resources;

    @SerializedName("cputotal")
    @Param(description = "the total cpu capacity in Ghz")
    private String cpuTotal;

    @SerializedName("cpuused")
    @Param(description = "the percentage of cpu used")
    private String cpuUsed;

    @SerializedName("cpuallocated")
    @Param(description = "the percentage of cpu allocated")
    private String cpuAllocated;

    @SerializedName("cpumaxdeviation")
    @Param(description = "the maximum cpu deviation")
    private String cpuMaxDeviation;

    @SerializedName("memorytotal")
    @Param(description = "the total memory capacity in GiB")
    private String memTotal;

    @SerializedName("memoryused")
    @Param(description = "the percentage of memory used")
    private String memUsed;

    @SerializedName("memoryallocated")
    @Param(description = "the percentage of memory allocated")
    private String memAllocated;

    @SerializedName("memorymaxdeviation")
    @Param(description = "the maximum memory deviation")
    private String memMaxDeviation;

    @SerializedName("cputhreshold")
    @Param(description = "cpu usage notification threshold exceeded")
    private Boolean cpuThresholdExceeded;

    @SerializedName("cpudisablethreshold")
    @Param(description = "cpu usage disable threshold exceeded")
    private Boolean cpuDisableThresholdExceeded;

    @SerializedName("cpuallocatedthreshold")
    @Param(description = "cpu allocated notification threshold exceeded")
    private Boolean cpuAllocatedThresholdExceeded;

    @SerializedName("cpuallocateddisablethreshold")
    @Param(description = "cpu allocated disable threshold exceeded")
    private Boolean cpuAllocatedDisableThresholdExceeded;

    @SerializedName("memorythreshold")
    @Param(description = "memory usage notification threshold exceeded")
    private Boolean memoryThresholdExceeded;

    @SerializedName("memorydisablethreshold")
    @Param(description = "memory usage disable threshold exceeded")
    private Boolean memoryDisableThresholdExceeded;

    @SerializedName("memoryallocatedthreshold")
    @Param(description = "memory allocated notification threshold exceeded")
    private Boolean memoryAllocatedThresholdExceeded;

    @SerializedName("memoryallocateddisablethreshold")
    @Param(description = "memory allocated disable threshold exceeded")
    private Boolean memoryAllocatedDisableThresholdExceeded;

    /**
     * Shows the managed state when the cluster is "Unmanaged", otherwise the
     * allocation state (the original's second branch was redundant — state
     * already held allocationState). Null-safe with respect to managedState.
     */
    public void setState(final String allocationState, final String managedState) {
        this.state = "Unmanaged".equals(managedState) ? managedState : allocationState;
    }

    /** Formats "up / total" host counts; skipped when either value is null. */
    public void setResources(final Long upResources, final Long totalResources) {
        if (upResources != null && totalResources != null) {
            this.resources = String.format("%d / %d", upResources, totalResources);
        }
    }

    /** Total CPU in Ghz; input is assumed to be in Mhz (divided by 1000) — TODO confirm against capacity units. */
    public void setCpuTotal(final Long cpuTotal) {
        if (cpuTotal != null) {
            this.cpuTotal = String.format("%.2f Ghz", cpuTotal / 1000.0);
        }
    }

    /** Average CPU usage percentage across hosts (sum of per-host percentages / host count). */
    public void setCpuUsed(final Double cpuUsedPercentage, final Long totalHosts) {
        if (cpuUsedPercentage != null && totalHosts != null && totalHosts != 0) {
            this.cpuUsed = String.format("%.2f%%", 1.0 * cpuUsedPercentage / totalHosts);
        }
    }

    /** CPU allocated as a percentage of total CPU. */
    public void setCpuAllocated(final Long cpuAllocated, final Long cpuTotal) {
        if (cpuAllocated != null && cpuTotal != null && cpuTotal != 0) {
            this.cpuAllocated = String.format("%.2f%%", cpuAllocated * 100.0 / cpuTotal);
        }
    }

    /** Deviation of the busiest host's CPU usage from the average, as a percentage of the average. */
    public void setCpuMaxDeviation(final Double maxCpuDeviation, final Double totalCpuUsed, final Long totalHosts) {
        if (maxCpuDeviation != null && totalCpuUsed != null && totalHosts != null && totalHosts != 0) {
            final Double averageCpuUsage = totalCpuUsed / totalHosts;
            this.cpuMaxDeviation = String.format("%.2f%%", (maxCpuDeviation - averageCpuUsage) * 100.0 / averageCpuUsage);
        }
    }

    /** Total memory in GB; input is in bytes. */
    public void setMemTotal(final Long memTotal) {
        if (memTotal != null) {
            this.memTotal = String.format("%.2f GB", memTotal / (1024.0 * 1024.0 * 1024.0));
        }
    }

    /** Memory used as a percentage of total memory. */
    public void setMemUsed( final Long memUsed, final Long memTotal) {
        if (memUsed != null && memTotal != null && memTotal != 0) {
            this.memUsed = String.format("%.2f%%", memUsed * 100.0 / memTotal);
        }
    }

    /** Memory allocated as a percentage of total memory. */
    public void setMemAllocated(final Long memAllocated, final Long memTotal) {
        if (memAllocated != null && memTotal != null && memTotal != 0) {
            this.memAllocated = String.format("%.2f%%", memAllocated * 100.0 / memTotal);
        }
    }

    /** Deviation of the busiest host's memory usage from the average, as a percentage of the average. */
    public void setMemMaxDeviation(final Long maxMemoryUsage, final Long totalMemory, final Long totalHosts) {
        if (maxMemoryUsage != null && totalMemory != null && totalHosts != null && totalHosts != 0) {
            final Double averageMemoryUsage = 1.0 * totalMemory / totalHosts;
            this.memMaxDeviation = String.format("%.2f%%", (maxMemoryUsage - averageMemoryUsage) * 100.0 / averageMemoryUsage);
        }
    }

    /** True when the average CPU usage fraction exceeds the notification threshold. */
    public void setCpuUsageThreshold(final Double cpuUsed, final Long totalHosts, final Double threshold) {
        if (cpuUsed != null && totalHosts != null && threshold != null && totalHosts != 0) {
            this.cpuThresholdExceeded = (cpuUsed / (100.0 * totalHosts)) > threshold;
        }
    }

    /** True when the average CPU usage fraction exceeds the disable threshold. */
    public void setCpuUsageDisableThreshold(final Double cpuUsed, final Long totalHosts, final Float threshold) {
        if (cpuUsed != null && totalHosts != null && threshold != null && totalHosts != 0) {
            this.cpuDisableThresholdExceeded = (cpuUsed / (100.0 * totalHosts)) > threshold;
        }
    }

    /** True when overcommit-adjusted CPU allocation vs total CPU exceeds the notification threshold. */
    public void setCpuAllocatedThreshold(final Long cpuAllocated, final Long cpuTotal, final Double overCommitRatio, final Double threshold) {
        if (cpuAllocated != null && cpuTotal != null && overCommitRatio != null && threshold != null && cpuTotal != 0) {
            this.cpuAllocatedThresholdExceeded = (1.0 * cpuAllocated * overCommitRatio / cpuTotal) > threshold;
        }
    }

    /** True when overcommit-adjusted CPU allocation vs total CPU exceeds the disable threshold. */
    public void setCpuAllocatedDisableThreshold(final Long cpuAllocated, final Long cpuTotal, final Double overCommitRatio, final Float threshold) {
        if (cpuAllocated != null && cpuTotal != null && overCommitRatio != null && threshold != null && cpuTotal != 0) {
            this.cpuAllocatedDisableThresholdExceeded = (1.0 * cpuAllocated * overCommitRatio / cpuTotal) > threshold;
        }
    }

    /** True when the memory usage fraction exceeds the notification threshold. */
    public void setMemoryUsageThreshold(final Long memUsed, final Long memTotal, final Double threshold) {
        if (memUsed != null && memTotal != null && threshold != null && memTotal != 0) {
            this.memoryThresholdExceeded = (1.0 * memUsed / memTotal) > threshold;
        }
    }

    /** True when the memory usage fraction exceeds the disable threshold. */
    public void setMemoryUsageDisableThreshold(final Long memUsed, final Long memTotal, final Float threshold) {
        if (memUsed != null && memTotal != null && threshold != null && memTotal != 0) {
            this.memoryDisableThresholdExceeded = (1.0 * memUsed / memTotal) > threshold;
        }
    }

    /** True when overcommit-adjusted memory allocation vs total memory exceeds the notification threshold. */
    public void setMemoryAllocatedThreshold(final Long memAllocated, final Long memTotal, final Double overCommitRatio, final Double threshold) {
        if (memAllocated != null && memTotal != null && overCommitRatio != null && threshold != null && memTotal != 0) {
            this.memoryAllocatedThresholdExceeded = (1.0 * memAllocated * overCommitRatio / memTotal) > threshold;
        }
    }

    /** True when overcommit-adjusted memory allocation vs total memory exceeds the disable threshold. */
    public void setMemoryAllocatedDisableThreshold(final Long memAllocated, final Long memTotal, final Double overCommitRatio, final Float threshold) {
        if (memAllocated != null && memTotal != null && overCommitRatio != null && threshold != null && memTotal != 0) {
            this.memoryAllocatedDisableThresholdExceeded = (1.0 * memAllocated * overCommitRatio / memTotal) > threshold;
        }
    }
}

View File

@ -0,0 +1,204 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.response;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.response.HostResponse;
import org.apache.cloudstack.outofbandmanagement.OutOfBandManagement;
/**
 * Host API response extended with metrics: out-of-band power state,
 * instance counts, CPU/memory capacity, usage and allocation in absolute
 * units, network I/O, and threshold-exceeded flags for the metrics views.
 *
 * Fixes copy-pasted {@code @Param} descriptions: the memory fields were
 * described as "cpu". Uses {@code Double.parseDouble} instead of
 * {@code Double.valueOf} to avoid needless boxing.
 */
public class HostMetricsResponse extends HostResponse {
    @SerializedName("powerstate")
    @Param(description = "out-of-band management power state")
    private OutOfBandManagement.PowerState powerState;

    @SerializedName("instances")
    @Param(description = "instances on the host")
    private String instances;

    @SerializedName("cputotalghz")
    @Param(description = "the total cpu capacity in Ghz")
    private String cpuTotal;

    @SerializedName("cpuusedghz")
    @Param(description = "the total cpu used in Ghz")
    private String cpuUsed;

    @SerializedName("cpuallocatedghz")
    @Param(description = "the total cpu allocated in Ghz")
    private String cpuAllocated;

    @SerializedName("memorytotalgb")
    @Param(description = "the total memory capacity in GiB")
    private String memTotal;

    @SerializedName("memoryusedgb")
    @Param(description = "the total memory used in GiB")
    private String memUsed;

    @SerializedName("memoryallocatedgb")
    @Param(description = "the total memory allocated in GiB")
    private String memAllocated;

    @SerializedName("networkread")
    @Param(description = "network read in GiB")
    private String networkRead;

    @SerializedName("networkwrite")
    @Param(description = "network write in GiB")
    private String networkWrite;

    @SerializedName("cputhreshold")
    @Param(description = "cpu usage notification threshold exceeded")
    private Boolean cpuThresholdExceeded;

    @SerializedName("cpudisablethreshold")
    @Param(description = "cpu usage disable threshold exceeded")
    private Boolean cpuDisableThresholdExceeded;

    @SerializedName("cpuallocatedthreshold")
    @Param(description = "cpu allocated notification threshold exceeded")
    private Boolean cpuAllocatedThresholdExceeded;

    @SerializedName("cpuallocateddisablethreshold")
    @Param(description = "cpu allocated disable threshold exceeded")
    private Boolean cpuAllocatedDisableThresholdExceeded;

    @SerializedName("memorythreshold")
    @Param(description = "memory usage notification threshold exceeded")
    private Boolean memoryThresholdExceeded;

    @SerializedName("memorydisablethreshold")
    @Param(description = "memory usage disable threshold exceeded")
    private Boolean memoryDisableThresholdExceeded;

    @SerializedName("memoryallocatedthreshold")
    @Param(description = "memory allocated notification threshold exceeded")
    private Boolean memoryAllocatedThresholdExceeded;

    @SerializedName("memoryallocateddisablethreshold")
    @Param(description = "memory allocated disable threshold exceeded")
    private Boolean memoryAllocatedDisableThresholdExceeded;

    public void setPowerState(final OutOfBandManagement.PowerState powerState) {
        this.powerState = powerState;
    }

    /** Formats "running / total" instance counts; skipped when either value is null. */
    public void setInstances(final Long running, final Long total) {
        if (running != null && total != null) {
            this.instances = String.format("%d / %d", running, total);
        }
    }

    /** Total CPU in Ghz with the overcommit ratio appended, e.g. "8.00 Ghz (x 1.5)". */
    public void setCpuTotal(final Integer cpuNumber, final Long cpuSpeed, final Double overcommitRatio) {
        if (cpuNumber != null && cpuSpeed != null && overcommitRatio != null) {
            this.cpuTotal = String.format("%.2f Ghz (x %.1f)", cpuNumber * cpuSpeed / 1000.0, overcommitRatio);
        }
    }

    /** Converts a usage percentage string such as "12.3%" into absolute Ghz used. */
    public void setCpuUsed(final String cpuUsed, final Integer cpuNumber, final Long cpuSpeed) {
        if (cpuUsed != null && cpuNumber != null && cpuSpeed != null) {
            this.cpuUsed = String.format("%.2f Ghz", Double.parseDouble(cpuUsed.replace("%", "")) * cpuNumber * cpuSpeed / (100.0 * 1000.0));
        }
    }

    /** Converts an allocation percentage string into absolute Ghz allocated. */
    public void setCpuAllocated(final String cpuAllocated, final Integer cpuNumber, final Long cpuSpeed) {
        if (cpuAllocated != null && cpuNumber != null && cpuSpeed != null) {
            this.cpuAllocated = String.format("%.2f Ghz", Double.parseDouble(cpuAllocated.replace("%", "")) * cpuNumber * cpuSpeed / (100.0 * 1000.0));
        }
    }

    /** Total memory (bytes) in GB with the overcommit ratio appended. */
    public void setMemTotal(final Long memTotal, final Double overcommitRatio) {
        if (memTotal != null && overcommitRatio != null) {
            this.memTotal = String.format("%.2f GB (x %.1f)", memTotal / (1024.0 * 1024.0 * 1024.0), overcommitRatio);
        }
    }

    /** Memory used (bytes) rendered in GB. */
    public void setMemUsed(final Long memUsed) {
        if (memUsed != null) {
            this.memUsed = String.format("%.2f GB", memUsed / (1024.0 * 1024.0 * 1024.0));
        }
    }

    /** Memory allocated (bytes) rendered in GB. */
    public void setMemAllocated(final Long memAllocated) {
        if (memAllocated != null) {
            this.memAllocated = String.format("%.2f GB", memAllocated / (1024.0 * 1024.0 * 1024.0));
        }
    }

    /** Network read (KiB) rendered in GB. */
    public void setNetworkRead(final Long networkReadKbs) {
        if (networkReadKbs != null) {
            this.networkRead = String.format("%.2f GB", networkReadKbs / (1024.0 * 1024.0));
        }
    }

    /** Network write (KiB) rendered in GB. */
    public void setNetworkWrite(final Long networkWriteKbs) {
        if (networkWriteKbs != null) {
            this.networkWrite = String.format("%.2f GB", networkWriteKbs / (1024.0 * 1024.0));
        }
    }

    /** True when the CPU usage percentage exceeds the notification threshold (fraction). */
    public void setCpuUsageThreshold(final String cpuUsed, final Double threshold) {
        if (cpuUsed != null && threshold != null) {
            this.cpuThresholdExceeded = Double.parseDouble(cpuUsed.replace("%", "")) > (100.0 * threshold);
        }
    }

    /** True when the CPU usage percentage exceeds the disable threshold (fraction). */
    public void setCpuUsageDisableThreshold(final String cpuUsed, final Float threshold) {
        if (cpuUsed != null && threshold != null) {
            this.cpuDisableThresholdExceeded = Double.parseDouble(cpuUsed.replace("%", "")) > (100.0 * threshold);
        }
    }

    /** True when overcommit-adjusted CPU allocation percentage exceeds the notification threshold. */
    public void setCpuAllocatedThreshold(final String cpuAllocated, final Double overCommitRatio, final Double threshold) {
        if (cpuAllocated != null && overCommitRatio != null && threshold != null) {
            this.cpuAllocatedThresholdExceeded = (Double.parseDouble(cpuAllocated.replace("%", "")) * overCommitRatio) > (100.0 * threshold);
        }
    }

    /** True when overcommit-adjusted CPU allocation percentage exceeds the disable threshold. */
    public void setCpuAllocatedDisableThreshold(final String cpuAllocated, final Double overCommitRatio, final Float threshold) {
        if (cpuAllocated != null && overCommitRatio != null && threshold != null) {
            this.cpuAllocatedDisableThresholdExceeded = (Double.parseDouble(cpuAllocated.replace("%", "")) * overCommitRatio) > (100.0 * threshold);
        }
    }

    /** True when memory used exceeds the notification threshold fraction of total memory. */
    public void setMemoryUsageThreshold(final Long memUsed, final Long memTotal, final Double threshold) {
        if (memUsed != null && memTotal != null && threshold != null) {
            this.memoryThresholdExceeded = memUsed > (memTotal * threshold);
        }
    }

    /** True when memory used exceeds the disable threshold fraction of total memory. */
    public void setMemoryUsageDisableThreshold(final Long memUsed, final Long memTotal, final Float threshold) {
        if (memUsed != null && memTotal != null && threshold != null) {
            this.memoryDisableThresholdExceeded = memUsed > (memTotal * threshold);
        }
    }

    /** True when overcommit-adjusted memory allocation exceeds the notification threshold fraction of total memory. */
    public void setMemoryAllocatedThreshold(final Long memAllocated, final Long memTotal, final Double overCommitRatio, final Double threshold) {
        if (memAllocated != null && memTotal != null && overCommitRatio != null && threshold != null) {
            this.memoryAllocatedThresholdExceeded = (memAllocated * overCommitRatio) > (memTotal * threshold);
        }
    }

    /** True when overcommit-adjusted memory allocation exceeds the disable threshold fraction of total memory. */
    public void setMemoryAllocatedDisableThreshold(final Long memAllocated, final Long memTotal, final Double overCommitRatio, final Float threshold) {
        if (memAllocated != null && memTotal != null && overCommitRatio != null && threshold != null) {
            this.memoryAllocatedDisableThresholdExceeded = (memAllocated * overCommitRatio) > (memTotal * threshold);
        }
    }
}

View File

@ -0,0 +1,101 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.response;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.BaseResponse;
/**
 * Aggregate counts of the cloud infrastructure (zones, pods, clusters,
 * hypervisor hosts, storage pools, image stores, system VMs, routers and
 * CPU sockets) returned by the listInfrastructure API command.
 *
 * Fixes the "Number of images stores" typo in the image-store description.
 */
public class InfrastructureResponse extends BaseResponse {

    @SerializedName("zones")
    @Param(description = "Number of zones")
    private Integer zones;

    @SerializedName("pods")
    @Param(description = "Number of pods")
    private Integer pods;

    @SerializedName("clusters")
    @Param(description = "Number of clusters")
    private Integer clusters;

    @SerializedName("hosts")
    @Param(description = "Number of hypervisor hosts")
    private Integer hosts;

    @SerializedName("storagepools")
    @Param(description = "Number of storage pools")
    private Integer storagePools;

    @SerializedName("imagestores")
    @Param(description = "Number of image stores")
    private Integer imageStores;

    @SerializedName("systemvms")
    @Param(description = "Number of systemvms")
    private Integer systemvms;

    @SerializedName("routers")
    @Param(description = "Number of routers")
    private Integer routers;

    @SerializedName("cpusockets")
    @Param(description = "Number of cpu sockets")
    private Integer cpuSockets;

    public InfrastructureResponse() {
        // Object name used by the API response serializer.
        setObjectName("infrastructure");
    }

    public void setZones(final Integer zones) {
        this.zones = zones;
    }

    public void setPods(final Integer pods) {
        this.pods = pods;
    }

    public void setClusters(final Integer clusters) {
        this.clusters = clusters;
    }

    public void setHosts(final Integer hosts) {
        this.hosts = hosts;
    }

    public void setStoragePools(final Integer storagePools) {
        this.storagePools = storagePools;
    }

    public void setImageStores(final Integer imageStores) {
        this.imageStores = imageStores;
    }

    public void setSystemvms(final Integer systemvms) {
        this.systemvms = systemvms;
    }

    public void setRouters(final Integer routers) {
        this.routers = routers;
    }

    public void setCpuSockets(final Integer cpuSockets) {
        this.cpuSockets = cpuSockets;
    }
}

View File

@ -0,0 +1,105 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.response;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.response.StoragePoolResponse;
public class StoragePoolMetricsResponse extends StoragePoolResponse {
@SerializedName("disksizeusedgb")
@Param(description = "disk size used in GiB")
private String diskSizeUsedGB;
@SerializedName("disksizetotalgb")
@Param(description = "disk size in GiB")
private String diskSizeTotalGB;
@SerializedName("disksizeallocatedgb")
@Param(description = "disk size allocated in GiB")
private String diskSizeAllocatedGB;
@SerializedName("disksizeunallocatedgb")
@Param(description = "disk size unallocated in GiB")
private String diskSizeUnallocatedGB;
@SerializedName("storageusagethreshold")
@Param(description = "storage usage notification threshold exceeded")
private Boolean storageUsedThreshold;
@SerializedName("storageusagedisablethreshold")
@Param(description = "storage usage disable threshold exceeded")
private Boolean storageUsedDisableThreshold;
@SerializedName("storageallocatedthreshold")
@Param(description = "storage allocated notification threshold exceeded")
private Boolean storageAllocatedThreshold;
@SerializedName("storageallocateddisablethreshold")
@Param(description = "storage allocated disable threshold exceeded")
private Boolean storageAllocatedDisableThreshold;
public void setDiskSizeUsedGB(final Long diskSizeUsed) {
if (diskSizeUsed != null) {
this.diskSizeUsedGB = String.format("%.2f GB", diskSizeUsed / (1024.0 * 1024.0 * 1024.0));
}
}
public void setDiskSizeTotalGB(final Long totalDiskSize, final String overProvisionFactor) {
if (totalDiskSize != null && overProvisionFactor != null) {
this.diskSizeTotalGB = String.format("%.2f GB (x%s)", totalDiskSize / (1024.0 * 1024.0 * 1024.0), overProvisionFactor);
}
}
public void setDiskSizeAllocatedGB(final Long diskSizeAllocated) {
if (diskSizeAllocated != null) {
this.diskSizeAllocatedGB = String.format("%.2f GB", diskSizeAllocated / (1024.0 * 1024.0 * 1024.0));
}
}
public void setDiskSizeUnallocatedGB(final Long totalDiskSize, final Long diskSizeAllocated, final String overProvisionFactor) {
if (totalDiskSize != null && diskSizeAllocated != null && overProvisionFactor != null) {
this.diskSizeUnallocatedGB = String.format("%.2f GB", ((Double.valueOf(overProvisionFactor) * totalDiskSize) - diskSizeAllocated) / (1024.0 * 1024.0 * 1024.0));
}
}
public void setStorageUsedThreshold(final Long totalDiskSize, final Long diskSizeUsed, final String overProvisionFactor, final Double threshold) {
if (totalDiskSize != null && diskSizeUsed != null && overProvisionFactor != null && threshold != null) {
this.storageUsedThreshold = diskSizeUsed > (totalDiskSize * Double.valueOf(overProvisionFactor) * threshold) ;
}
}
public void setStorageUsedDisableThreshold(final Long totalDiskSize, final Long diskSizeUsed, final String overProvisionFactor, final Double threshold) {
if (totalDiskSize != null && diskSizeUsed != null && overProvisionFactor != null && threshold != null) {
this.storageUsedDisableThreshold = diskSizeUsed > (totalDiskSize * Double.valueOf(overProvisionFactor) * threshold);
}
}
/**
 * Flags whether allocated storage exceeds the notification threshold of the
 * over-provisioned capacity (totalDiskSize * overProvisionFactor * threshold).
 * Any null argument leaves the flag unset.
 */
public void setStorageAllocatedThreshold(final Long totalDiskSize, final Long diskSizeAllocated, final String overProvisionFactor, final Double threshold) {
    if (totalDiskSize == null || diskSizeAllocated == null || overProvisionFactor == null || threshold == null) {
        return;
    }
    final double limit = totalDiskSize * Double.valueOf(overProvisionFactor) * threshold;
    this.storageAllocatedThreshold = diskSizeAllocated > limit;
}
/**
 * Flags whether allocated storage exceeds the disable threshold of the
 * over-provisioned capacity (totalDiskSize * overProvisionFactor * threshold).
 * Any null argument leaves the flag unset.
 */
public void setStorageAllocatedDisableThreshold(final Long totalDiskSize, final Long diskSizeAllocated, final String overProvisionFactor, final Double threshold) {
    if (totalDiskSize == null || diskSizeAllocated == null || overProvisionFactor == null || threshold == null) {
        return;
    }
    final double limit = totalDiskSize * Double.valueOf(overProvisionFactor) * threshold;
    this.storageAllocatedDisableThreshold = diskSizeAllocated > limit;
}
}

View File

@ -0,0 +1,108 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.response;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.ApiConstants;
import org.apache.cloudstack.api.response.NicResponse;
import org.apache.cloudstack.api.response.UserVmResponse;
import java.util.Set;
/**
 * API response carrying per-VM metrics (human-readable strings) on top of the
 * standard UserVmResponse fields.
 */
public class VmMetricsResponse extends UserVmResponse {
    @SerializedName(ApiConstants.IP_ADDRESS)
    @Param(description = "the VM's primary IP address")
    private String ipAddress;

    @SerializedName("cputotal")
    @Param(description = "the total cpu capacity in Ghz")
    private String cpuTotal;

    @SerializedName("memorytotal")
    @Param(description = "the total memory capacity in GiB")
    private String memTotal;

    @SerializedName("networkread")
    @Param(description = "network read in MiB")
    private String networkRead;

    @SerializedName("networkwrite")
    @Param(description = "network write in MiB")
    private String networkWrite;

    @SerializedName("diskread")
    @Param(description = "disk read in MiB")
    private String diskRead;

    @SerializedName("diskwrite")
    @Param(description = "disk write in MiB")
    private String diskWrite;

    @SerializedName("diskiopstotal")
    @Param(description = "the total disk iops")
    private Long diskIopsTotal;

    /**
     * Picks the first NIC's IP address as the VM's "primary" address.
     * NOTE(review): iteration order of the Set determines which NIC wins — assumes
     * the first entry is the default NIC; verify against the caller.
     */
    public void setIpAddress(final Set<NicResponse> nics) {
        if (nics != null && nics.size() > 0) {
            this.ipAddress = nics.iterator().next().getIpaddress();
        }
    }

    /** Formats total CPU capacity (cpuNumber cores * cpuSpeed MHz) as GHz. */
    public void setCpuTotal(final Integer cpuNumber, final Integer cpuSpeed) {
        if (cpuNumber != null && cpuSpeed != null) {
            this.cpuTotal = String.format("%.1f Ghz", cpuNumber * cpuSpeed / 1000.0);
        }
    }

    /** Formats memory (MiB) as a GB string. */
    public void setMemTotal(final Integer memory) {
        if (memory != null) {
            this.memTotal = String.format("%.2f GB", memory / 1024.0);
        }
    }

    /** Formats network read (KiB) as a MB string. */
    public void setNetworkRead(final Long networkReadKbs) {
        if (networkReadKbs != null) {
            this.networkRead = String.format("%.2f MB", networkReadKbs / 1024.0);
        }
    }

    /** Formats network write (KiB) as a MB string. */
    public void setNetworkWrite(final Long networkWriteKbs) {
        if (networkWriteKbs != null) {
            this.networkWrite = String.format("%.2f MB", networkWriteKbs / 1024.0);
        }
    }

    /**
     * Formats disk read (KiB) as a MB string.
     * Fix: previously assigned to networkRead (copy-paste bug), leaving diskRead
     * always null and clobbering the network metric.
     */
    public void setDiskRead(final Long diskReadKbs) {
        if (diskReadKbs != null) {
            this.diskRead = String.format("%.2f MB", diskReadKbs / 1024.0);
        }
    }

    /**
     * Formats disk write (KiB) as a MB string.
     * Fix: previously assigned to networkWrite (copy-paste bug), leaving diskWrite
     * always null and clobbering the network metric.
     */
    public void setDiskWrite(final Long diskWriteKbs) {
        if (diskWriteKbs != null) {
            this.diskWrite = String.format("%.2f MB", diskWriteKbs / 1024.0);
        }
    }

    /** Sums read and write IOPS; either side null leaves the total unset. */
    public void setDiskIopsTotal(final Long diskIoRead, final Long diskIoWrite) {
        if (diskIoRead != null && diskIoWrite != null) {
            this.diskIopsTotal = diskIoRead + diskIoWrite;
        }
    }
}

View File

@ -0,0 +1,41 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.response;
import com.cloud.serializer.Param;
import com.google.common.base.Strings;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.response.VolumeResponse;
/**
 * API response carrying per-volume metrics (human-readable strings) on top of
 * the standard VolumeResponse fields.
 */
public class VolumeMetricsResponse extends VolumeResponse {

    @SerializedName("sizegb")
    @Param(description = "disk size in GiB")
    private String diskSizeGB;

    /**
     * Stores the storage type rendered as "Capitalized (volumeType)",
     * e.g. "Shared (ROOT)", via the parent's single-argument setter.
     * Blank or null arguments leave the field untouched.
     */
    public void setStorageType(final String storageType, final String volumeType) {
        if (Strings.isNullOrEmpty(storageType) || Strings.isNullOrEmpty(volumeType)) {
            return;
        }
        final String capitalized = storageType.substring(0, 1).toUpperCase() + storageType.substring(1);
        this.setStorageType(String.format("%s (%s)", capitalized, volumeType));
    }

    /**
     * Converts a volume size in bytes into a "GB" display string; null is ignored.
     */
    public void setDiskSizeGB(final Long size) {
        if (size == null) {
            return;
        }
        this.diskSizeGB = String.format("%.2f GB", size / (1024.0 * 1024.0 * 1024.0));
    }
}

View File

@ -0,0 +1,206 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.apache.cloudstack.response;
import com.cloud.serializer.Param;
import com.google.gson.annotations.SerializedName;
import org.apache.cloudstack.api.response.ZoneResponse;
/**
 * API response carrying aggregated zone-level capacity metrics
 * (human-readable strings and threshold flags) on top of ZoneResponse.
 */
public class ZoneMetricsResponse extends ZoneResponse {
    @SerializedName("state")
    @Param(description = "state of the zone")
    private String state;

    @SerializedName("clusters")
    @Param(description = "healthy / total clusters in the zone")
    private String resources;

    @SerializedName("cputotal")
    @Param(description = "the total cpu capacity in Ghz")
    private String cpuTotal;

    @SerializedName("cpuused")
    @Param(description = "the total cpu used in Ghz")
    private String cpuUsed;

    @SerializedName("cpuallocated")
    @Param(description = "the total cpu allocated in Ghz")
    private String cpuAllocated;

    @SerializedName("cpumaxdeviation")
    @Param(description = "the maximum cpu deviation")
    private String cpuMaxDeviation;

    // Fix: the three descriptions below previously said "cpu" (copy-paste from the
    // cpu fields); they document memory metrics.
    @SerializedName("memorytotal")
    @Param(description = "the total memory capacity in GiB")
    private String memTotal;

    @SerializedName("memoryused")
    @Param(description = "the total memory used in GiB")
    private String memUsed;

    @SerializedName("memoryallocated")
    @Param(description = "the total memory allocated in GiB")
    private String memAllocated;

    @SerializedName("memorymaxdeviation")
    @Param(description = "the maximum memory deviation")
    private String memMaxDeviation;

    @SerializedName("cputhreshold")
    @Param(description = "cpu usage notification threshold exceeded")
    private Boolean cpuThresholdExceeded;

    @SerializedName("cpudisablethreshold")
    @Param(description = "cpu usage disable threshold exceeded")
    private Boolean cpuDisableThresholdExceeded;

    @SerializedName("cpuallocatedthreshold")
    @Param(description = "cpu allocated notification threshold exceeded")
    private Boolean cpuAllocatedThresholdExceeded;

    @SerializedName("cpuallocateddisablethreshold")
    @Param(description = "cpu allocated disable threshold exceeded")
    private Boolean cpuAllocatedDisableThresholdExceeded;

    @SerializedName("memorythreshold")
    @Param(description = "memory usage notification threshold exceeded")
    private Boolean memoryThresholdExceeded;

    @SerializedName("memorydisablethreshold")
    @Param(description = "memory usage disable threshold exceeded")
    private Boolean memoryDisableThresholdExceeded;

    @SerializedName("memoryallocatedthreshold")
    @Param(description = "memory allocated notification threshold exceeded")
    private Boolean memoryAllocatedThresholdExceeded;

    @SerializedName("memoryallocateddisablethreshold")
    @Param(description = "memory allocated disable threshold exceeded")
    private Boolean memoryAllocatedDisableThresholdExceeded;

    /** Stores the zone's allocation state verbatim. */
    public void setState(final String allocationState) {
        this.state = allocationState;
    }

    /** Renders "up / total" cluster counts; null arguments are ignored. */
    public void setResource(final Long upResources, final Long totalResources) {
        if (upResources != null && totalResources != null) {
            this.resources = String.format("%d / %d", upResources, totalResources);
        }
    }

    /** Formats total CPU capacity (MHz) as GHz. */
    public void setCpuTotal(final Long cpuTotal) {
        if (cpuTotal != null) {
            this.cpuTotal = String.format("%.2f Ghz", cpuTotal / 1000.0);
        }
    }

    /** Average CPU usage percentage across hosts; skipped when totalHosts is 0. */
    public void setCpuUsed(final Double cpuUsedPercentage, final Long totalHosts) {
        if (cpuUsedPercentage != null && totalHosts != null && totalHosts != 0) {
            this.cpuUsed = String.format("%.2f%%", 1.0 * cpuUsedPercentage / totalHosts);
        }
    }

    /** Allocated CPU as a percentage of total; skipped when cpuTotal is 0. */
    public void setCpuAllocated(final Long cpuAllocated, final Long cpuTotal) {
        if (cpuAllocated != null && cpuTotal != null && cpuTotal != 0) {
            this.cpuAllocated = String.format("%.2f%%", cpuAllocated * 100.0 / cpuTotal);
        }
    }

    /**
     * Maximum deviation of any host's CPU usage from the zone average, as a
     * percentage of that average.
     */
    public void setCpuMaxDeviation(final Double maxCpuDeviation, final Double totalCpuUsed, final Long totalHosts) {
        if (maxCpuDeviation != null && totalCpuUsed != null && totalHosts != null && totalHosts != 0) {
            final Double averageCpuUsage = totalCpuUsed / totalHosts;
            this.cpuMaxDeviation = String.format("%.2f%%", (maxCpuDeviation - averageCpuUsage) * 100.0 / averageCpuUsage);
        }
    }

    /** Formats total memory (bytes) as a GB string. */
    public void setMemTotal(final Long memTotal) {
        if (memTotal != null) {
            this.memTotal = String.format("%.2f GB", memTotal / (1024.0 * 1024.0 * 1024.0));
        }
    }

    /**
     * Used memory as a percentage of total.
     * Fix: added the memTotal != 0 guard (present in every sibling setter);
     * without it a zero total rendered "Infinity%" or "NaN%".
     */
    public void setMemUsed(final Long memUsed, final Long memTotal) {
        if (memUsed != null && memTotal != null && memTotal != 0) {
            this.memUsed = String.format("%.2f%%", memUsed * 100.0 / memTotal);
        }
    }

    /** Allocated memory as a percentage of total; skipped when memTotal is 0. */
    public void setMemAllocated(final Long memAllocated, final Long memTotal) {
        if (memAllocated != null && memTotal != null && memTotal != 0) {
            this.memAllocated = String.format("%.2f%%", memAllocated * 100.0 / memTotal);
        }
    }

    /**
     * Maximum deviation of any host's memory usage from the zone average, as a
     * percentage of that average (integer average — intentional truncation).
     */
    public void setMemMaxDeviation(final Long maxMemoryUsage, final Long totalMemory, final Long totalHosts) {
        if (maxMemoryUsage != null && totalMemory != null && totalHosts != null && totalHosts != 0) {
            final Long averageMemoryUsage = totalMemory / totalHosts;
            this.memMaxDeviation = String.format("%.2f%%", (maxMemoryUsage - averageMemoryUsage) * 100.0 / averageMemoryUsage);
        }
    }

    /** Sets the CPU-usage notification flag when average usage fraction exceeds threshold. */
    public void setCpuUsageThreshold(final Double cpuUsed, final Long totalHosts, final Double threshold) {
        if (cpuUsed != null && totalHosts != null && threshold != null && totalHosts != 0) {
            this.cpuThresholdExceeded = (cpuUsed / (100.0 * totalHosts)) > threshold;
        }
    }

    /** Sets the CPU-usage disable flag when average usage fraction exceeds threshold. */
    public void setCpuUsageDisableThreshold(final Double cpuUsed, final Long totalHosts, final Float threshold) {
        if (cpuUsed != null && totalHosts != null && threshold != null && totalHosts != 0) {
            this.cpuDisableThresholdExceeded = (cpuUsed / (100.0 * totalHosts)) > threshold;
        }
    }

    /** Sets the CPU-allocation notification flag when allocated/used ratio exceeds threshold. */
    public void setCpuAllocatedThreshold(final Long cpuAllocated, final Long cpuUsed, final Double threshold) {
        if (cpuAllocated != null && cpuUsed != null && threshold != null && cpuUsed != 0) {
            this.cpuAllocatedThresholdExceeded = (1.0 * cpuAllocated / cpuUsed) > threshold;
        }
    }

    /** Sets the CPU-allocation disable flag when allocated/used ratio exceeds threshold. */
    public void setCpuAllocatedDisableThreshold(final Long cpuAllocated, final Long cpuUsed, final Float threshold) {
        if (cpuAllocated != null && cpuUsed != null && threshold != null && cpuUsed != 0) {
            this.cpuAllocatedDisableThresholdExceeded = (1.0 * cpuAllocated / cpuUsed) > threshold;
        }
    }

    /** Sets the memory-usage notification flag when used/total ratio exceeds threshold. */
    public void setMemoryUsageThreshold(final Long memUsed, final Long memTotal, final Double threshold) {
        if (memUsed != null && memTotal != null && threshold != null && memTotal != 0) {
            this.memoryThresholdExceeded = (1.0 * memUsed / memTotal) > threshold;
        }
    }

    /** Sets the memory-usage disable flag when used/total ratio exceeds threshold. */
    public void setMemoryUsageDisableThreshold(final Long memUsed, final Long memTotal, final Float threshold) {
        if (memUsed != null && memTotal != null && threshold != null && memTotal != 0) {
            this.memoryDisableThresholdExceeded = (1.0 * memUsed / memTotal) > threshold;
        }
    }

    /** Sets the memory-allocation notification flag when allocated/total ratio exceeds threshold. */
    public void setMemoryAllocatedThreshold(final Long memAllocated, final Long memTotal, final Double threshold) {
        if (memAllocated != null && memTotal != null && threshold != null && memTotal != 0) {
            this.memoryAllocatedThresholdExceeded = (1.0 * memAllocated / memTotal) > threshold;
        }
    }

    /** Sets the memory-allocation disable flag when allocated/total ratio exceeds threshold. */
    public void setMemoryAllocatedDisableThreshold(final Long memAllocated, final Long memTotal, final Float threshold) {
        if (memAllocated != null && memTotal != null && threshold != null && memTotal != 0) {
            this.memoryAllocatedDisableThresholdExceeded = (1.0 * memAllocated / memTotal) > threshold;
        }
    }
}

View File

@ -69,6 +69,7 @@
<module>hypervisors/ucs</module>
<module>hypervisors/hyperv</module>
<module>hypervisors/ovm3</module>
<module>metrics</module>
<module>network-elements/elastic-loadbalancer</module>
<module>network-elements/ovs</module>
<module>network-elements/juniper-contrail</module>
@ -78,7 +79,6 @@
<module>network-elements/nuage-vsp</module>
<module>network-elements/bigswitch</module>
<module>network-elements/brocade-vcs</module>
<module>network-elements/midonet</module>
<module>network-elements/stratosphere-ssp</module>
<module>network-elements/opendaylight</module>
<module>outofbandmanagement-drivers/ipmitool</module>
@ -205,5 +205,11 @@
<module>hypervisors/simulator</module>
</modules>
</profile>
<profile>
<id>midonet</id>
<modules>
<module>network-elements/midonet</module>
</modules>
</profile>
</profiles>
</project>

View File

@ -89,6 +89,12 @@ class cloudManagementConfig(serviceCfgBase):
if os.path.exists("/etc/cloudstack/management/server.xml"):
bash("rm -f /etc/cloudstack/management/server.xml")
bash("ln -s /etc/cloudstack/management/server7-ssl.xml /etc/cloudstack/management/server.xml")
if os.path.exists("/usr/share/tomcat7/bin"):
bash("rm -f /usr/share/cloudstack-management/bin")
bash("ln -s /usr/share/tomcat7/bin /usr/share/cloudstack-management/bin")
if os.path.exists("/usr/share/tomcat7/lib"):
bash("rm -f /usr/share/cloudstack-management/lib")
bash("ln -s /usr/share/tomcat7/lib /usr/share/cloudstack-management/lib")
else:
if not os.path.exists("/etc/cloudstack/management/server-ssl.xml") or not os.path.exists("/etc/cloudstack/management/tomcat6-ssl.conf"):
raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server-ssl.xml or /etc/cloudstack/management/tomcat6-ssl.conf, https enable failed")
@ -98,6 +104,12 @@ class cloudManagementConfig(serviceCfgBase):
bash("rm -f /etc/cloudstack/management/tomcat6.conf")
bash("ln -s /etc/cloudstack/management/server-ssl.xml /etc/cloudstack/management/server.xml")
bash("ln -s /etc/cloudstack/management/tomcat6-ssl.conf /etc/cloudstack/management/tomcat6.conf")
if os.path.exists("/usr/share/tomcat6/bin"):
bash("rm -f /usr/share/cloudstack-management/bin")
bash("ln -s /usr/share/tomcat6/bin /usr/share/cloudstack-management/bin")
if os.path.exists("/usr/share/tomcat6/lib"):
bash("rm -f /usr/share/cloudstack-management/lib")
bash("ln -s /usr/share/tomcat6/lib /usr/share/cloudstack-management/lib")
if not bash("iptables-save |grep PREROUTING | grep 6443").isSuccess():
bash("iptables -A PREROUTING -t nat -p tcp --dport 443 -j REDIRECT --to-port 6443")
else:
@ -107,6 +119,12 @@ class cloudManagementConfig(serviceCfgBase):
if os.path.exists("/etc/cloudstack/management/server.xml"):
bash("rm -f /etc/cloudstack/management/server.xml")
bash("ln -s /etc/cloudstack/management/server7-nonssl.xml /etc/cloudstack/management/server.xml")
if os.path.exists("/usr/share/tomcat7/bin"):
bash("rm -f /usr/share/cloudstack-management/bin")
bash("ln -s /usr/share/tomcat7/bin /usr/share/cloudstack-management/bin")
if os.path.exists("/usr/share/tomcat7/lib"):
bash("rm -f /usr/share/cloudstack-management/lib")
bash("ln -s /usr/share/tomcat7/lib /usr/share/cloudstack-management/lib")
else:
if not os.path.exists("/etc/cloudstack/management/server-nonssl.xml") or not os.path.exists("/etc/cloudstack/management/tomcat6-nonssl.conf"):
raise CloudRuntimeException("Cannot find /etc/cloudstack/management/server-nonssl.xml or /etc/cloudstack/management/tomcat6-nonssl.conf, https enable failed")
@ -116,6 +134,12 @@ class cloudManagementConfig(serviceCfgBase):
bash("rm -f /etc/cloudstack/management/tomcat6.conf")
bash("ln -s /etc/cloudstack/management/server-nonssl.xml /etc/cloudstack/management/server.xml")
bash("ln -s /etc/cloudstack/management/tomcat6-nonssl.conf /etc/cloudstack/management/tomcat6.conf")
if os.path.exists("/usr/share/tomcat6/bin"):
bash("rm -f /usr/share/cloudstack-management/bin")
bash("ln -s /usr/share/tomcat6/bin /usr/share/cloudstack-management/bin")
if os.path.exists("/usr/share/tomcat6/lib"):
bash("rm -f /usr/share/cloudstack-management/lib")
bash("ln -s /usr/share/tomcat6/lib /usr/share/cloudstack-management/lib")
bash("touch /var/run/cloudstack-management.pid")
bash("chown cloud.cloud /var/run/cloudstack-management.pid")
#distro like sl 6.1 needs this folder, or tomcat6 failed to start

View File

@ -105,13 +105,13 @@ fi
if [[ "$uflag" == "1" && "$rflag" != "1" ]]
then
rttmplt=http://download.cloud.com/templates/builtin/a88232bf-6a18-38e7-aeee-c1702725079f.qcow2.bz2
rttmplt=http://download.cloudstack.org/templates/builtin/a88232bf-6a18-38e7-aeee-c1702725079f.qcow2.bz2
echo "download routing template from $rttmplt"
fi
if [[ "$uflag" == "1" && "$vflag" != "1" ]]
then
vmtmplt=http://download.cloud.com/templates/builtin/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2
vmtmplt=http://download.cloudstack.org/templates/builtin/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2
echo "download cnetos template from $vmtmplt"
fi

View File

@ -38,7 +38,7 @@ install_cloud_agent() {
exit 1
fi
fi
wget -N -P /etc/yum.repos.d/ http://download.cloud.com/foss/fedora/cloud.repo
wget -N -P /etc/yum.repos.d/ http://download.cloudstack.org/foss/fedora/cloud.repo
if [ $? -gt 0 ]
then
printf "Failed to download repo"
@ -99,7 +99,7 @@ install_cloud_consoleP() {
exit 1
fi
fi
wget -N -P=/etc/yum.repos.d/ http://download.cloud.com/foss/fedora/cloud.repo
wget -N -P=/etc/yum.repos.d/ http://download.cloudstack.org/foss/fedora/cloud.repo
if [ $? -gt 0 ]
then
printf "Failed to download repo"

View File

@ -89,7 +89,6 @@ import com.cloud.api.query.dao.ResourceTagJoinDao;
import com.cloud.api.query.dao.SecurityGroupJoinDao;
import com.cloud.api.query.dao.ServiceOfferingJoinDao;
import com.cloud.api.query.dao.StoragePoolJoinDao;
import com.cloud.api.query.dao.StorageTagDao;
import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.dao.UserAccountJoinDao;
import com.cloud.api.query.dao.UserVmJoinDao;
@ -113,7 +112,6 @@ import com.cloud.api.query.vo.ResourceTagJoinVO;
import com.cloud.api.query.vo.SecurityGroupJoinVO;
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.api.query.vo.UserAccountJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
@ -255,6 +253,7 @@ import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.StoragePool;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.storage.StorageStats;
import com.cloud.storage.UploadVO;
import com.cloud.storage.VMTemplateVO;
@ -266,6 +265,7 @@ import com.cloud.storage.dao.GuestOSCategoryDao;
import com.cloud.storage.dao.GuestOSDao;
import com.cloud.storage.dao.SnapshotDao;
import com.cloud.storage.dao.SnapshotPolicyDao;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.dao.UploadDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.storage.dao.VMTemplateDetailsDao;
@ -400,7 +400,7 @@ public class ApiDBUtils {
static HostJoinDao s_hostJoinDao;
static VolumeJoinDao s_volJoinDao;
static StoragePoolJoinDao s_poolJoinDao;
static StorageTagDao s_tagDao;
static StoragePoolTagsDao s_tagDao;
static HostTagDao s_hostTagDao;
static ImageStoreJoinDao s_imageStoreJoinDao;
static AccountJoinDao s_accountJoinDao;
@ -600,7 +600,7 @@ public class ApiDBUtils {
@Inject
private StoragePoolJoinDao poolJoinDao;
@Inject
private StorageTagDao tagDao;
private StoragePoolTagsDao tagDao;
@Inject
private HostTagDao hosttagDao;
@Inject
@ -1800,7 +1800,7 @@ public class ApiDBUtils {
return s_poolJoinDao.newStoragePoolResponse(vr);
}
public static StorageTagResponse newStorageTagResponse(StorageTagVO vr) {
public static StorageTagResponse newStorageTagResponse(StoragePoolTagVO vr) {
return s_tagDao.newStorageTagResponse(vr);
}

View File

@ -166,7 +166,7 @@ public class DefaultLoginAPIAuthenticatorCmd extends BaseCmd implements APIAuthe
throw new CloudAuthenticationException("Unable to find the domain from the path " + domain);
}
final UserAccount userAccount = _accountService.getActiveUserAccount(username[0], domainId);
if (userAccount == null || !(User.Source.UNKNOWN.equals(userAccount.getSource()) || User.Source.LDAP.equals(userAccount.getSource()))) {
if (userAccount != null && User.Source.SAML2 == userAccount.getSource()) {
throw new CloudAuthenticationException("User is not allowed CloudStack login");
}
return ApiResponseSerializer.toSerializedString(_apiServer.loginUser(session, username[0], pwd, domainId, domain, remoteAddress, params),

View File

@ -125,7 +125,6 @@ import com.cloud.api.query.dao.ResourceTagJoinDao;
import com.cloud.api.query.dao.SecurityGroupJoinDao;
import com.cloud.api.query.dao.ServiceOfferingJoinDao;
import com.cloud.api.query.dao.StoragePoolJoinDao;
import com.cloud.api.query.dao.StorageTagDao;
import com.cloud.api.query.dao.TemplateJoinDao;
import com.cloud.api.query.dao.UserAccountJoinDao;
import com.cloud.api.query.dao.UserVmJoinDao;
@ -149,7 +148,6 @@ import com.cloud.api.query.vo.ResourceTagJoinVO;
import com.cloud.api.query.vo.SecurityGroupJoinVO;
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.api.query.vo.UserAccountJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
@ -190,8 +188,10 @@ import com.cloud.storage.ScopeType;
import com.cloud.storage.Storage;
import com.cloud.storage.Storage.ImageFormat;
import com.cloud.storage.Storage.TemplateType;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.storage.VMTemplateVO;
import com.cloud.storage.Volume;
import com.cloud.storage.dao.StoragePoolTagsDao;
import com.cloud.storage.dao.VMTemplateDao;
import com.cloud.tags.ResourceTagVO;
import com.cloud.tags.dao.ResourceTagDao;
@ -306,7 +306,7 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
private StoragePoolJoinDao _poolJoinDao;
@Inject
private StorageTagDao _storageTagDao;
private StoragePoolTagsDao _storageTagDao;
@Inject
private HostTagDao _hostTagDao;
@ -2268,43 +2268,43 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q
@Override
public ListResponse<StorageTagResponse> searchForStorageTags(ListStorageTagsCmd cmd) {
Pair<List<StorageTagVO>, Integer> result = searchForStorageTagsInternal(cmd);
Pair<List<StoragePoolTagVO>, Integer> result = searchForStorageTagsInternal(cmd);
ListResponse<StorageTagResponse> response = new ListResponse<StorageTagResponse>();
List<StorageTagResponse> tagResponses = ViewResponseHelper.createStorageTagResponse(result.first().toArray(new StorageTagVO[result.first().size()]));
List<StorageTagResponse> tagResponses = ViewResponseHelper.createStorageTagResponse(result.first().toArray(new StoragePoolTagVO[result.first().size()]));
response.setResponses(tagResponses, result.second());
return response;
}
private Pair<List<StorageTagVO>, Integer> searchForStorageTagsInternal(ListStorageTagsCmd cmd) {
Filter searchFilter = new Filter(StorageTagVO.class, "id", Boolean.TRUE, null, null);
private Pair<List<StoragePoolTagVO>, Integer> searchForStorageTagsInternal(ListStorageTagsCmd cmd) {
Filter searchFilter = new Filter(StoragePoolTagVO.class, "id", Boolean.TRUE, null, null);
SearchBuilder<StorageTagVO> sb = _storageTagDao.createSearchBuilder();
SearchBuilder<StoragePoolTagVO> sb = _storageTagDao.createSearchBuilder();
sb.select(null, Func.DISTINCT, sb.entity().getId()); // select distinct
SearchCriteria<StorageTagVO> sc = sb.create();
SearchCriteria<StoragePoolTagVO> sc = sb.create();
// search storage tag details by ids
Pair<List<StorageTagVO>, Integer> uniqueTagPair = _storageTagDao.searchAndCount(sc, searchFilter);
Pair<List<StoragePoolTagVO>, Integer> uniqueTagPair = _storageTagDao.searchAndCount(sc, searchFilter);
Integer count = uniqueTagPair.second();
if (count.intValue() == 0) {
return uniqueTagPair;
}
List<StorageTagVO> uniqueTags = uniqueTagPair.first();
List<StoragePoolTagVO> uniqueTags = uniqueTagPair.first();
Long[] vrIds = new Long[uniqueTags.size()];
int i = 0;
for (StorageTagVO v : uniqueTags) {
for (StoragePoolTagVO v : uniqueTags) {
vrIds[i++] = v.getId();
}
List<StorageTagVO> vrs = _storageTagDao.searchByIds(vrIds);
List<StoragePoolTagVO> vrs = _storageTagDao.searchByIds(vrIds);
return new Pair<List<StorageTagVO>, Integer>(vrs, count);
return new Pair<List<StoragePoolTagVO>, Integer>(vrs, count);
}
@Override

View File

@ -73,11 +73,11 @@ import com.cloud.api.query.vo.ResourceTagJoinVO;
import com.cloud.api.query.vo.SecurityGroupJoinVO;
import com.cloud.api.query.vo.ServiceOfferingJoinVO;
import com.cloud.api.query.vo.StoragePoolJoinVO;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.api.query.vo.TemplateJoinVO;
import com.cloud.api.query.vo.UserAccountJoinVO;
import com.cloud.api.query.vo.UserVmJoinVO;
import com.cloud.api.query.vo.VolumeJoinVO;
import com.cloud.storage.StoragePoolTagVO;
import com.cloud.user.Account;
/**
@ -294,10 +294,10 @@ public class ViewResponseHelper {
return new ArrayList<StoragePoolResponse>(vrDataList.values());
}
public static List<StorageTagResponse> createStorageTagResponse(StorageTagVO... storageTags) {
public static List<StorageTagResponse> createStorageTagResponse(StoragePoolTagVO... storageTags) {
ArrayList<StorageTagResponse> list = new ArrayList<StorageTagResponse>();
for (StorageTagVO vr : storageTags) {
for (StoragePoolTagVO vr : storageTags) {
list.add(ApiDBUtils.newStorageTagResponse(vr));
}

View File

@ -41,4 +41,6 @@ public interface HostJoinDao extends GenericDao<HostJoinVO, Long> {
List<HostJoinVO> searchByIds(Long... ids);
List<HostJoinVO> findByClusterId(Long clusterId, Host.Type type);
}

View File

@ -65,6 +65,8 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
private final SearchBuilder<HostJoinVO> hostIdSearch;
private final SearchBuilder<HostJoinVO> ClusterSearch;
protected HostJoinDaoImpl() {
hostSearch = createSearchBuilder();
@ -75,6 +77,11 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
hostIdSearch.and("id", hostIdSearch.entity().getId(), SearchCriteria.Op.EQ);
hostIdSearch.done();
ClusterSearch = createSearchBuilder();
ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ);
ClusterSearch.and("type", ClusterSearch.entity().getType(), SearchCriteria.Op.EQ);
ClusterSearch.done();
this._count = "select count(distinct id) from host_view WHERE ";
}
@ -432,4 +439,12 @@ public class HostJoinDaoImpl extends GenericDaoBase<HostJoinVO, Long> implements
return uvList;
}
@Override
public List<HostJoinVO> findByClusterId(Long clusterId, Host.Type type) {
SearchCriteria<HostJoinVO> sc = ClusterSearch.create();
sc.setParameters("clusterId", clusterId);
sc.setParameters("type", type);
return listBy(sc);
}
}

View File

@ -1,124 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.query.dao;
import java.util.ArrayList;
import java.util.List;
import javax.inject.Inject;
import org.apache.cloudstack.api.response.StorageTagResponse;
import org.apache.cloudstack.framework.config.dao.ConfigurationDao;
import org.apache.log4j.Logger;
import org.springframework.stereotype.Component;
import com.cloud.api.query.vo.StorageTagVO;
import com.cloud.utils.db.GenericDaoBase;
import com.cloud.utils.db.SearchBuilder;
import com.cloud.utils.db.SearchCriteria;
@Component
/**
 * DAO over the {@code storage_tag_view} database view, producing
 * {@link StorageTagResponse} objects and batched id lookups.
 */
public class StorageTagDaoImpl extends GenericDaoBase<StorageTagVO, Long> implements StorageTagDao {
    public static final Logger s_logger = Logger.getLogger(StorageTagDaoImpl.class);

    @Inject
    private ConfigurationDao _configDao;

    // Search over multiple ids (IN clause), used by searchByIds batching.
    private final SearchBuilder<StorageTagVO> stSearch;
    // Search by a single id (currently built but not used by this class).
    private final SearchBuilder<StorageTagVO> stIdSearch;

    protected StorageTagDaoImpl() {
        stSearch = createSearchBuilder();
        stSearch.and("idIN", stSearch.entity().getId(), SearchCriteria.Op.IN);
        stSearch.done();

        stIdSearch = createSearchBuilder();
        stIdSearch.and("id", stIdSearch.entity().getId(), SearchCriteria.Op.EQ);
        stIdSearch.done();

        // Override the inherited COUNT query so searchAndCount counts distinct view rows.
        _count = "select count(distinct id) from storage_tag_view WHERE ";
    }

    /**
     * Maps a view row to its API response (name, pool id, object name).
     */
    @Override
    public StorageTagResponse newStorageTagResponse(StorageTagVO tag) {
        StorageTagResponse tagResponse = new StorageTagResponse();

        tagResponse.setName(tag.getName());
        tagResponse.setPoolId(tag.getPoolId());
        tagResponse.setObjectName("storagetag");

        return tagResponse;
    }

    /**
     * Loads rows for the given ids in batches of {@code detail.batch.query.size}
     * (default 2000) to keep the IN clause bounded. Removed rows are included
     * (searchIncludingRemoved). Returns results in batch order.
     */
    @Override
    public List<StorageTagVO> searchByIds(Long... stIds) {
        String batchCfg = _configDao.getValue("detail.batch.query.size");
        final int detailsBatchSize = batchCfg != null ? Integer.parseInt(batchCfg) : 2000;

        // query details by batches
        List<StorageTagVO> uvList = new ArrayList<StorageTagVO>();
        int curr_index = 0;

        // Full batches of exactly detailsBatchSize ids.
        if (stIds.length > detailsBatchSize) {
            while ((curr_index + detailsBatchSize) <= stIds.length) {
                Long[] ids = new Long[detailsBatchSize];

                for (int k = 0, j = curr_index; j < curr_index + detailsBatchSize; j++, k++) {
                    ids[k] = stIds[j];
                }

                SearchCriteria<StorageTagVO> sc = stSearch.create();

                sc.setParameters("idIN", (Object[])ids);

                List<StorageTagVO> vms = searchIncludingRemoved(sc, null, null, false);

                if (vms != null) {
                    uvList.addAll(vms);
                }

                curr_index += detailsBatchSize;
            }
        }

        // Trailing partial batch (or the only batch when stIds.length <= batch size).
        if (curr_index < stIds.length) {
            int batch_size = (stIds.length - curr_index);

            // set the ids value
            Long[] ids = new Long[batch_size];

            for (int k = 0, j = curr_index; j < curr_index + batch_size; j++, k++) {
                ids[k] = stIds[j];
            }

            SearchCriteria<StorageTagVO> sc = stSearch.create();

            sc.setParameters("idIN", (Object[])ids);

            List<StorageTagVO> vms = searchIncludingRemoved(sc, null, null, false);

            if (vms != null) {
                uvList.addAll(vms);
            }
        }

        return uvList;
    }
}

View File

@ -1,61 +0,0 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package com.cloud.api.query.vo;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.Id;
import javax.persistence.Table;
import org.apache.cloudstack.api.InternalIdentity;
/**
 * Storage Tags DB view.
 *
 * Read-only JPA mapping of the {@code storage_tag_view} database view;
 * one row per (storage pool, tag) pair. Populated by the persistence
 * provider via field access — no setters are needed for {@code id} and
 * {@code name}.
 */
@Entity
@Table(name = "storage_tag_view")
public class StorageTagVO extends BaseViewVO implements InternalIdentity {
    private static final long serialVersionUID = 1L;

    @Id
    @Column(name = "id")
    private long id;

    @Column(name = "name")
    private String name;

    // private for consistency with the other mapped fields above
    @Column(name = "pool_id")
    private long poolId;

    /** @return the internal database id of this view row */
    @Override
    public long getId() {
        return id;
    }

    /** @return the tag text */
    public String getName() {
        return name;
    }

    /** @return id of the storage pool this tag is attached to */
    public long getPoolId() {
        return poolId;
    }

    /** @param poolId id of the storage pool this tag is attached to */
    public void setPoolId(long poolId) {
        this.poolId = poolId;
    }
}

View File

@ -184,7 +184,6 @@ import com.cloud.service.dao.ServiceOfferingDao;
import com.cloud.service.dao.ServiceOfferingDetailsDao;
import com.cloud.storage.DiskOfferingVO;
import com.cloud.storage.Storage.ProvisioningType;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageManager;
import com.cloud.storage.dao.DiskOfferingDao;
import com.cloud.storage.dao.VolumeDao;
@ -507,7 +506,7 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati
throw new InvalidParameterValueException("unable to find storage pool by id " + resourceId);
}
if(name.equals(CapacityManager.StorageOverprovisioningFactor.key())) {
if(pool.getPoolType() != StoragePoolType.NetworkFilesystem && pool.getPoolType() != StoragePoolType.VMFS) {
if(!pool.getPoolType().supportsOverProvisioning() ) {
throw new InvalidParameterValueException("Unable to update storage pool with id " + resourceId + ". Overprovision not supported for " + pool.getPoolType());
}
}

View File

@ -588,7 +588,7 @@ StateListener<State, VirtualMachine.Event, VirtualMachine> {
List<Long> allDedicatedPods = _dedicatedDao.listAllPods();
allPodsInDc.retainAll(allDedicatedPods);
List<Long> allClustersInDc = _clusterDao.listAllCusters(dc.getId());
List<Long> allClustersInDc = _clusterDao.listAllClusters(dc.getId());
List<Long> allDedicatedClusters = _dedicatedDao.listAllClusters();
allClustersInDc.retainAll(allDedicatedClusters);

Some files were not shown because too many files have changed in this diff Show More